IB/core: Remove debug prints after allocation failure

The prints after [k|v][m|z|c]alloc() functions are not needed:
on allocation failure, the allocator already prints its own
error message.

Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Doug Ledford <dledford@redhat.com>
commit a0b3455fcb (parent 2716243212)
Author:    Leon Romanovsky <leon@kernel.org> 2016-11-03 16:44:10 +02:00
Committer: Doug Ledford <dledford@redhat.com>

6 changed files with 11 additions and 30 deletions
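
Every hunk below reduces to the same idiom: return the error code directly and rely on the allocator's own failure diagnostics, which the slab/page allocator emits unless the call site passes __GFP_NOWARN. A minimal sketch of the resulting pattern (struct foo_ctx and foo_create() are hypothetical illustrations, not part of this commit):

#include <linux/slab.h>

/* Hypothetical context structure, for illustration only. */
struct foo_ctx {
        int id;
};

static int foo_create(struct foo_ctx **out)
{
        struct foo_ctx *ctx;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM; /* allocator has already logged the failure */

        *out = ctx;
        return 0;
}

Note the sizeof(*ctx) spelling, which kernel style (checkpatch) prefers over sizeof(struct foo_ctx) because it stays correct if the pointer's type changes.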


@@ -156,7 +156,6 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
 	/* Create new device info */
 	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
 	if (!port_priv) {
-		dev_err(&device->dev, "No memory for ib_agent_port_private\n");
 		ret = -ENOMEM;
 		goto error1;
 	}


@@ -254,11 +254,8 @@ static int add_client_context(struct ib_device *device, struct ib_client *client)
 	unsigned long flags;
 
 	context = kmalloc(sizeof *context, GFP_KERNEL);
-	if (!context) {
-		pr_warn("Couldn't allocate client context for %s/%s\n",
-			device->name, client->name);
+	if (!context)
 		return -ENOMEM;
-	}
 
 	context->client = client;
 	context->data = NULL;


@@ -247,7 +247,6 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
 		kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket,
 			GFP_KERNEL);
 	if (!pool->cache_bucket) {
-		pr_warn(PFX "Failed to allocate cache in pool\n");
 		ret = -ENOMEM;
 		goto out_free_pool;
 	}


@@ -604,7 +604,6 @@ int iwpm_remote_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
 	}
 	rem_info = kzalloc(sizeof(struct iwpm_remote_info), GFP_ATOMIC);
 	if (!rem_info) {
-		pr_err("%s: Unable to allocate a remote info\n", __func__);
 		ret = -ENOMEM;
 		return ret;
 	}


@@ -62,7 +62,6 @@ int iwpm_init(u8 nl_client)
 			sizeof(struct hlist_head), GFP_KERNEL);
 		if (!iwpm_hash_bucket) {
 			ret = -ENOMEM;
-			pr_err("%s Unable to create mapinfo hash table\n", __func__);
 			goto init_exit;
 		}
 		iwpm_reminfo_bucket = kzalloc(IWPM_REMINFO_HASH_SIZE *
@@ -70,7 +69,6 @@ int iwpm_init(u8 nl_client)
 		if (!iwpm_reminfo_bucket) {
 			kfree(iwpm_hash_bucket);
 			ret = -ENOMEM;
-			pr_err("%s Unable to create reminfo hash table\n", __func__);
 			goto init_exit;
 		}
 	}
@@ -128,10 +126,9 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
 	if (!iwpm_valid_client(nl_client))
 		return ret;
 	map_info = kzalloc(sizeof(struct iwpm_mapping_info), GFP_KERNEL);
-	if (!map_info) {
-		pr_err("%s: Unable to allocate a mapping info\n", __func__);
+	if (!map_info)
 		return -ENOMEM;
-	}
+
 	memcpy(&map_info->local_sockaddr, local_sockaddr,
 	       sizeof(struct sockaddr_storage));
 	memcpy(&map_info->mapped_sockaddr, mapped_sockaddr,
@@ -309,10 +306,9 @@ struct iwpm_nlmsg_request *iwpm_get_nlmsg_request(__u32 nlmsg_seq,
 	unsigned long flags;
 
 	nlmsg_request = kzalloc(sizeof(struct iwpm_nlmsg_request), gfp);
-	if (!nlmsg_request) {
-		pr_err("%s Unable to allocate a nlmsg_request\n", __func__);
+	if (!nlmsg_request)
 		return NULL;
-	}
+
 	spin_lock_irqsave(&iwpm_nlmsg_req_lock, flags);
 	list_add_tail(&nlmsg_request->inprocess_list, &iwpm_nlmsg_req_list);
 	spin_unlock_irqrestore(&iwpm_nlmsg_req_lock, flags);


@@ -304,10 +304,9 @@ static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
 	for_ifa(in_dev) {
 		struct sin_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
 
-		if (!entry) {
-			pr_warn("roce_gid_mgmt: couldn't allocate entry for IPv4 update\n");
+		if (!entry)
 			continue;
-		}
+
 		entry->ip.sin_family = AF_INET;
 		entry->ip.sin_addr.s_addr = ifa->ifa_address;
 		list_add_tail(&entry->list, &sin_list);
@@ -348,10 +347,8 @@ static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
 	list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
 		struct sin6_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
 
-		if (!entry) {
-			pr_warn("roce_gid_mgmt: couldn't allocate entry for IPv6 update\n");
+		if (!entry)
 			continue;
-		}
 		entry->sin6.sin6_family = AF_INET6;
 		entry->sin6.sin6_addr = ifp->addr;
@@ -459,10 +456,8 @@ static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
 			struct upper_list *entry = kmalloc(sizeof(*entry),
 							   GFP_ATOMIC);
 
-			if (!entry) {
-				pr_info("roce_gid_mgmt: couldn't allocate entry to delete ndev\n");
+			if (!entry)
 				continue;
-			}
 			list_add_tail(&entry->list, &upper_list);
 			dev_hold(upper);
@@ -555,10 +550,8 @@ static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
 	struct netdev_event_work *ndev_work =
 		kmalloc(sizeof(*ndev_work), GFP_KERNEL);
 
-	if (!ndev_work) {
-		pr_warn("roce_gid_mgmt: can't allocate work for netdevice_event\n");
+	if (!ndev_work)
 		return NOTIFY_DONE;
-	}
 
 	memcpy(ndev_work->cmds, cmds, sizeof(ndev_work->cmds));
 	for (i = 0; i < ARRAY_SIZE(ndev_work->cmds) && ndev_work->cmds[i].cb; i++) {
@@ -692,10 +685,8 @@ static int addr_event(struct notifier_block *this, unsigned long event,
 	}
 
 	work = kmalloc(sizeof(*work), GFP_ATOMIC);
-	if (!work) {
-		pr_warn("roce_gid_mgmt: Couldn't allocate work for addr_event\n");
+	if (!work)
 		return NOTIFY_DONE;
-	}
 
 	INIT_WORK(&work->work, update_gid_event_work_handler);