-rw-r--r--	drivers/infiniband/core/cache.c	15
-rw-r--r--	drivers/infiniband/core/cma.c	14
-rw-r--r--	drivers/infiniband/core/device.c	29
-rw-r--r--	drivers/infiniband/core/fmr_pool.c	37
-rw-r--r--	drivers/infiniband/core/packer.c	14
-rw-r--r--	drivers/infiniband/core/sa_query.c	13
-rw-r--r--	drivers/infiniband/core/ucm.c	8
-rw-r--r--	drivers/infiniband/core/ucma.c	6
-rw-r--r--	drivers/infiniband/core/ud_header.c	23
-rw-r--r--	drivers/infiniband/core/uverbs_cmd.c	9
-rw-r--r--	drivers/infiniband/core/uverbs_main.c	80
-rw-r--r--	drivers/infiniband/core/verbs.c	164
-rw-r--r--	drivers/infiniband/hw/cxgb4/cq.c	9
-rw-r--r--	drivers/infiniband/hw/cxgb4/iw_cxgb4.h	4
-rw-r--r--	drivers/infiniband/hw/cxgb4/provider.c	2
-rw-r--r--	drivers/infiniband/hw/cxgb4/qp.c	16
-rw-r--r--	drivers/infiniband/hw/mlx5/srq.c	41
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib.h	2
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_cm.c	23
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_ib.c	18
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_verbs.c	5
-rw-r--r--	drivers/infiniband/ulp/iser/iscsi_iser.h	7
-rw-r--r--	drivers/infiniband/ulp/iser/iser_initiator.c	7
-rw-r--r--	drivers/infiniband/ulp/iser/iser_verbs.c	15
-rw-r--r--	drivers/infiniband/ulp/srp/ib_srp.c	40
-rw-r--r--	include/rdma/ib_mad.h	4
-rw-r--r--	include/rdma/ib_verbs.h	5
-rw-r--r--	net/9p/trans_rdma.c	86
28 files changed, 424 insertions, 272 deletions
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 53343ffbff7a..cb00d59da456 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -1043,8 +1043,8 @@ static void ib_cache_update(struct ib_device *device,
ret = ib_query_port(device, port, tprops);
if (ret) {
- printk(KERN_WARNING "ib_query_port failed (%d) for %s\n",
- ret, device->name);
+ pr_warn("ib_query_port failed (%d) for %s\n",
+ ret, device->name);
goto err;
}
@@ -1067,8 +1067,8 @@ static void ib_cache_update(struct ib_device *device,
for (i = 0; i < pkey_cache->table_len; ++i) {
ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
if (ret) {
- printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n",
- ret, device->name, i);
+ pr_warn("ib_query_pkey failed (%d) for %s (index %d)\n",
+ ret, device->name, i);
goto err;
}
}
@@ -1078,8 +1078,8 @@ static void ib_cache_update(struct ib_device *device,
ret = ib_query_gid(device, port, i,
gid_cache->table + i, NULL);
if (ret) {
- printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
- ret, device->name, i);
+ pr_warn("ib_query_gid failed (%d) for %s (index %d)\n",
+ ret, device->name, i);
goto err;
}
}
@@ -1161,8 +1161,7 @@ int ib_cache_setup_one(struct ib_device *device)
GFP_KERNEL);
if (!device->cache.pkey_cache ||
!device->cache.lmc_cache) {
- printk(KERN_WARNING "Couldn't allocate cache "
- "for %s\n", device->name);
+ pr_warn("Couldn't allocate cache for %s\n", device->name);
return -ENOMEM;
}
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 9729639df407..e13121f41949 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1713,7 +1713,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
break;
default:
- printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
+ pr_err("RDMA CMA: unexpected IB CM event: %d\n",
ib_event->event);
goto out;
}
@@ -2186,8 +2186,8 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
ret = rdma_listen(id, id_priv->backlog);
if (ret)
- printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, "
- "listening on device %s\n", ret, cma_dev->device->name);
+ pr_warn("RDMA CMA: cma_listen_on_dev, error %d, listening on device %s\n",
+ ret, cma_dev->device->name);
}
static void cma_listen_on_all(struct rdma_id_private *id_priv)
@@ -3239,7 +3239,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
event.status = 0;
break;
default:
- printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
+ pr_err("RDMA CMA: unexpected IB CM event: %d\n",
ib_event->event);
goto out;
}
@@ -4003,8 +4003,8 @@ static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id
if ((dev_addr->bound_dev_if == ndev->ifindex) &&
(net_eq(dev_net(ndev), dev_addr->net)) &&
memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
- printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
- ndev->name, &id_priv->id);
+ pr_info("RDMA CM addr change for ndev %s used by id %p\n",
+ ndev->name, &id_priv->id);
work = kzalloc(sizeof *work, GFP_KERNEL);
if (!work)
return -ENOMEM;
@@ -4287,7 +4287,7 @@ static int __init cma_init(void)
goto err;
if (ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS, cma_cb_table))
- printk(KERN_WARNING "RDMA CMA: failed to add netlink callback\n");
+ pr_warn("RDMA CMA: failed to add netlink callback\n");
cma_configfs_init();
return 0;
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 00da80e02154..270c7ff6cba7 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -115,8 +115,8 @@ static int ib_device_check_mandatory(struct ib_device *device)
for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
- printk(KERN_WARNING "Device %s is missing mandatory function %s\n",
- device->name, mandatory_table[i].name);
+ pr_warn("Device %s is missing mandatory function %s\n",
+ device->name, mandatory_table[i].name);
return -EINVAL;
}
}
@@ -255,8 +255,8 @@ static int add_client_context(struct ib_device *device, struct ib_client *client
context = kmalloc(sizeof *context, GFP_KERNEL);
if (!context) {
- printk(KERN_WARNING "Couldn't allocate client context for %s/%s\n",
- device->name, client->name);
+ pr_warn("Couldn't allocate client context for %s/%s\n",
+ device->name, client->name);
return -ENOMEM;
}
@@ -343,28 +343,29 @@ int ib_register_device(struct ib_device *device,
ret = read_port_immutable(device);
if (ret) {
- printk(KERN_WARNING "Couldn't create per port immutable data %s\n",
- device->name);
+ pr_warn("Couldn't create per port immutable data %s\n",
+ device->name);
goto out;
}
ret = ib_cache_setup_one(device);
if (ret) {
- printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
+ pr_warn("Couldn't set up InfiniBand P_Key/GID cache\n");
goto out;
}
memset(&device->attrs, 0, sizeof(device->attrs));
ret = device->query_device(device, &device->attrs, &uhw);
if (ret) {
- printk(KERN_WARNING "Couldn't query the device attributes\n");
+ pr_warn("Couldn't query the device attributes\n");
+ ib_cache_cleanup_one(device);
goto out;
}
ret = ib_device_register_sysfs(device, port_callback);
if (ret) {
- printk(KERN_WARNING "Couldn't register device %s with driver model\n",
- device->name);
+ pr_warn("Couldn't register device %s with driver model\n",
+ device->name);
ib_cache_cleanup_one(device);
goto out;
}
@@ -565,8 +566,8 @@ void ib_set_client_data(struct ib_device *device, struct ib_client *client,
goto out;
}
- printk(KERN_WARNING "No client context found for %s/%s\n",
- device->name, client->name);
+ pr_warn("No client context found for %s/%s\n",
+ device->name, client->name);
out:
spin_unlock_irqrestore(&device->client_data_lock, flags);
@@ -959,13 +960,13 @@ static int __init ib_core_init(void)
ret = class_register(&ib_class);
if (ret) {
- printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
+ pr_warn("Couldn't create InfiniBand device class\n");
goto err_comp;
}
ret = ibnl_init();
if (ret) {
- printk(KERN_WARNING "Couldn't init IB netlink interface\n");
+ pr_warn("Couldn't init IB netlink interface\n");
goto err_sysfs;
}
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 6ac3683c144b..cdbb1f1a6d97 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -150,8 +150,8 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
#ifdef DEBUG
if (fmr->ref_count !=0) {
- printk(KERN_WARNING PFX "Unmapping FMR 0x%08x with ref count %d\n",
- fmr, fmr->ref_count);
+ pr_warn(PFX "Unmapping FMR 0x%08x with ref count %d\n",
+ fmr, fmr->ref_count);
}
#endif
}
@@ -167,7 +167,7 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
ret = ib_unmap_fmr(&fmr_list);
if (ret)
- printk(KERN_WARNING PFX "ib_unmap_fmr returned %d\n", ret);
+ pr_warn(PFX "ib_unmap_fmr returned %d\n", ret);
spin_lock_irq(&pool->pool_lock);
list_splice(&unmap_list, &pool->free_list);
@@ -222,8 +222,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
device = pd->device;
if (!device->alloc_fmr || !device->dealloc_fmr ||
!device->map_phys_fmr || !device->unmap_fmr) {
- printk(KERN_INFO PFX "Device %s does not support FMRs\n",
- device->name);
+ pr_info(PFX "Device %s does not support FMRs\n", device->name);
return ERR_PTR(-ENOSYS);
}
@@ -233,13 +232,10 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
max_remaps = device->attrs.max_map_per_fmr;
pool = kmalloc(sizeof *pool, GFP_KERNEL);
- if (!pool) {
- printk(KERN_WARNING PFX "couldn't allocate pool struct\n");
+ if (!pool)
return ERR_PTR(-ENOMEM);
- }
pool->cache_bucket = NULL;
-
pool->flush_function = params->flush_function;
pool->flush_arg = params->flush_arg;
@@ -251,7 +247,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket,
GFP_KERNEL);
if (!pool->cache_bucket) {
- printk(KERN_WARNING PFX "Failed to allocate cache in pool\n");
+ pr_warn(PFX "Failed to allocate cache in pool\n");
ret = -ENOMEM;
goto out_free_pool;
}
@@ -275,7 +271,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
"ib_fmr(%s)",
device->name);
if (IS_ERR(pool->thread)) {
- printk(KERN_WARNING PFX "couldn't start cleanup thread\n");
+ pr_warn(PFX "couldn't start cleanup thread\n");
ret = PTR_ERR(pool->thread);
goto out_free_pool;
}
@@ -294,11 +290,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
for (i = 0; i < params->pool_size; ++i) {
fmr = kmalloc(bytes_per_fmr, GFP_KERNEL);
- if (!fmr) {
- printk(KERN_WARNING PFX "failed to allocate fmr "
- "struct for FMR %d\n", i);
+ if (!fmr)
goto out_fail;
- }
fmr->pool = pool;
fmr->remap_count = 0;
@@ -307,8 +300,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
fmr->fmr = ib_alloc_fmr(pd, params->access, &fmr_attr);
if (IS_ERR(fmr->fmr)) {
- printk(KERN_WARNING PFX "fmr_create failed "
- "for FMR %d\n", i);
+ pr_warn(PFX "fmr_create failed for FMR %d\n",
+ i);
kfree(fmr);
goto out_fail;
}
@@ -363,8 +356,8 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
}
if (i < pool->pool_size)
- printk(KERN_WARNING PFX "pool still has %d regions registered\n",
- pool->pool_size - i);
+ pr_warn(PFX "pool still has %d regions registered\n",
+ pool->pool_size - i);
kfree(pool->cache_bucket);
kfree(pool);
@@ -463,7 +456,7 @@ struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
list_add(&fmr->list, &pool->free_list);
spin_unlock_irqrestore(&pool->pool_lock, flags);
- printk(KERN_WARNING PFX "fmr_map returns %d\n", result);
+ pr_warn(PFX "fmr_map returns %d\n", result);
return ERR_PTR(result);
}
@@ -517,8 +510,8 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
#ifdef DEBUG
if (fmr->ref_count < 0)
- printk(KERN_WARNING PFX "FMR %p has ref count %d < 0\n",
- fmr, fmr->ref_count);
+ pr_warn(PFX "FMR %p has ref count %d < 0\n",
+ fmr, fmr->ref_count);
#endif
spin_unlock_irqrestore(&pool->pool_lock, flags);
diff --git a/drivers/infiniband/core/packer.c b/drivers/infiniband/core/packer.c
index 1b65986c0be3..19b1ee3279b4 100644
--- a/drivers/infiniband/core/packer.c
+++ b/drivers/infiniband/core/packer.c
@@ -44,7 +44,7 @@ static u64 value_read(int offset, int size, void *structure)
case 4: return be32_to_cpup((__be32 *) (structure + offset));
case 8: return be64_to_cpup((__be64 *) (structure + offset));
default:
- printk(KERN_WARNING "Field size %d bits not handled\n", size * 8);
+ pr_warn("Field size %d bits not handled\n", size * 8);
return 0;
}
}
@@ -104,9 +104,8 @@ void ib_pack(const struct ib_field *desc,
} else {
if (desc[i].offset_bits % 8 ||
desc[i].size_bits % 8) {
- printk(KERN_WARNING "Structure field %s of size %d "
- "bits is not byte-aligned\n",
- desc[i].field_name, desc[i].size_bits);
+ pr_warn("Structure field %s of size %d bits is not byte-aligned\n",
+ desc[i].field_name, desc[i].size_bits);
}
if (desc[i].struct_size_bytes)
@@ -132,7 +131,7 @@ static void value_write(int offset, int size, u64 val, void *structure)
case 32: *(__be32 *) (structure + offset) = cpu_to_be32(val); break;
case 64: *(__be64 *) (structure + offset) = cpu_to_be64(val); break;
default:
- printk(KERN_WARNING "Field size %d bits not handled\n", size * 8);
+ pr_warn("Field size %d bits not handled\n", size * 8);
}
}
@@ -188,9 +187,8 @@ void ib_unpack(const struct ib_field *desc,
} else {
if (desc[i].offset_bits % 8 ||
desc[i].size_bits % 8) {
- printk(KERN_WARNING "Structure field %s of size %d "
- "bits is not byte-aligned\n",
- desc[i].field_name, desc[i].size_bits);
+ pr_warn("Structure field %s of size %d bits is not byte-aligned\n",
+ desc[i].field_name, desc[i].size_bits);
}
memcpy(structure + desc[i].struct_offset_bytes,
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index f334090bb612..8e3bf6c8d3c3 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -864,13 +864,12 @@ static void update_sm_ah(struct work_struct *work)
struct ib_ah_attr ah_attr;
if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
- printk(KERN_WARNING "Couldn't query port\n");
+ pr_warn("Couldn't query port\n");
return;
}
new_ah = kmalloc(sizeof *new_ah, GFP_KERNEL);
if (!new_ah) {
- printk(KERN_WARNING "Couldn't allocate new SM AH\n");
return;
}
@@ -880,7 +879,7 @@ static void update_sm_ah(struct work_struct *work)
new_ah->pkey_index = 0;
if (ib_find_pkey(port->agent->device, port->port_num,
IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
- printk(KERN_ERR "Couldn't find index for default PKey\n");
+ pr_err("Couldn't find index for default PKey\n");
memset(&ah_attr, 0, sizeof ah_attr);
ah_attr.dlid = port_attr.sm_lid;
@@ -889,7 +888,7 @@ static void update_sm_ah(struct work_struct *work)
new_ah->ah = ib_create_ah(port->agent->qp->pd, &ah_attr);
if (IS_ERR(new_ah->ah)) {
- printk(KERN_WARNING "Couldn't create new SM AH\n");
+ pr_warn("Couldn't create new SM AH\n");
kfree(new_ah);
return;
}
@@ -1221,7 +1220,7 @@ static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
rec.net = NULL;
rec.ifindex = 0;
rec.gid_type = IB_GID_TYPE_IB;
- memset(rec.dmac, 0, ETH_ALEN);
+ eth_zero_addr(rec.dmac);
query->callback(status, &rec, query->context);
} else
query->callback(status, NULL, query->context);
@@ -1800,13 +1799,13 @@ static int __init ib_sa_init(void)
ret = ib_register_client(&sa_client);
if (ret) {
- printk(KERN_ERR "Couldn't register ib_sa client\n");
+ pr_err("Couldn't register ib_sa client\n");
goto err1;
}
ret = mcast_init();
if (ret) {
- printk(KERN_ERR "Couldn't initialize multicast handling\n");
+ pr_err("Couldn't initialize multicast handling\n");
goto err2;
}
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 6b4e8a008bc0..4a9aa0433b07 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -1234,7 +1234,7 @@ static int find_overflow_devnum(void)
ret = alloc_chrdev_region(&overflow_maj, 0, IB_UCM_MAX_DEVICES,
"infiniband_cm");
if (ret) {
- printk(KERN_ERR "ucm: couldn't register dynamic device number\n");
+ pr_err("ucm: couldn't register dynamic device number\n");
return ret;
}
}
@@ -1329,19 +1329,19 @@ static int __init ib_ucm_init(void)
ret = register_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES,
"infiniband_cm");
if (ret) {
- printk(KERN_ERR "ucm: couldn't register device number\n");
+ pr_err("ucm: couldn't register device number\n");
goto error1;
}
ret = class_create_file(&cm_class, &class_attr_abi_version.attr);
if (ret) {
- printk(KERN_ERR "ucm: couldn't create abi_version attribute\n");
+ pr_err("ucm: couldn't create abi_version attribute\n");
goto error2;
}
ret = ib_register_client(&ucm_client);
if (ret) {
- printk(KERN_ERR "ucm: couldn't register client\n");
+ pr_err("ucm: couldn't register client\n");
goto error3;
}
return 0;
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 8b5a934e1133..dd3bcceadfde 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -314,7 +314,7 @@ static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)
}
}
if (!event_found)
- printk(KERN_ERR "ucma_removal_event_handler: warning: connect request event wasn't found\n");
+ pr_err("ucma_removal_event_handler: warning: connect request event wasn't found\n");
}
static int ucma_event_handler(struct rdma_cm_id *cm_id,
@@ -1716,13 +1716,13 @@ static int __init ucma_init(void)
ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
if (ret) {
- printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
+ pr_err("rdma_ucm: couldn't create abi_version attr\n");
goto err1;
}
ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
if (!ucma_ctl_table_hdr) {
- printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n");
+ pr_err("rdma_ucm: couldn't register sysctl paths\n");
ret = -ENOMEM;
goto err2;
}
diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c
index 2116132568e7..29a45d2f8898 100644
--- a/drivers/infiniband/core/ud_header.c
+++ b/drivers/infiniband/core/ud_header.c
@@ -479,8 +479,8 @@ int ib_ud_header_unpack(void *buf,
buf += IB_LRH_BYTES;
if (header->lrh.link_version != 0) {
- printk(KERN_WARNING "Invalid LRH.link_version %d\n",
- header->lrh.link_version);
+ pr_warn("Invalid LRH.link_version %d\n",
+ header->lrh.link_version);
return -EINVAL;
}
@@ -496,20 +496,20 @@ int ib_ud_header_unpack(void *buf,
buf += IB_GRH_BYTES;
if (header->grh.ip_version != 6) {
- printk(KERN_WARNING "Invalid GRH.ip_version %d\n",
- header->grh.ip_version);
+ pr_warn("Invalid GRH.ip_version %d\n",
+ header->grh.ip_version);
return -EINVAL;
}
if (header->grh.next_header != 0x1b) {
- printk(KERN_WARNING "Invalid GRH.next_header 0x%02x\n",
- header->grh.next_header);
+ pr_warn("Invalid GRH.next_header 0x%02x\n",
+ header->grh.next_header);
return -EINVAL;
}
break;
default:
- printk(KERN_WARNING "Invalid LRH.link_next_header %d\n",
- header->lrh.link_next_header);
+ pr_warn("Invalid LRH.link_next_header %d\n",
+ header->lrh.link_next_header);
return -EINVAL;
}
@@ -525,14 +525,13 @@ int ib_ud_header_unpack(void *buf,
header->immediate_present = 1;
break;
default:
- printk(KERN_WARNING "Invalid BTH.opcode 0x%02x\n",
- header->bth.opcode);
+ pr_warn("Invalid BTH.opcode 0x%02x\n", header->bth.opcode);
return -EINVAL;
}
if (header->bth.transport_header_version != 0) {
- printk(KERN_WARNING "Invalid BTH.transport_header_version %d\n",
- header->bth.transport_header_version);
+ pr_warn("Invalid BTH.transport_header_version %d\n",
+ header->bth.transport_header_version);
return -EINVAL;
}
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 6ffc9c4e93af..6c6fbff19752 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1970,7 +1970,8 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
resp_size);
INIT_UDATA(&uhw, buf + sizeof(cmd),
(unsigned long)cmd.response + resp_size,
- in_len - sizeof(cmd), out_len - resp_size);
+ in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
+ out_len - resp_size);
memset(&cmd_ex, 0, sizeof(cmd_ex));
cmd_ex.user_handle = cmd.user_handle;
@@ -3413,7 +3414,8 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
INIT_UDATA(&udata, buf + sizeof cmd,
(unsigned long) cmd.response + sizeof resp,
- in_len - sizeof cmd, out_len - sizeof resp);
+ in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
+ out_len - sizeof resp);
ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
if (ret)
@@ -3439,7 +3441,8 @@ ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
INIT_UDATA(&udata, buf + sizeof cmd,
(unsigned long) cmd.response + sizeof resp,
- in_len - sizeof cmd, out_len - sizeof resp);
+ in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
+ out_len - sizeof resp);
ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
if (ret)
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 39680aed99dd..28ba2cc81535 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -683,12 +683,28 @@ out:
return ev_file;
}
+static int verify_command_mask(struct ib_device *ib_dev, __u32 command)
+{
+ u64 mask;
+
+ if (command <= IB_USER_VERBS_CMD_OPEN_QP)
+ mask = ib_dev->uverbs_cmd_mask;
+ else
+ mask = ib_dev->uverbs_ex_cmd_mask;
+
+ if (mask & ((u64)1 << command))
+ return 0;
+
+ return -1;
+}
+
static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
size_t count, loff_t *pos)
{
struct ib_uverbs_file *file = filp->private_data;
struct ib_device *ib_dev;
struct ib_uverbs_cmd_hdr hdr;
+ __u32 command;
__u32 flags;
int srcu_key;
ssize_t ret;
@@ -707,37 +723,34 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
goto out;
}
- flags = (hdr.command &
- IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT;
+ if (hdr.command & ~(__u32)(IB_USER_VERBS_CMD_FLAGS_MASK |
+ IB_USER_VERBS_CMD_COMMAND_MASK)) {
+ ret = -EINVAL;
+ goto out;
+ }
- if (!flags) {
- __u32 command;
+ command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
+ if (verify_command_mask(ib_dev, command)) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
- if (hdr.command & ~(__u32)(IB_USER_VERBS_CMD_FLAGS_MASK |
- IB_USER_VERBS_CMD_COMMAND_MASK)) {
- ret = -EINVAL;
- goto out;
- }
+ if (!file->ucontext &&
+ command != IB_USER_VERBS_CMD_GET_CONTEXT) {
+ ret = -EINVAL;
+ goto out;
+ }
- command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
+ flags = (hdr.command &
+ IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT;
+ if (!flags) {
if (command >= ARRAY_SIZE(uverbs_cmd_table) ||
!uverbs_cmd_table[command]) {
ret = -EINVAL;
goto out;
}
- if (!file->ucontext &&
- command != IB_USER_VERBS_CMD_GET_CONTEXT) {
- ret = -EINVAL;
- goto out;
- }
-
- if (!(ib_dev->uverbs_cmd_mask & (1ull << command))) {
- ret = -ENOSYS;
- goto out;
- }
-
if (hdr.in_words * 4 != count) {
ret = -EINVAL;
goto out;
@@ -749,21 +762,11 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
hdr.out_words * 4);
} else if (flags == IB_USER_VERBS_CMD_FLAG_EXTENDED) {
- __u32 command;
-
struct ib_uverbs_ex_cmd_hdr ex_hdr;
struct ib_udata ucore;
struct ib_udata uhw;
size_t written_count = count;
- if (hdr.command & ~(__u32)(IB_USER_VERBS_CMD_FLAGS_MASK |
- IB_USER_VERBS_CMD_COMMAND_MASK)) {
- ret = -EINVAL;
- goto out;
- }
-
- command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
-
if (command >= ARRAY_SIZE(uverbs_ex_cmd_table) ||
!uverbs_ex_cmd_table[command]) {
ret = -ENOSYS;
@@ -775,11 +778,6 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
goto out;
}
- if (!(ib_dev->uverbs_ex_cmd_mask & (1ull << command))) {
- ret = -ENOSYS;
- goto out;
- }
-
if (count < (sizeof(hdr) + sizeof(ex_hdr))) {
ret = -EINVAL;
goto out;
@@ -1058,7 +1056,7 @@ static int find_overflow_devnum(void)
ret = alloc_chrdev_region(&overflow_maj, 0, IB_UVERBS_MAX_DEVICES,
"infiniband_verbs");
if (ret) {
- printk(KERN_ERR "user_verbs: couldn't register dynamic device number\n");
+ pr_err("user_verbs: couldn't register dynamic device number\n");
return ret;
}
}
@@ -1279,14 +1277,14 @@ static int __init ib_uverbs_init(void)
ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES,
"infiniband_verbs");
if (ret) {
- printk(KERN_ERR "user_verbs: couldn't register device number\n");
+ pr_err("user_verbs: couldn't register device number\n");
goto out;
}
uverbs_class = class_create(THIS_MODULE, "infiniband_verbs");
if (IS_ERR(uverbs_class)) {
ret = PTR_ERR(uverbs_class);
- printk(KERN_ERR "user_verbs: couldn't create class infiniband_verbs\n");
+ pr_err("user_verbs: couldn't create class infiniband_verbs\n");
goto out_chrdev;
}
@@ -1294,13 +1292,13 @@ static int __init ib_uverbs_init(void)
ret = class_create_file(uverbs_class, &class_attr_abi_version.attr);
if (ret) {
- printk(KERN_ERR "user_verbs: couldn't create abi_version attribute\n");
+ pr_err("user_verbs: couldn't create abi_version attribute\n");
goto out_class;
}
ret = ib_register_client(&uverbs_client);
if (ret) {
- printk(KERN_ERR "user_verbs: couldn't register client\n");
+ pr_err("user_verbs: couldn't register client\n");
goto out_class;
}
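
The new verify_command_mask() helper above folds the separate uverbs_cmd_mask and uverbs_ex_cmd_mask tests that used to sit in each branch of ib_uverbs_write() into one check done before dispatch (an unsupported command now returns -EOPNOTSUPP). For reference, a hedged sketch of the provider side of that contract — ibdev is a placeholder name; the bit positions are the existing IB_USER_VERBS_CMD_* / IB_USER_VERBS_EX_CMD_* values:

/* Hedged sketch: a driver advertises the commands it implements by
 * setting the bits that verify_command_mask() later tests. */
ibdev->uverbs_cmd_mask =
	(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	(1ull << IB_USER_VERBS_CMD_CREATE_QP);
/* Commands above IB_USER_VERBS_CMD_OPEN_QP are checked against the
 * extended mask instead. */
ibdev->uverbs_ex_cmd_mask =
	(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
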
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 5af6d024e053..48dc43cb8ccb 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1657,3 +1657,167 @@ next_page:
return i;
}
EXPORT_SYMBOL(ib_sg_to_pages);
+
+struct ib_drain_cqe {
+ struct ib_cqe cqe;
+ struct completion done;
+};
+
+static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
+ cqe);
+
+ complete(&cqe->done);
+}
+
+/*
+ * Post a WR and block until its completion is reaped for the SQ.
+ */
+static void __ib_drain_sq(struct ib_qp *qp)
+{
+ struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+ struct ib_drain_cqe sdrain;
+ struct ib_send_wr swr = {}, *bad_swr;
+ int ret;
+
+ if (qp->send_cq->poll_ctx == IB_POLL_DIRECT) {
+ WARN_ONCE(qp->send_cq->poll_ctx == IB_POLL_DIRECT,
+ "IB_POLL_DIRECT poll_ctx not supported for drain\n");
+ return;
+ }
+
+ swr.wr_cqe = &sdrain.cqe;
+ sdrain.cqe.done = ib_drain_qp_done;
+ init_completion(&sdrain.done);
+
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+ if (ret) {
+ WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+ return;
+ }
+
+ ret = ib_post_send(qp, &swr, &bad_swr);
+ if (ret) {
+ WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+ return;
+ }
+
+ wait_for_completion(&sdrain.done);
+}
+
+/*
+ * Post a WR and block until its completion is reaped for the RQ.
+ */
+static void __ib_drain_rq(struct ib_qp *qp)
+{
+ struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+ struct ib_drain_cqe rdrain;
+ struct ib_recv_wr rwr = {}, *bad_rwr;
+ int ret;
+
+ if (qp->recv_cq->poll_ctx == IB_POLL_DIRECT) {
+ WARN_ONCE(qp->recv_cq->poll_ctx == IB_POLL_DIRECT,
+ "IB_POLL_DIRECT poll_ctx not supported for drain\n");
+ return;
+ }
+
+ rwr.wr_cqe = &rdrain.cqe;
+ rdrain.cqe.done = ib_drain_qp_done;
+ init_completion(&rdrain.done);
+
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+ if (ret) {
+ WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+ return;
+ }
+
+ ret = ib_post_recv(qp, &rwr, &bad_rwr);
+ if (ret) {
+ WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+ return;
+ }
+
+ wait_for_completion(&rdrain.done);
+}
+
+/**
+ * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
+ * application.
+ * @qp: queue pair to drain
+ *
+ * If the device has a provider-specific drain function, then
+ * call that. Otherwise call the generic drain function
+ * __ib_drain_sq().
+ *
+ * The caller must:
+ *
+ * ensure there is room in the CQ and SQ for the drain work request and
+ * completion.
+ *
+ * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
+ * IB_POLL_DIRECT.
+ *
+ * ensure that there are no other contexts that are posting WRs concurrently.
+ * Otherwise the drain is not guaranteed.
+ */
+void ib_drain_sq(struct ib_qp *qp)
+{
+ if (qp->device->drain_sq)
+ qp->device->drain_sq(qp);
+ else
+ __ib_drain_sq(qp);
+}
+EXPORT_SYMBOL(ib_drain_sq);
+
+/**
+ * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
+ * application.
+ * @qp: queue pair to drain
+ *
+ * If the device has a provider-specific drain function, then
+ * call that. Otherwise call the generic drain function
+ * __ib_drain_rq().
+ *
+ * The caller must:
+ *
+ * ensure there is room in the CQ and RQ for the drain work request and
+ * completion.
+ *
+ * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
+ * IB_POLL_DIRECT.
+ *
+ * ensure that there are no other contexts that are posting WRs concurrently.
+ * Otherwise the drain is not guaranteed.
+ */
+void ib_drain_rq(struct ib_qp *qp)
+{
+ if (qp->device->drain_rq)
+ qp->device->drain_rq(qp);
+ else
+ __ib_drain_rq(qp);
+}
+EXPORT_SYMBOL(ib_drain_rq);
+
+/**
+ * ib_drain_qp() - Block until all CQEs have been consumed by the
+ * application on both the RQ and SQ.
+ * @qp: queue pair to drain
+ *
+ * The caller must:
+ *
+ * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
+ * and completions.
+ *
+ * allocate the CQs using ib_alloc_cq() and the CQ poll context cannot be
+ * IB_POLL_DIRECT.
+ *
+ * ensure that there are no other contexts that are posting WRs concurrently.
+ * Otherwise the drain is not guaranteed.
+ */
+void ib_drain_qp(struct ib_qp *qp)
+{
+ ib_drain_sq(qp);
+ ib_drain_rq(qp);
+}
+EXPORT_SYMBOL(ib_drain_qp);
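
The ib_drain_sq()/ib_drain_rq()/ib_drain_qp() helpers added above give ULPs a single call that moves the QP to the error state, posts a marker WR per queue, and waits for its completion, so every earlier WR has been flushed and its CQE consumed before teardown continues. A minimal sketch of how a consumer might use the new API — names are illustrative; it assumes the CQs came from ib_alloc_cq() with a poll context other than IB_POLL_DIRECT and that no other context is still posting WRs:

/* Hedged sketch: quiesce and destroy a QP with the new drain helpers. */
static void example_teardown_qp(struct ib_qp *qp)
{
	ib_drain_qp(qp);	/* drains both the SQ and the RQ */
	ib_destroy_qp(qp);	/* completion handlers can no longer run on this QP */
}

The iser and srp hunks later in this series are exactly this substitution: their hand-rolled "post a last WR and wait for it" logic becomes a single ib_drain_sq() or ib_drain_rq() call.
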
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index cf21df4a8bf5..b4eeb783573c 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -815,8 +815,15 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
}
}
out:
- if (wq)
+ if (wq) {
+ if (unlikely(qhp->attr.state != C4IW_QP_STATE_RTS)) {
+ if (t4_sq_empty(wq))
+ complete(&qhp->sq_drained);
+ if (t4_rq_empty(wq))
+ complete(&qhp->rq_drained);
+ }
spin_unlock(&qhp->lock);
+ }
return ret;
}
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index fb2de75a0392..7c6a6e1a2c1d 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -476,6 +476,8 @@ struct c4iw_qp {
wait_queue_head_t wait;
struct timer_list timer;
int sq_sig_all;
+ struct completion rq_drained;
+ struct completion sq_drained;
};
static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
@@ -1016,6 +1018,8 @@ extern int c4iw_wr_log;
extern int db_fc_threshold;
extern int db_coalescing_threshold;
extern int use_dsgl;
+void c4iw_drain_rq(struct ib_qp *qp);
+void c4iw_drain_sq(struct ib_qp *qp);
#endif
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index ec04272fbdc2..104662d38d1e 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -564,6 +564,8 @@ int c4iw_register_device(struct c4iw_dev *dev)
dev->ibdev.get_protocol_stats = c4iw_get_mib;
dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
dev->ibdev.get_port_immutable = c4iw_port_immutable;
+ dev->ibdev.drain_sq = c4iw_drain_sq;
+ dev->ibdev.drain_rq = c4iw_drain_rq;
dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
if (!dev->ibdev.iwcm)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index e99345eb875a..7b1b1e840ef1 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1697,6 +1697,8 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
qhp->attr.max_ird = 0;
qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
spin_lock_init(&qhp->lock);
+ init_completion(&qhp->sq_drained);
+ init_completion(&qhp->rq_drained);
mutex_init(&qhp->mutex);
init_waitqueue_head(&qhp->wait);
atomic_set(&qhp->refcnt, 1);
@@ -1888,3 +1890,17 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
return 0;
}
+
+void c4iw_drain_sq(struct ib_qp *ibqp)
+{
+ struct c4iw_qp *qp = to_c4iw_qp(ibqp);
+
+ wait_for_completion(&qp->sq_drained);
+}
+
+void c4iw_drain_rq(struct ib_qp *ibqp)
+{
+ struct c4iw_qp *qp = to_c4iw_qp(ibqp);
+
+ wait_for_completion(&qp->rq_drained);
+}
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 4659256cd95e..3b2ddd64a371 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -75,7 +75,8 @@ static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type)
static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
struct mlx5_create_srq_mbox_in **in,
- struct ib_udata *udata, int buf_size, int *inlen)
+ struct ib_udata *udata, int buf_size, int *inlen,
+ int is_xrc)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_create_srq ucmd = {};
@@ -87,13 +88,8 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
int ncont;
u32 offset;
u32 uidx = MLX5_IB_DEFAULT_UIDX;
- int drv_data = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
- if (drv_data < 0)
- return -EINVAL;
-
- ucmdlen = (drv_data < sizeof(ucmd)) ?
- drv_data : sizeof(ucmd);
+ ucmdlen = min(udata->inlen, sizeof(ucmd));
if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
mlx5_ib_dbg(dev, "failed copy udata\n");
@@ -103,15 +99,17 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
if (ucmd.reserved0 || ucmd.reserved1)
return -EINVAL;
- if (drv_data > sizeof(ucmd) &&
+ if (udata->inlen > sizeof(ucmd) &&
!ib_is_udata_cleared(udata, sizeof(ucmd),
- drv_data - sizeof(ucmd)))
+ udata->inlen - sizeof(ucmd)))
return -EINVAL;
- err = get_srq_user_index(to_mucontext(pd->uobject->context),
- &ucmd, udata->inlen, &uidx);
- if (err)
- return err;
+ if (is_xrc) {
+ err = get_srq_user_index(to_mucontext(pd->uobject->context),
+ &ucmd, udata->inlen, &uidx);
+ if (err)
+ return err;
+ }
srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
@@ -151,7 +149,8 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
(*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
(*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26);
- if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) {
+ if ((MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) &&
+ is_xrc){
xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
xrc_srq_context_entry);
MLX5_SET(xrc_srqc, xsrqc, user_index, uidx);
@@ -170,7 +169,7 @@ err_umem:
static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
struct mlx5_create_srq_mbox_in **in, int buf_size,
- int *inlen)
+ int *inlen, int is_xrc)
{
int err;
int i;
@@ -224,7 +223,8 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
(*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
- if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) {
+ if ((MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) &&
+ is_xrc){
xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
xrc_srq_context_entry);
/* 0xffffff means we ask to work with cqe version 0 */
@@ -302,10 +302,14 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
srq->msrq.max_avail_gather);
+ is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
+
if (pd->uobject)
- err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen);
+ err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen,
+ is_xrc);
else
- err = create_srq_kernel(dev, srq, &in, buf_size, &inlen);
+ err = create_srq_kernel(dev, srq, &in, buf_size, &inlen,
+ is_xrc);
if (err) {
mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
@@ -313,7 +317,6 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
goto err_srq;
}
- is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
in->ctx.state_log_sz = ilog2(srq->msrq.max);
flgs = ((srq->msrq.wqe_shift - 4) | (is_xrc << 5) | (srq->wq_sig << 7)) << 24;
xrcdn = 0;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index a6f3eab0f350..85be0de3ab26 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -244,6 +244,7 @@ struct ipoib_cm_tx {
unsigned tx_tail;
unsigned long flags;
u32 mtu;
+ unsigned max_send_sge;
};
struct ipoib_cm_rx_buf {
@@ -390,6 +391,7 @@ struct ipoib_dev_priv {
int hca_caps;
struct ipoib_ethtool_st ethtool;
struct timer_list poll_timer;
+ unsigned max_send_sge;
};
struct ipoib_ah {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 917e46ea3bf6..c8ed53562c9b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -710,6 +710,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_tx_buf *tx_req;
int rc;
+ unsigned usable_sge = tx->max_send_sge - !!skb_headlen(skb);
if (unlikely(skb->len > tx->mtu)) {
ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
@@ -719,7 +720,23 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
return;
}
-
+ if (skb_shinfo(skb)->nr_frags > usable_sge) {
+ if (skb_linearize(skb) < 0) {
+ ipoib_warn(priv, "skb could not be linearized\n");
+ ++dev->stats.tx_dropped;
+ ++dev->stats.tx_errors;
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ /* Does skb_linearize return ok without reducing nr_frags? */
+ if (skb_shinfo(skb)->nr_frags > usable_sge) {
+ ipoib_warn(priv, "too many frags after skb linearize\n");
+ ++dev->stats.tx_dropped;
+ ++dev->stats.tx_errors;
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ }
ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
tx->tx_head, skb->len, tx->qp->qp_num);
@@ -1031,7 +1048,8 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
struct ib_qp *tx_qp;
if (dev->features & NETIF_F_SG)
- attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
+ attr.cap.max_send_sge =
+ min_t(u32, priv->ca->attrs.max_sge, MAX_SKB_FRAGS + 1);
tx_qp = ib_create_qp(priv->pd, &attr);
if (PTR_ERR(tx_qp) == -EINVAL) {
@@ -1040,6 +1058,7 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO;
tx_qp = ib_create_qp(priv->pd, &attr);
}
+ tx->max_send_sge = attr.cap.max_send_sge;
return tx_qp;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index fa9c42ff1fb0..899e6b7fb8a5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -538,6 +538,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
struct ipoib_tx_buf *tx_req;
int hlen, rc;
void *phead;
+ unsigned usable_sge = priv->max_send_sge - !!skb_headlen(skb);
if (skb_is_gso(skb)) {
hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
@@ -561,6 +562,23 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
phead = NULL;
hlen = 0;
}
+ if (skb_shinfo(skb)->nr_frags > usable_sge) {
+ if (skb_linearize(skb) < 0) {
+ ipoib_warn(priv, "skb could not be linearized\n");
+ ++dev->stats.tx_dropped;
+ ++dev->stats.tx_errors;
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ /* Does skb_linearize return ok without reducing nr_frags? */
+ if (skb_shinfo(skb)->nr_frags > usable_sge) {
+ ipoib_warn(priv, "too many frags after skb linearize\n");
+ ++dev->stats.tx_dropped;
+ ++dev->stats.tx_errors;
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ }
ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
skb->len, address, qpn);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index d48c5bae7877..b809c373e40e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -206,7 +206,8 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
init_attr.create_flags |= IB_QP_CREATE_NETIF_QP;
if (dev->features & NETIF_F_SG)
- init_attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
+ init_attr.cap.max_send_sge =
+ min_t(u32, priv->ca->attrs.max_sge, MAX_SKB_FRAGS + 1);
priv->qp = ib_create_qp(priv->pd, &init_attr);
if (IS_ERR(priv->qp)) {
@@ -233,6 +234,8 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
priv->rx_wr.next = NULL;
priv->rx_wr.sg_list = priv->rx_sge;
+ priv->max_send_sge = init_attr.cap.max_send_sge;
+
return 0;
out_free_send_cq:
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 95f0a64e076b..0351059783b1 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -458,9 +458,6 @@ struct iser_fr_pool {
* @comp: iser completion context
* @fr_pool: connection fast registration poool
* @pi_support: Indicate device T10-PI support
- * @last: last send wr to signal all flush errors were drained
- * @last_cqe: cqe handler for last wr
- * @last_comp: completes when all connection completions consumed
*/
struct ib_conn {
struct rdma_cm_id *cma_id;
@@ -472,10 +469,7 @@ struct ib_conn {
struct iser_comp *comp;
struct iser_fr_pool fr_pool;
bool pi_support;
- struct ib_send_wr last;
- struct ib_cqe last_cqe;
struct ib_cqe reg_cqe;
- struct completion last_comp;
};
/**
@@ -617,7 +611,6 @@ void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc);
void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc);
void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc);
void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc);
-void iser_last_comp(struct ib_cq *cq, struct ib_wc *wc);
void iser_task_rdma_init(struct iscsi_iser_task *task);
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index ed54b388e7ad..81ae2e30dd12 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -729,13 +729,6 @@ void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc)
kmem_cache_free(ig.desc_cache, desc);
}
-void iser_last_comp(struct ib_cq *cq, struct ib_wc *wc)
-{
- struct ib_conn *ib_conn = wc->qp->qp_context;
-
- complete(&ib_conn->last_comp);
-}
-
void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
{
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 40c0f4978e2f..47e1159c07c2 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -663,7 +663,6 @@ void iser_conn_release(struct iser_conn *iser_conn)
int iser_conn_terminate(struct iser_conn *iser_conn)
{
struct ib_conn *ib_conn = &iser_conn->ib_conn;
- struct ib_send_wr *bad_wr;
int err = 0;
/* terminate the iser conn only if the conn state is UP */
@@ -688,14 +687,8 @@ int iser_conn_terminate(struct iser_conn *iser_conn)
iser_err("Failed to disconnect, conn: 0x%p err %d\n",
iser_conn, err);
- /* post an indication that all flush errors were consumed */
- err = ib_post_send(ib_conn->qp, &ib_conn->last, &bad_wr);
- if (err) {
- iser_err("conn %p failed to post last wr", ib_conn);
- return 1;
- }
-
- wait_for_completion(&ib_conn->last_comp);
+ /* block until all flush errors are consumed */
+ ib_drain_sq(ib_conn->qp);
}
return 1;
@@ -954,10 +947,6 @@ void iser_conn_init(struct iser_conn *iser_conn)
ib_conn->post_recv_buf_count = 0;
ib_conn->reg_cqe.done = iser_reg_comp;
- ib_conn->last_cqe.done = iser_last_comp;
- ib_conn->last.wr_cqe = &ib_conn->last_cqe;
- ib_conn->last.opcode = IB_WR_SEND;
- init_completion(&ib_conn->last_comp);
}
/**
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 03022f6420d7..b6bf20496021 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -446,49 +446,17 @@ static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
dev->max_pages_per_mr);
}
-static void srp_drain_done(struct ib_cq *cq, struct ib_wc *wc)
-{
- struct srp_rdma_ch *ch = cq->cq_context;
-
- complete(&ch->done);
-}
-
-static struct ib_cqe srp_drain_cqe = {
- .done = srp_drain_done,
-};
-
/**
* srp_destroy_qp() - destroy an RDMA queue pair
* @ch: SRP RDMA channel.
*
- * Change a queue pair into the error state and wait until all receive
- * completions have been processed before destroying it. This avoids that
- * the receive completion handler can access the queue pair while it is
+ * Drain the qp before destroying it. This avoids that the receive
+ * completion handler can access the queue pair while it is
* being destroyed.
*/
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
- static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
- static struct ib_recv_wr wr = { 0 };
- struct ib_recv_wr *bad_wr;
- int ret;
-
- wr.wr_cqe = &srp_drain_cqe;
- /* Destroying a QP and reusing ch->done is only safe if not connected */
- WARN_ON_ONCE(ch->connected);
-
- ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
- WARN_ONCE(ret, "ib_cm_init_qp_attr() returned %d\n", ret);
- if (ret)
- goto out;
-
- init_completion(&ch->done);
- ret = ib_post_recv(ch->qp, &wr, &bad_wr);
- WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
- if (ret == 0)
- wait_for_completion(&ch->done);
-
-out:
+ ib_drain_rq(ch->qp);
ib_destroy_qp(ch->qp);
}
@@ -508,7 +476,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
if (!init_attr)
return -ENOMEM;
- /* queue_size + 1 for ib_drain_qp */
+ /* queue_size + 1 for ib_drain_rq() */
recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
ch->comp_vector, IB_POLL_SOFTIRQ);
if (IS_ERR(recv_cq)) {
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 0ff049bd9ad4..37dd534cbeab 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -424,11 +424,11 @@ typedef void (*ib_mad_send_handler)(struct ib_mad_agent *mad_agent,
/**
* ib_mad_snoop_handler - Callback handler for snooping sent MADs.
* @mad_agent: MAD agent that snooped the MAD.
- * @send_wr: Work request information on the sent MAD.
+ * @send_buf: send MAD data buffer.
* @mad_send_wc: Work completion information on the sent MAD. Valid
* only for snooping that occurs on a send completion.
*
- * Clients snooping MADs should not modify data referenced by the @send_wr
+ * Clients snooping MADs should not modify data referenced by the @send_buf
* or @mad_send_wc.
*/
typedef void (*ib_mad_snoop_handler)(struct ib_mad_agent *mad_agent,
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 284b00c8fea4..68b7e978a27d 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1846,6 +1846,8 @@ struct ib_device {
int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
struct ib_mr_status *mr_status);
void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
+ void (*drain_rq)(struct ib_qp *qp);
+ void (*drain_sq)(struct ib_qp *qp);
struct ib_dma_mapping_ops *dma_ops;
@@ -3094,4 +3096,7 @@ int ib_sg_to_pages(struct ib_mr *mr,
int sg_nents,
int (*set_page)(struct ib_mr *, u64));
+void ib_drain_rq(struct ib_qp *qp);
+void ib_drain_sq(struct ib_qp *qp);
+void ib_drain_qp(struct ib_qp *qp);
#endif /* IB_VERBS_H */
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 52b4a2f993f2..1852e383afd6 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -109,14 +109,13 @@ struct p9_trans_rdma {
/**
* p9_rdma_context - Keeps track of in-process WR
*
- * @wc_op: The original WR op for when the CQE completes in error.
* @busa: Bus address to unmap when the WR completes
* @req: Keeps track of requests (send)
* @rc: Keepts track of replies (receive)
*/
struct p9_rdma_req;
struct p9_rdma_context {
- enum ib_wc_opcode wc_op;
+ struct ib_cqe cqe;
dma_addr_t busa;
union {
struct p9_req_t *req;
@@ -284,9 +283,12 @@ p9_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
}
static void
-handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
- struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len)
+recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
+ struct p9_client *client = cq->cq_context;
+ struct p9_trans_rdma *rdma = client->trans;
+ struct p9_rdma_context *c =
+ container_of(wc->wr_cqe, struct p9_rdma_context, cqe);
struct p9_req_t *req;
int err = 0;
int16_t tag;
@@ -295,7 +297,7 @@ handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize,
DMA_FROM_DEVICE);
- if (status != IB_WC_SUCCESS)
+ if (wc->status != IB_WC_SUCCESS)
goto err_out;
err = p9_parse_header(c->rc, NULL, NULL, &tag, 1);
@@ -316,21 +318,32 @@ handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
req->rc = c->rc;
p9_client_cb(client, req, REQ_STATUS_RCVD);
+ out:
+ up(&rdma->rq_sem);
+ kfree(c);
return;
err_out:
- p9_debug(P9_DEBUG_ERROR, "req %p err %d status %d\n", req, err, status);
+ p9_debug(P9_DEBUG_ERROR, "req %p err %d status %d\n",
+ req, err, wc->status);
rdma->state = P9_RDMA_FLUSHING;
client->status = Disconnected;
+ goto out;
}
static void
-handle_send(struct p9_client *client, struct p9_trans_rdma *rdma,
- struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len)
+send_done(struct ib_cq *cq, struct ib_wc *wc)
{
+ struct p9_client *client = cq->cq_context;
+ struct p9_trans_rdma *rdma = client->trans;
+ struct p9_rdma_context *c =
+ container_of(wc->wr_cqe, struct p9_rdma_context, cqe);
+
ib_dma_unmap_single(rdma->cm_id->device,
c->busa, c->req->tc->size,
DMA_TO_DEVICE);
+ up(&rdma->sq_sem);
+ kfree(c);
}
static void qp_event_handler(struct ib_event *event, void *context)
@@ -339,42 +352,6 @@ static void qp_event_handler(struct ib_event *event, void *context)
event->event, context);
}
-static void cq_comp_handler(struct ib_cq *cq, void *cq_context)
-{
- struct p9_client *client = cq_context;
- struct p9_trans_rdma *rdma = client->trans;
- int ret;
- struct ib_wc wc;
-
- ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);
- while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
- struct p9_rdma_context *c = (void *) (unsigned long) wc.wr_id;
-
- switch (c->wc_op) {
- case IB_WC_RECV:
- handle_recv(client, rdma, c, wc.status, wc.byte_len);
- up(&rdma->rq_sem);
- break;
-
- case IB_WC_SEND:
- handle_send(client, rdma, c, wc.status, wc.byte_len);
- up(&rdma->sq_sem);
- break;
-
- default:
- pr_err("unexpected completion type, c->wc_op=%d, wc.opcode=%d, status=%d\n",
- c->wc_op, wc.opcode, wc.status);
- break;
- }
- kfree(c);
- }
-}
-
-static void cq_event_handler(struct ib_event *e, void *v)
-{
- p9_debug(P9_DEBUG_ERROR, "CQ event %d context %p\n", e->event, v);
-}
-
static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
{
if (!rdma)
@@ -387,7 +364,7 @@ static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
ib_dealloc_pd(rdma->pd);
if (rdma->cq && !IS_ERR(rdma->cq))
- ib_destroy_cq(rdma->cq);
+ ib_free_cq(rdma->cq);
if (rdma->cm_id && !IS_ERR(rdma->cm_id))
rdma_destroy_id(rdma->cm_id);
@@ -408,13 +385,14 @@ post_recv(struct p9_client *client, struct p9_rdma_context *c)
if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
goto error;
+ c->cqe.done = recv_done;
+
sge.addr = c->busa;
sge.length = client->msize;
sge.lkey = rdma->pd->local_dma_lkey;
wr.next = NULL;
- c->wc_op = IB_WC_RECV;
- wr.wr_id = (unsigned long) c;
+ wr.wr_cqe = &c->cqe;
wr.sg_list = &sge;
wr.num_sge = 1;
return ib_post_recv(rdma->qp, &wr, &bad_wr);
@@ -499,13 +477,14 @@ dont_need_post_recv:
goto send_error;
}
+ c->cqe.done = send_done;
+
sge.addr = c->busa;
sge.length = c->req->tc->size;
sge.lkey = rdma->pd->local_dma_lkey;
wr.next = NULL;
- c->wc_op = IB_WC_SEND;
- wr.wr_id = (unsigned long) c;
+ wr.wr_cqe = &c->cqe;
wr.opcode = IB_WR_SEND;
wr.send_flags = IB_SEND_SIGNALED;
wr.sg_list = &sge;
@@ -642,7 +621,6 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
struct p9_trans_rdma *rdma;
struct rdma_conn_param conn_param;
struct ib_qp_init_attr qp_attr;
- struct ib_cq_init_attr cq_attr = {};
/* Parse the transport specific mount options */
err = parse_opts(args, &opts);
@@ -695,13 +673,11 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
goto error;
/* Create the Completion Queue */
- cq_attr.cqe = opts.sq_depth + opts.rq_depth + 1;
- rdma->cq = ib_create_cq(rdma->cm_id->device, cq_comp_handler,
- cq_event_handler, client,
- &cq_attr);
+ rdma->cq = ib_alloc_cq(rdma->cm_id->device, client,
+ opts.sq_depth + opts.rq_depth + 1,
+ 0, IB_POLL_SOFTIRQ);
if (IS_ERR(rdma->cq))
goto error;
- ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);
/* Create the Protection Domain */
rdma->pd = ib_alloc_pd(rdma->cm_id->device);
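
The trans_rdma.c changes above are a straight conversion from the old wr_id cast plus ib_poll_cq() loop to the ib_alloc_cq()/wr_cqe completion API: each in-flight WR embeds a struct ib_cqe, and the done callback recovers the per-WR context with container_of() instead of casting wr_id. A minimal sketch of that pattern under the same assumptions the patch makes (CQ allocated with ib_alloc_cq(); all names are illustrative):

/* Hedged sketch of the wr_cqe completion pattern used in this conversion. */
struct example_ctx {
	struct ib_cqe cqe;	/* wr_cqe points here */
	dma_addr_t busa;	/* any per-WR state the handler needs */
};

static void example_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct example_ctx *c =
		container_of(wc->wr_cqe, struct example_ctx, cqe);

	if (wc->status != IB_WC_SUCCESS)
		pr_err("recv failed: %d\n", wc->status);
	kfree(c);		/* no wr_id cast needed */
}

static int example_post_recv(struct ib_qp *qp, struct example_ctx *c,
			     struct ib_sge *sge)
{
	struct ib_recv_wr wr = {}, *bad_wr;

	c->cqe.done = example_recv_done;
	wr.wr_cqe = &c->cqe;
	wr.sg_list = sge;
	wr.num_sge = 1;
	return ib_post_recv(qp, &wr, &bad_wr);
}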