Merge branch 'k.o/for-rc' into k.o/wip/dl-for-next

There is a 14-patch series waiting to come into for-next that has a
dependency on code submitted into this kernel's for-rc series.  So, merge
the for-rc branch into the current for-next in order to make the patch
series apply cleanly.

Signed-off-by: Doug Ledford <dledford@redhat.com>
Author: Doug Ledford <dledford@redhat.com>
Date:   2018-02-22 22:27:20 -05:00
Commit: 4bb46608ed

23 changed files with 242 additions and 115 deletions

drivers/infiniband/core/core_priv.h

@@ -305,16 +305,21 @@ void nldev_exit(void);
 static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
					   struct ib_pd *pd,
					   struct ib_qp_init_attr *attr,
-					  struct ib_udata *udata)
+					  struct ib_udata *udata,
+					  struct ib_uobject *uobj)
 {
	struct ib_qp *qp;

+	if (!dev->create_qp)
+		return ERR_PTR(-EOPNOTSUPP);
+
	qp = dev->create_qp(pd, attr, udata);
	if (IS_ERR(qp))
		return qp;

	qp->device = dev;
	qp->pd = pd;
+	qp->uobject = uobj;
	/*
	 * We don't track XRC QPs for now, because they don't have PD
	 * and more importantly they are created internaly by driver,

drivers/infiniband/core/rdma_core.c

@@ -141,7 +141,12 @@ static struct ib_uobject *alloc_uobj(struct ib_ucontext *context,
	 */
	uobj->context = context;
	uobj->type = type;
-	atomic_set(&uobj->usecnt, 0);
+	/*
+	 * Allocated objects start out as write locked to deny any other
+	 * syscalls from accessing them until they are committed. See
+	 * rdma_alloc_commit_uobject
+	 */
+	atomic_set(&uobj->usecnt, -1);
	kref_init(&uobj->ref);

	return uobj;
@@ -196,7 +201,15 @@ static struct ib_uobject *lookup_get_idr_uobject(const struct uverbs_obj_type *t
		goto free;
	}

-	uverbs_uobject_get(uobj);
+	/*
+	 * The idr_find is guaranteed to return a pointer to something that
+	 * isn't freed yet, or NULL, as the free after idr_remove goes through
+	 * kfree_rcu(). However the object may still have been released and
+	 * kfree() could be called at any time.
+	 */
+	if (!kref_get_unless_zero(&uobj->ref))
+		uobj = ERR_PTR(-ENOENT);
 free:
	rcu_read_unlock();
	return uobj;
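
The comment above captures a standard RCU lookup idiom: kfree_rcu() keeps the
memory dereferenceable inside the read-side critical section, but the refcount
may already have hit zero. A minimal self-contained sketch of the same idiom
(hypothetical struct obj and table, not the ib_uobject code itself):

struct obj {
	struct kref ref;
	struct rcu_head rcu;	/* freed via kfree_rcu() */
};

static struct obj *obj_lookup(struct idr *table, unsigned long id)
{
	struct obj *o;

	rcu_read_lock();
	o = idr_find(table, id);	/* memory valid, object maybe dying */
	if (o && !kref_get_unless_zero(&o->ref))
		o = NULL;		/* lost the race with the final kref_put() */
	rcu_read_unlock();
	return o;			/* caller now owns a reference, or NULL */
}
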
@@ -399,13 +412,13 @@ static int __must_check remove_commit_fd_uobject(struct ib_uobject *uobj,
	return ret;
 }

-static void lockdep_check(struct ib_uobject *uobj, bool exclusive)
+static void assert_uverbs_usecnt(struct ib_uobject *uobj, bool exclusive)
 {
 #ifdef CONFIG_LOCKDEP
	if (exclusive)
-		WARN_ON(atomic_read(&uobj->usecnt) > 0);
+		WARN_ON(atomic_read(&uobj->usecnt) != -1);
	else
-		WARN_ON(atomic_read(&uobj->usecnt) == -1);
+		WARN_ON(atomic_read(&uobj->usecnt) <= 0);
 #endif
 }
@@ -444,7 +457,7 @@ int __must_check rdma_remove_commit_uobject(struct ib_uobject *uobj)
		WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
		return 0;
	}
-	lockdep_check(uobj, true);
+	assert_uverbs_usecnt(uobj, true);
	ret = _rdma_remove_commit_uobject(uobj, RDMA_REMOVE_DESTROY);

	up_read(&ucontext->cleanup_rwsem);
@@ -474,16 +487,17 @@ int rdma_explicit_destroy(struct ib_uobject *uobject)
		WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
		return 0;
	}
-	lockdep_check(uobject, true);
+	assert_uverbs_usecnt(uobject, true);
	ret = uobject->type->type_class->remove_commit(uobject,
						       RDMA_REMOVE_DESTROY);
	if (ret)
-		return ret;
+		goto out;

	uobject->type = &null_obj_type;

+out:
	up_read(&ucontext->cleanup_rwsem);
-	return 0;
+	return ret;
 }

 static void alloc_commit_idr_uobject(struct ib_uobject *uobj)
@@ -527,6 +541,10 @@ int rdma_alloc_commit_uobject(struct ib_uobject *uobj)
		return ret;
	}

+	/* matches atomic_set(-1) in alloc_uobj */
+	assert_uverbs_usecnt(uobj, true);
+	atomic_set(&uobj->usecnt, 0);
+
	uobj->type->type_class->alloc_commit(uobj);
	up_read(&uobj->context->cleanup_rwsem);
@@ -561,7 +579,7 @@ static void lookup_put_fd_uobject(struct ib_uobject *uobj, bool exclusive)
 void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool exclusive)
 {
-	lockdep_check(uobj, exclusive);
+	assert_uverbs_usecnt(uobj, exclusive);
	uobj->type->type_class->lookup_put(uobj, exclusive);
	/*
	 * In order to unlock an object, either decrease its usecnt for
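
Taken together, the hunks above turn uobj->usecnt into a reader/writer lock
encoded in a single atomic: -1 means write (exclusive) locked, 0 unlocked, and
a positive value counts concurrent readers. A sketch of that convention with
illustrative helpers (not the kernel's actual API):

static bool uobj_try_read_lock(atomic_t *usecnt)
{
	/* take a shared reference unless the object is write locked (-1) */
	return atomic_add_unless(usecnt, 1, -1);
}

static bool uobj_try_write_lock(atomic_t *usecnt)
{
	/* only an unlocked object (0) may become write locked (-1) */
	return atomic_cmpxchg(usecnt, 0, -1) == 0;
}

This is why freshly allocated objects now start at -1 (write locked until
rdma_alloc_commit_uobject() publishes them with atomic_set(..., 0)), and why
the exclusive assertion is usecnt == -1 rather than usecnt <= 0.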

drivers/infiniband/core/restrack.c

@@ -7,7 +7,6 @@
 #include <rdma/restrack.h>
 #include <linux/mutex.h>
 #include <linux/sched/task.h>
-#include <linux/uaccess.h>
 #include <linux/pid_namespace.h>

 void rdma_restrack_init(struct rdma_restrack_root *res)
@@ -63,7 +62,6 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res)
 {
	enum rdma_restrack_type type = res->type;
	struct ib_device *dev;
-	struct ib_xrcd *xrcd;
	struct ib_pd *pd;
	struct ib_cq *cq;
	struct ib_qp *qp;
@@ -81,10 +79,6 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res)
		qp = container_of(res, struct ib_qp, res);
		dev = qp->device;
		break;
-	case RDMA_RESTRACK_XRCD:
-		xrcd = container_of(res, struct ib_xrcd, res);
-		dev = xrcd->device;
-		break;
	default:
		WARN_ONCE(true, "Wrong resource tracking type %u\n", type);
		return NULL;
@@ -93,6 +87,21 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res)
	return dev;
 }

+static bool res_is_user(struct rdma_restrack_entry *res)
+{
+	switch (res->type) {
+	case RDMA_RESTRACK_PD:
+		return container_of(res, struct ib_pd, res)->uobject;
+	case RDMA_RESTRACK_CQ:
+		return container_of(res, struct ib_cq, res)->uobject;
+	case RDMA_RESTRACK_QP:
+		return container_of(res, struct ib_qp, res)->uobject;
+	default:
+		WARN_ONCE(true, "Wrong resource tracking type %u\n", res->type);
+		return false;
+	}
+}
+
 void rdma_restrack_add(struct rdma_restrack_entry *res)
 {
	struct ib_device *dev = res_to_dev(res);
@@ -100,7 +109,7 @@ void rdma_restrack_add(struct rdma_restrack_entry *res)
	if (!dev)
		return;

-	if (!uaccess_kernel()) {
+	if (res_is_user(res)) {
		get_task_struct(current);
		res->task = current;
		res->kern_name = NULL;

drivers/infiniband/core/uverbs_cmd.c

@@ -562,9 +562,10 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
	if (f.file)
		fdput(f);

+	mutex_unlock(&file->device->xrcd_tree_mutex);
	uobj_alloc_commit(&obj->uobject);

-	mutex_unlock(&file->device->xrcd_tree_mutex);
	return in_len;

 err_copy:
@@ -603,10 +604,8 @@ ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
	uobj = uobj_get_write(uobj_get_type(xrcd), cmd.xrcd_handle,
			      file->ucontext);
-	if (IS_ERR(uobj)) {
-		mutex_unlock(&file->device->xrcd_tree_mutex);
+	if (IS_ERR(uobj))
		return PTR_ERR(uobj);
-	}

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
@@ -979,6 +978,9 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_cq_init_attr attr = {};

+	if (!ib_dev->create_cq)
+		return ERR_PTR(-EOPNOTSUPP);
+
	if (cmd->comp_vector >= file->device->num_comp_vectors)
		return ERR_PTR(-EINVAL);
@@ -1030,14 +1032,14 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
	resp.response_length = offsetof(typeof(resp), response_length) +
			       sizeof(resp.response_length);

+	cq->res.type = RDMA_RESTRACK_CQ;
+	rdma_restrack_add(&cq->res);
+
	ret = cb(file, obj, &resp, ucore, context);
	if (ret)
		goto err_cb;

	uobj_alloc_commit(&obj->uobject);
-	cq->res.type = RDMA_RESTRACK_CQ;
-	rdma_restrack_add(&cq->res);

	return obj;

 err_cb:
@@ -1518,7 +1520,8 @@ static int create_qp(struct ib_uverbs_file *file,
	if (cmd->qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
-		qp = _ib_create_qp(device, pd, &attr, uhw);
+		qp = _ib_create_qp(device, pd, &attr, uhw,
+				   &obj->uevent.uobject);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
@@ -1550,8 +1553,10 @@ static int create_qp(struct ib_uverbs_file *file,
			atomic_inc(&attr.srq->usecnt);
		if (ind_tbl)
			atomic_inc(&ind_tbl->usecnt);
+	} else {
+		/* It is done in _ib_create_qp for other QP types */
+		qp->uobject = &obj->uevent.uobject;
	}
-	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
@@ -1971,8 +1976,15 @@ static int modify_qp(struct ib_uverbs_file *file,
		goto release_qp;
	}

+	if ((cmd->base.attr_mask & IB_QP_AV) &&
+	    !rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) {
+		ret = -EINVAL;
+		goto release_qp;
+	}
+
	if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
-	    !rdma_is_port_valid(qp->device, cmd->base.alt_port_num)) {
+	    (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) ||
+	     !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num))) {
		ret = -EINVAL;
		goto release_qp;
	}
@@ -2941,6 +2953,11 @@ int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
	wq_init_attr.create_flags = cmd.create_flags;
	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
+
+	if (!pd->device->create_wq) {
+		err = -EOPNOTSUPP;
+		goto err_put_cq;
+	}
	wq = pd->device->create_wq(pd, &wq_init_attr, uhw);
	if (IS_ERR(wq)) {
		err = PTR_ERR(wq);
@@ -3084,7 +3101,12 @@ int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,
		wq_attr.flags = cmd.flags;
		wq_attr.flags_mask = cmd.flags_mask;
	}
+
+	if (!wq->device->modify_wq) {
+		ret = -EOPNOTSUPP;
+		goto out;
+	}
	ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw);
+out:
	uobj_put_obj_read(wq);
	return ret;
 }
@@ -3181,6 +3203,11 @@ int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
	init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
	init_attr.ind_tbl = wqs;

+	if (!ib_dev->create_rwq_ind_table) {
+		err = -EOPNOTSUPP;
+		goto err_uobj;
+	}
+
	rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw);

	if (IS_ERR(rwq_ind_tbl)) {
@@ -3770,6 +3797,9 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
	struct ib_device_attr attr = {0};
	int err;

+	if (!ib_dev->query_device)
+		return -EOPNOTSUPP;
+
	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;
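
The five -EOPNOTSUPP hunks above all apply one rule: ib_device methods are
optional, so a verb handler must NULL-check the function pointer before the
call. A sketch of the pattern with a made-up verb name ("frob" is purely
hypothetical):

struct dev_ops {
	int (*frob)(void *ctx);	/* optional; drivers may leave this NULL */
};

static int do_frob(struct dev_ops *ops, void *ctx)
{
	if (!ops->frob)			/* driver never registered the verb */
		return -EOPNOTSUPP;	/* report unsupported, don't deref NULL */
	return ops->frob(ctx);
}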

drivers/infiniband/core/uverbs_ioctl.c

@@ -59,6 +59,9 @@ static int uverbs_process_attr(struct ib_device *ibdev,
		return 0;
	}

+	if (test_bit(attr_id, attr_bundle_h->valid_bitmap))
+		return -EINVAL;
+
	spec = &attr_spec_bucket->attrs[attr_id];
	e = &elements[attr_id];
	e->uattr = uattr_ptr;

drivers/infiniband/core/uverbs_ioctl_merge.c

@@ -114,6 +114,7 @@ static size_t get_elements_above_id(const void **iters,
	short min = SHRT_MAX;
	const void *elem;
	int i, j, last_stored = -1;
+	unsigned int equal_min = 0;

	for_each_element(elem, i, j, elements, num_elements, num_offset,
			 data_offset) {
@@ -136,6 +137,10 @@
		 */
		iters[last_stored == i ? num_iters - 1 : num_iters++] = elem;
		last_stored = i;
+		if (min == GET_ID(id))
+			equal_min++;
+		else
+			equal_min = 1;
		min = GET_ID(id);
	}
@@ -146,15 +151,10 @@
	 * Therefore, we need to clean the beginning of the array to make sure
	 * all ids of final elements are equal to min.
	 */
-	for (i = num_iters - 1; i >= 0 &&
-	     GET_ID(*(u16 *)(iters[i] + id_offset)) == min; i--)
-		;
-
-	num_iters -= i + 1;
-	memmove(iters, iters + i + 1, sizeof(*iters) * num_iters);
+	memmove(iters, iters + num_iters - equal_min, sizeof(*iters) * equal_min);

	*min_id = min;
-	return num_iters;
+	return equal_min;
 }

 #define find_max_element_entry_id(num_elements, elements, num_objects_fld, \
@@ -322,7 +322,7 @@ static struct uverbs_method_spec *build_method_with_attrs(const struct uverbs_me
	hash = kzalloc(sizeof(*hash) +
		       ALIGN(sizeof(*hash->attrs) * (attr_max_bucket + 1),
			     sizeof(long)) +
-		       BITS_TO_LONGS(attr_max_bucket) * sizeof(long),
+		       BITS_TO_LONGS(attr_max_bucket + 1) * sizeof(long),
		       GFP_KERNEL);
	if (!hash) {
		res = -ENOMEM;
@@ -509,7 +509,7 @@ static struct uverbs_object_spec *build_object_with_methods(const struct uverbs_
		 * first handler which != NULL. This also defines the
		 * set of flags used for this handler.
		 */
-		for (i = num_object_defs - 1;
+		for (i = num_method_defs - 1;
		     i >= 0 && !method_defs[i]->handler; i--)
			;
		hash->methods[min_id++] = method;
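
The equal_min change above replaces a backward rescan with bookkeeping done
during the forward pass: count how many stored survivors carry the running
minimum, then compact exactly that many tail entries with one memmove(). A
plain-C toy model (not the kernel code) of the same logic:

#include <stdio.h>
#include <string.h>

static size_t keep_only_min(unsigned short *ids, size_t n)
{
	unsigned short min = 0xffff;
	size_t i, equal_min = 0, stored = 0;

	for (i = 0; i < n; i++) {
		if (ids[i] > min)
			continue;		/* can never be the minimum */
		equal_min = (ids[i] == min) ? equal_min + 1 : 1;
		min = ids[i];
		ids[stored++] = ids[i];		/* survivors stay in order */
	}
	/* the entries equal to min are the last equal_min survivors */
	memmove(ids, ids + stored - equal_min, equal_min * sizeof(*ids));
	return equal_min;
}

int main(void)
{
	unsigned short ids[] = { 7, 3, 9, 3, 5, 3 };
	size_t i, n = keep_only_min(ids, 6);

	for (i = 0; i < n; i++)
		printf("%u ", ids[i]);		/* prints: 3 3 3 */
	return 0;
}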

drivers/infiniband/core/uverbs_main.c

@@ -650,12 +650,21 @@ static int verify_command_mask(struct ib_device *ib_dev, __u32 command)
	return -1;
 }

+static bool verify_command_idx(u32 command, bool extended)
+{
+	if (extended)
+		return command < ARRAY_SIZE(uverbs_ex_cmd_table);
+
+	return command < ARRAY_SIZE(uverbs_cmd_table);
+}
+
 static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
			       size_t count, loff_t *pos)
 {
	struct ib_uverbs_file *file = filp->private_data;
	struct ib_device *ib_dev;
	struct ib_uverbs_cmd_hdr hdr;
+	bool extended_command;
	__u32 command;
	__u32 flags;
	int srcu_key;
@@ -688,6 +697,15 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
	}

	command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
+	flags = (hdr.command &
+		 IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT;
+	extended_command = flags & IB_USER_VERBS_CMD_FLAG_EXTENDED;
+
+	if (!verify_command_idx(command, extended_command)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
	if (verify_command_mask(ib_dev, command)) {
		ret = -EOPNOTSUPP;
		goto out;
@@ -699,12 +717,8 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
		goto out;
	}

-	flags = (hdr.command &
-		 IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT;
-
	if (!flags) {
-		if (command >= ARRAY_SIZE(uverbs_cmd_table) ||
-		    !uverbs_cmd_table[command]) {
+		if (!uverbs_cmd_table[command]) {
			ret = -EINVAL;
			goto out;
		}
@@ -725,8 +739,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
		struct ib_udata uhw;
		size_t written_count = count;

-		if (command >= ARRAY_SIZE(uverbs_ex_cmd_table) ||
-		    !uverbs_ex_cmd_table[command]) {
+		if (!uverbs_ex_cmd_table[command]) {
			ret = -ENOSYS;
			goto out;
		}
@@ -942,6 +955,7 @@ static const struct file_operations uverbs_fops = {
	.llseek	 = no_llseek,
 #if IS_ENABLED(CONFIG_INFINIBAND_EXP_USER_ACCESS)
	.unlocked_ioctl = ib_uverbs_ioctl,
+	.compat_ioctl = ib_uverbs_ioctl,
 #endif
 };
@@ -954,6 +968,7 @@ static const struct file_operations uverbs_mmap_fops = {
	.llseek	 = no_llseek,
 #if IS_ENABLED(CONFIG_INFINIBAND_EXP_USER_ACCESS)
	.unlocked_ioctl = ib_uverbs_ioctl,
+	.compat_ioctl = ib_uverbs_ioctl,
 #endif
 };
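
The ordering matters in the hunks above: `command` comes straight from user
space, so the index must be range-checked before either dispatch table is
touched. A minimal sketch with a hypothetical table (not the uverbs tables
themselves):

typedef int (*cmd_handler)(void *ctx);
static cmd_handler cmd_table[16];	/* hypothetical dispatch table */

static int dispatch(unsigned int command, void *ctx)
{
	if (command >= ARRAY_SIZE(cmd_table))
		return -EINVAL;		/* validate before any table access */
	if (!cmd_table[command])
		return -ENOSYS;		/* slot exists but is unimplemented */
	return cmd_table[command](ctx);
}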

drivers/infiniband/core/uverbs_std_types.c

@@ -234,15 +234,18 @@ static void create_udata(struct uverbs_attr_bundle *ctx,
		uverbs_attr_get(ctx, UVERBS_UHW_OUT);

	if (!IS_ERR(uhw_in)) {
-		udata->inbuf = uhw_in->ptr_attr.ptr;
		udata->inlen = uhw_in->ptr_attr.len;
+		if (uverbs_attr_ptr_is_inline(uhw_in))
+			udata->inbuf = &uhw_in->uattr->data;
+		else
+			udata->inbuf = u64_to_user_ptr(uhw_in->ptr_attr.data);
	} else {
		udata->inbuf = NULL;
		udata->inlen = 0;
	}

	if (!IS_ERR(uhw_out)) {
-		udata->outbuf = uhw_out->ptr_attr.ptr;
+		udata->outbuf = u64_to_user_ptr(uhw_out->ptr_attr.data);
		udata->outlen = uhw_out->ptr_attr.len;
	} else {
		udata->outbuf = NULL;
@@ -323,7 +326,8 @@ static int uverbs_create_cq_handler(struct ib_device *ib_dev,
	cq->res.type = RDMA_RESTRACK_CQ;
	rdma_restrack_add(&cq->res);

-	ret = uverbs_copy_to(attrs, CREATE_CQ_RESP_CQE, &cq->cqe);
+	ret = uverbs_copy_to(attrs, CREATE_CQ_RESP_CQE, &cq->cqe,
+			     sizeof(cq->cqe));
	if (ret)
		goto err_cq;
@@ -375,7 +379,7 @@ static int uverbs_destroy_cq_handler(struct ib_device *ib_dev,
	resp.comp_events_reported = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

-	return uverbs_copy_to(attrs, DESTROY_CQ_RESP, &resp);
+	return uverbs_copy_to(attrs, DESTROY_CQ_RESP, &resp, sizeof(resp));
 }

 static DECLARE_UVERBS_METHOD(

drivers/infiniband/core/verbs.c

@@ -887,7 +887,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
	if (qp_init_attr->cap.max_rdma_ctxs)
		rdma_rw_init_qp(device, qp_init_attr);

-	qp = _ib_create_qp(device, pd, qp_init_attr, NULL);
+	qp = _ib_create_qp(device, pd, qp_init_attr, NULL, NULL);
	if (IS_ERR(qp))
		return qp;
@@ -898,7 +898,6 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
	}

	qp->real_qp = qp;
-	qp->uobject = NULL;
	qp->qp_type = qp_init_attr->qp_type;
	qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;

drivers/infiniband/hw/bnxt_re/bnxt_re.h

@@ -120,7 +120,6 @@ struct bnxt_re_dev {
 #define BNXT_RE_FLAG_HAVE_L2_REF	3
 #define BNXT_RE_FLAG_RCFW_CHANNEL_EN	4
 #define BNXT_RE_FLAG_QOS_WORK_REG	5
-#define BNXT_RE_FLAG_TASK_IN_PROG	6
 #define BNXT_RE_FLAG_ISSUE_ROCE_STATS	29
	struct net_device		*netdev;
	unsigned int			version, major, minor;
@@ -158,6 +157,7 @@ struct bnxt_re_dev {
	atomic_t			srq_count;
	atomic_t			mr_count;
	atomic_t			mw_count;
+	atomic_t			sched_count;
	/* Max of 2 lossless traffic class supported per port */
	u16				cosq[2];

drivers/infiniband/hw/bnxt_re/ib_verbs.c

@@ -174,10 +174,8 @@ int bnxt_re_query_device(struct ib_device *ibdev,
	ib_attr->max_pd = dev_attr->max_pd;
	ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
-	if (dev_attr->is_atomic) {
-		ib_attr->atomic_cap = IB_ATOMIC_HCA;
-		ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
-	}
+	ib_attr->atomic_cap = IB_ATOMIC_NONE;
+	ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;

	ib_attr->max_ee_rd_atom = 0;
	ib_attr->max_res_rd_atom = 0;
@@ -787,20 +785,51 @@ int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
	return 0;
 }

+static unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
+	__acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&qp->scq->cq_lock, flags);
+	if (qp->rcq != qp->scq)
+		spin_lock(&qp->rcq->cq_lock);
+	else
+		__acquire(&qp->rcq->cq_lock);
+
+	return flags;
+}
+
+static void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
+			       unsigned long flags)
+	__releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
+{
+	if (qp->rcq != qp->scq)
+		spin_unlock(&qp->rcq->cq_lock);
+	else
+		__release(&qp->rcq->cq_lock);
+	spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
+}
+
 /* Queue Pairs */
 int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
 {
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_re_dev *rdev = qp->rdev;
	int rc;
+	unsigned int flags;

	bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
-	bnxt_qplib_del_flush_qp(&qp->qplib_qp);
	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
		return rc;
	}
+
+	flags = bnxt_re_lock_cqs(qp);
+	bnxt_qplib_clean_qp(&qp->qplib_qp);
+	bnxt_re_unlock_cqs(qp, flags);
+	bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
+
	if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
		rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
					   &rdev->sqp_ah->qplib_ah);
@@ -810,7 +839,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
			return rc;
		}

-		bnxt_qplib_del_flush_qp(&qp->qplib_qp);
+		bnxt_qplib_clean_qp(&qp->qplib_qp);
		rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
					   &rdev->qp1_sqp->qplib_qp);
		if (rc) {
@@ -1069,6 +1098,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
			goto fail;
		}
		qp->qplib_qp.scq = &cq->qplib_cq;
+		qp->scq = cq;
	}

	if (qp_init_attr->recv_cq) {
@@ -1080,6 +1110,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
			goto fail;
		}
		qp->qplib_qp.rcq = &cq->qplib_cq;
+		qp->rcq = cq;
	}

	if (qp_init_attr->srq) {
@@ -1185,7 +1216,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
		rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
		if (rc) {
			dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
-			goto fail;
+			goto free_umem;
		}
	}
@@ -1213,6 +1244,13 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
	return &qp->ib_qp;

 qp_destroy:
	bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
+free_umem:
+	if (udata) {
+		if (qp->rumem)
+			ib_umem_release(qp->rumem);
+		if (qp->sumem)
+			ib_umem_release(qp->sumem);
+	}
 fail:
	kfree(qp);
	return ERR_PTR(rc);
@@ -1603,7 +1641,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
			dev_dbg(rdev_to_dev(rdev),
				"Move QP = %p out of flush list\n",
				qp);
-			bnxt_qplib_del_flush_qp(&qp->qplib_qp);
+			bnxt_qplib_clean_qp(&qp->qplib_qp);
		}
	}
	if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
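
bnxt_re_lock_cqs()/bnxt_re_unlock_cqs() above follow a common pattern for a QP
whose send and receive CQs may be the same object: take the second spinlock
only when the CQs differ, and release in reverse order. A generic sketch
(illustrative names, not the driver code):

static unsigned long lock_two_cqs(spinlock_t *scq_lock, spinlock_t *rcq_lock)
{
	unsigned long flags;

	spin_lock_irqsave(scq_lock, flags);	/* outer lock, IRQs off */
	if (rcq_lock != scq_lock)
		spin_lock(rcq_lock);		/* inner lock, nested */
	return flags;
}

static void unlock_two_cqs(spinlock_t *scq_lock, spinlock_t *rcq_lock,
			   unsigned long flags)
{
	if (rcq_lock != scq_lock)
		spin_unlock(rcq_lock);
	spin_unlock_irqrestore(scq_lock, flags);
}

The __acquire()/__release() calls in the real code exist only to keep sparse's
lock-balance accounting straight on the branch where both pointers alias.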

drivers/infiniband/hw/bnxt_re/ib_verbs.h

@@ -89,6 +89,8 @@ struct bnxt_re_qp {
	/* QP1 */
	u32			send_psn;
	struct ib_ud_header	qp1_hdr;
+	struct bnxt_re_cq	*scq;
+	struct bnxt_re_cq	*rcq;
 };

 struct bnxt_re_cq {

drivers/infiniband/hw/bnxt_re/main.c

@@ -656,7 +656,6 @@ static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev)
	mutex_unlock(&bnxt_re_dev_lock);
	synchronize_rcu();
-	flush_workqueue(bnxt_re_wq);

	ib_dealloc_device(&rdev->ibdev);
	/* rdev is gone */
@@ -1441,7 +1440,7 @@ static void bnxt_re_task(struct work_struct *work)
		break;
	}
	smp_mb__before_atomic();
-	clear_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags);
+	atomic_dec(&rdev->sched_count);
	kfree(re_work);
 }
@@ -1503,7 +1502,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
		/* netdev notifier will call NETDEV_UNREGISTER again later since
		 * we are still holding the reference to the netdev
		 */
-		if (test_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags))
+		if (atomic_read(&rdev->sched_count) > 0)
			goto exit;
		bnxt_re_ib_unreg(rdev, false);
		bnxt_re_remove_one(rdev);
@@ -1523,7 +1522,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
			re_work->vlan_dev = (real_dev == netdev ?
					     NULL : netdev);
			INIT_WORK(&re_work->work, bnxt_re_task);
-			set_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags);
+			atomic_inc(&rdev->sched_count);
			queue_work(bnxt_re_wq, &re_work->work);
		}
	}
@@ -1578,6 +1577,11 @@ static void __exit bnxt_re_mod_exit(void)
	 */
	list_for_each_entry_safe_reverse(rdev, next, &to_be_deleted, list) {
		dev_info(rdev_to_dev(rdev), "Unregistering Device");
+		/*
+		 * Flush out any scheduled tasks before destroying the
+		 * resources
+		 */
+		flush_workqueue(bnxt_re_wq);
		bnxt_re_dev_stop(rdev);
		bnxt_re_ib_unreg(rdev, true);
		bnxt_re_remove_one(rdev);
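
The switch from a TASK_IN_PROG flag bit to sched_count fixes a counting
problem: several work items can be queued at once, and the first one to finish
clears a single bit, letting teardown race the rest. A counter only drains to
zero when the last item is done. Sketch (illustrative, not the driver code):

static atomic_t sched_count = ATOMIC_INIT(0);

static void schedule_task(struct workqueue_struct *wq, struct work_struct *w)
{
	atomic_inc(&sched_count);	/* one increment per queued item */
	queue_work(wq, w);
}

static void task_fn(struct work_struct *w)
{
	/* ... actual work ... */
	atomic_dec(&sched_count);	/* each item drops exactly one count */
}

static bool teardown_may_proceed(void)
{
	return atomic_read(&sched_count) == 0;	/* all queued items drained */
}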

drivers/infiniband/hw/bnxt_re/qplib_fp.c

@@ -173,7 +173,7 @@ static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
	}
 }

-void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
+void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
 {
	unsigned long flags;
@@ -1419,7 +1419,6 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_destroy_qp req;
	struct creq_destroy_qp_resp resp;
-	unsigned long flags;
	u16 cmd_flags = 0;
	int rc;
@@ -1437,19 +1436,12 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
		return rc;
	}

-	/* Must walk the associated CQs to nullified the QP ptr */
-	spin_lock_irqsave(&qp->scq->hwq.lock, flags);
-
-	__clean_cq(qp->scq, (u64)(unsigned long)qp);
-
-	if (qp->rcq && qp->rcq != qp->scq) {
-		spin_lock(&qp->rcq->hwq.lock);
-		__clean_cq(qp->rcq, (u64)(unsigned long)qp);
-		spin_unlock(&qp->rcq->hwq.lock);
-	}
-
-	spin_unlock_irqrestore(&qp->scq->hwq.lock, flags);
-
	return 0;
 }

+void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
+			    struct bnxt_qplib_qp *qp)
+{
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	bnxt_qplib_free_hwq(res->pdev, &qp->sq.hwq);
	kfree(qp->sq.swq);
@@ -1462,7 +1454,6 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
	if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res->pdev, &qp->orrq);

-	return 0;
 }

 void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,

drivers/infiniband/hw/bnxt_re/qplib_fp.h

@@ -478,6 +478,9 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
 int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
 int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
 int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
+void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp);
+void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
+			    struct bnxt_qplib_qp *qp);
 void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge);
 void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
@@ -500,7 +503,6 @@ void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
 void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
 int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
 void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp);
-void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp);
 void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
				 unsigned long *flags);
 void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,

drivers/infiniband/hw/bnxt_re/qplib_sp.c

@@ -52,18 +52,6 @@ const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0,
 /* Device */

-static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
-{
-	int rc;
-	u16 pcie_ctl2;
-
-	rc = pcie_capability_read_word(rcfw->pdev, PCI_EXP_DEVCTL2,
-				       &pcie_ctl2);
-	if (rc)
-		return false;
-	return !!(pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
-}
-
 static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
				     char *fw_ver)
 {
@@ -165,7 +153,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
		attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
	}

-	attr->is_atomic = bnxt_qplib_is_atomic_cap(rcfw);
+	attr->is_atomic = 0;
 bail:
	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
	return rc;

drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c

@@ -114,6 +114,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_cq *cmd = &req.create_cq;
	struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp;
+	struct pvrdma_create_cq_resp cq_resp = {0};
	struct pvrdma_create_cq ucmd;

	BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64);
@@ -197,6 +198,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
	cq->ibcq.cqe = resp->cqe;
	cq->cq_handle = resp->cq_handle;
+	cq_resp.cqn = resp->cq_handle;
	spin_lock_irqsave(&dev->cq_tbl_lock, flags);
	dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq;
	spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
@@ -205,7 +207,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
		cq->uar = &(to_vucontext(context)->uar);

		/* Copy udata back. */
-		if (ib_copy_to_udata(udata, &cq->cq_handle, sizeof(__u32))) {
+		if (ib_copy_to_udata(udata, &cq_resp, sizeof(cq_resp))) {
			dev_warn(&dev->pdev->dev,
				 "failed to copy back udata\n");
			pvrdma_destroy_cq(&cq->ibcq);

drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c

@@ -113,6 +113,7 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_srq *cmd = &req.create_srq;
	struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp;
+	struct pvrdma_create_srq_resp srq_resp = {0};
	struct pvrdma_create_srq ucmd;
	unsigned long flags;
	int ret;
@@ -204,12 +205,13 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
	}

	srq->srq_handle = resp->srqn;
+	srq_resp.srqn = resp->srqn;
	spin_lock_irqsave(&dev->srq_tbl_lock, flags);
	dev->srq_tbl[srq->srq_handle % dev->dsr->caps.max_srq] = srq;
	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);

	/* Copy udata back. */
-	if (ib_copy_to_udata(udata, &srq->srq_handle, sizeof(__u32))) {
+	if (ib_copy_to_udata(udata, &srq_resp, sizeof(srq_resp))) {
		dev_warn(&dev->pdev->dev, "failed to copy back udata\n");
		pvrdma_destroy_srq(&srq->ibsrq);
		return ERR_PTR(-EINVAL);

drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c

@@ -447,6 +447,7 @@ struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev,
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_pd *cmd = &req.create_pd;
	struct pvrdma_cmd_create_pd_resp *resp = &rsp.create_pd_resp;
+	struct pvrdma_alloc_pd_resp pd_resp = {0};
	int ret;
	void *ptr;
@@ -475,9 +476,10 @@ struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev,
	pd->privileged = !context;
	pd->pd_handle = resp->pd_handle;
	pd->pdn = resp->pd_handle;
+	pd_resp.pdn = resp->pd_handle;

	if (context) {
-		if (ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
+		if (ib_copy_to_udata(udata, &pd_resp, sizeof(pd_resp))) {
			dev_warn(&dev->pdev->dev,
				 "failed to copy back protection domain\n");
			pvrdma_dealloc_pd(&pd->ibpd);

drivers/infiniband/ulp/ipoib/ipoib_fs.c

@@ -281,8 +281,6 @@ void ipoib_delete_debug_files(struct net_device *dev)
 {
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

-	WARN_ONCE(!priv->mcg_dentry, "null mcg debug file\n");
-	WARN_ONCE(!priv->path_dentry, "null path debug file\n");
	debugfs_remove(priv->mcg_dentry);
	debugfs_remove(priv->path_dentry);
	priv->mcg_dentry = priv->path_dentry = NULL;

include/rdma/restrack.h

@@ -28,10 +28,6 @@ enum rdma_restrack_type {
	 * @RDMA_RESTRACK_QP: Queue pair (QP)
	 */
	RDMA_RESTRACK_QP,
-	/**
-	 * @RDMA_RESTRACK_XRCD: XRC domain (XRCD)
-	 */
-	RDMA_RESTRACK_XRCD,
	/**
	 * @RDMA_RESTRACK_MAX: Last entry, used for array dclarations
	 */

include/rdma/uverbs_ioctl.h

@@ -276,10 +276,7 @@ struct uverbs_object_tree_def {
  */

 struct uverbs_ptr_attr {
-	union {
-		u64		data;
-		void	__user *ptr;
-	};
+	u64		data;
	u16		len;
	/* Combination of bits from enum UVERBS_ATTR_F_XXXX */
	u16		flags;
@@ -351,38 +348,60 @@ static inline const struct uverbs_attr *uverbs_attr_get(const struct uverbs_attr
 }

 static inline int uverbs_copy_to(const struct uverbs_attr_bundle *attrs_bundle,
-				 size_t idx, const void *from)
+				 size_t idx, const void *from, size_t size)
 {
	const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx);
	u16 flags;
+	size_t min_size;

	if (IS_ERR(attr))
		return PTR_ERR(attr);

+	min_size = min_t(size_t, attr->ptr_attr.len, size);
+	if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size))
+		return -EFAULT;
+
	flags = attr->ptr_attr.flags | UVERBS_ATTR_F_VALID_OUTPUT;
-	return (!copy_to_user(attr->ptr_attr.ptr, from, attr->ptr_attr.len) &&
-		!put_user(flags, &attr->uattr->flags)) ? 0 : -EFAULT;
+	if (put_user(flags, &attr->uattr->flags))
+		return -EFAULT;
+
+	return 0;
 }

-static inline int _uverbs_copy_from(void *to, size_t to_size,
+static inline bool uverbs_attr_ptr_is_inline(const struct uverbs_attr *attr)
+{
+	return attr->ptr_attr.len <= sizeof(attr->ptr_attr.data);
+}
+
+static inline int _uverbs_copy_from(void *to,
				    const struct uverbs_attr_bundle *attrs_bundle,
-				    size_t idx)
+				    size_t idx,
+				    size_t size)
 {
	const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx);

	if (IS_ERR(attr))
		return PTR_ERR(attr);

-	if (to_size <= sizeof(((struct ib_uverbs_attr *)0)->data))
+	/*
+	 * Validation ensures attr->ptr_attr.len >= size. If the caller is
+	 * using UVERBS_ATTR_SPEC_F_MIN_SZ then it must call copy_from with
+	 * the right size.
+	 */
+	if (unlikely(size < attr->ptr_attr.len))
+		return -EINVAL;
+
+	if (uverbs_attr_ptr_is_inline(attr))
		memcpy(to, &attr->ptr_attr.data, attr->ptr_attr.len);
-	else if (copy_from_user(to, attr->ptr_attr.ptr, attr->ptr_attr.len))
+	else if (copy_from_user(to, u64_to_user_ptr(attr->ptr_attr.data),
+				attr->ptr_attr.len))
		return -EFAULT;

	return 0;
 }

 #define uverbs_copy_from(to, attrs_bundle, idx) \
-	_uverbs_copy_from(to, sizeof(*(to)), attrs_bundle, idx)
+	_uverbs_copy_from(to, attrs_bundle, idx, sizeof(*to))

 /* =================================================
  * Definitions -> Specs infrastructure
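
uverbs_attr_ptr_is_inline() above encodes a space optimization in the ioctl
ABI: an attribute payload that fits in the 64-bit data field travels in the
field itself; anything larger travels as a user pointer stored in the same
field. A standalone sketch of reading such an attribute (hypothetical struct,
not the uverbs code):

struct attr {
	__u64 data;	/* inline payload, or user pointer cast to u64 */
	__u16 len;
};

static int read_attr(const struct attr *a, void *out, size_t out_len)
{
	if (a->len > out_len)
		return -EINVAL;
	if (a->len <= sizeof(a->data)) {	/* the "is inline" test */
		memcpy(out, &a->data, a->len);	/* bytes live in the field */
		return 0;
	}
	if (copy_from_user(out, u64_to_user_ptr(a->data), a->len))
		return -EFAULT;
	return 0;
}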

include/uapi/rdma/rdma_user_ioctl.h

@@ -65,7 +65,7 @@ struct ib_uverbs_attr {
	__u16 len;		/* only for pointers */
	__u16 flags;		/* combination of UVERBS_ATTR_F_XXXX */
	__u16 reserved;
-	__u64 data;		/* ptr to command, inline data or idr/fd */
+	__aligned_u64 data;	/* ptr to command, inline data or idr/fd */
 };

 struct ib_uverbs_ioctl_hdr {
@@ -73,7 +73,7 @@
	__u16 object_id;
	__u16 method_id;
	__u16 num_attrs;
-	__u64 reserved;
+	__aligned_u64 reserved;
	struct ib_uverbs_attr attrs[0];
 };
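
The __aligned_u64 change pairs with the new .compat_ioctl entry in
uverbs_main.c: a plain __u64 is 8-byte aligned on x86-64 but only 4-byte
aligned on i386, so the same uapi struct can have two layouts across the
compat boundary. Forcing 8-byte alignment makes the 32- and 64-bit layouts
identical. Sketch with a hypothetical struct (C11 _Static_assert for the
compile-time check):

#include <stddef.h>
#include <linux/types.h>

struct hdr_plain {
	__u32 id;
	__u64 data;		/* offset 4 on i386, offset 8 on x86-64 */
};

struct hdr_fixed {
	__u32 id;
	__aligned_u64 data;	/* offset 8 on every architecture */
};

/* Holds everywhere; with plain __u64 it would fail on a 32-bit build. */
_Static_assert(offsetof(struct hdr_fixed, data) == 8, "stable layout");
_Static_assert(sizeof(struct hdr_fixed) == 16, "stable size");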