IB/core: Change idr objects to use the new schema

This changes only the handlers that deal with idr-based objects so
that they use the new idr allocation, fetching and destruction schema.
This patch consists of the following changes:
(1) Allocation, fetching and destruction are done via idr ops
    (see the before/after handler sketch below).
(2) Context initialization and release are done through
    uverbs_initialize_ucontext and uverbs_cleanup_ucontext.
(3) Dropping the live flag. Mostly, this is straightforward.
    The only place that is a bit trickier is ib_uverbs_open_qp.
    Commit [1] added code there to check whether the uobject is
    already live and initialized, guarding against a race between
    open_qp and events. We now delay assigning the uobject's
    pointer in order to eliminate this race without the live variable.

[1] commit a040f95dc8
	("IB/core: Fix XRC race condition in ib_uverbs_open_qp")

Signed-off-by: Matan Barak <matanb@mellanox.com>
Reviewed-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Matan Barak <matanb@mellanox.com>, 2017-04-04 13:31:44 +03:00; committed by Doug Ledford
commit fd3c7904db, parent 6be60aed12
6 changed files with 403 additions and 1145 deletions
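For orientation, here is a minimal before/after sketch of the handler pattern this patch converts. It is illustrative only: the converted handlers live in uverbs_cmd.c (whose diff is suppressed below), and the old idr_read_qp()/put_qp_read() helper names and the handler shape are recalled from that file, not quoted from this patch.

/* Before: per-type idr helpers plus manual ref handling (sketch). */
static int handler_old(struct ib_uverbs_file *file, u32 qp_handle)
{
	struct ib_qp *qp = idr_read_qp(qp_handle, file->ucontext);

	if (!qp)
		return -EINVAL;
	/* ... use qp ... */
	put_qp_read(qp);
	return 0;
}

/* After: the same lookup through the new uobj_* wrappers over idr ops. */
static int handler_new(struct ib_uverbs_file *file, u32 qp_handle)
{
	struct ib_qp *qp = uobj_get_obj_read(qp, qp_handle, file->ucontext);

	if (!qp)
		return -EINVAL;
	/* ... use qp ... */
	uobj_put_obj_read(qp);
	return 0;
}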

diff --git a/drivers/infiniband/core/rdma_core.h b/drivers/infiniband/core/rdma_core.h

@@ -52,4 +52,19 @@
 void uverbs_cleanup_ucontext(struct ib_ucontext *ucontext, bool device_removed);
 void uverbs_initialize_ucontext(struct ib_ucontext *ucontext);
 
+/*
+ * uverbs_uobject_get is called in order to increase the reference count on
+ * an uobject. This is useful when a handler wants to keep the uobject's
+ * memory alive, regardless of whether the uobject is still alive in the
+ * context's objects repository. Objects are put via uverbs_uobject_put.
+ */
+void uverbs_uobject_get(struct ib_uobject *uobject);
+
+/*
+ * In order to indicate that we no longer need this uobject, uverbs_uobject_put
+ * is called. When the reference count is decreased, the uobject is freed.
+ * For example, this is used when attaching a completion channel to a CQ.
+ */
+void uverbs_uobject_put(struct ib_uobject *uobject);
+
 #endif /* RDMA_CORE_H */
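The uverbs_uobject_put() comment above cites the completion-channel case; a hedged sketch of that kind of usage follows (the function names here are invented for illustration):

/*
 * Sketch: an event path pins the CQ's uobject memory with its own
 * reference, so the uobject may outlive its handle in the context's
 * idr. Names below are hypothetical.
 */
static void event_path_pin_cq(struct ib_uobject *cq_uobj)
{
	uverbs_uobject_get(cq_uobj);	/* memory stays valid */
}

static void event_path_unpin_cq(struct ib_uobject *cq_uobj)
{
	uverbs_uobject_put(cq_uobj);	/* last put frees the uobject */
}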

diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h

@@ -180,8 +180,6 @@ struct ib_ucq_object {
 	u32			async_events_reported;
 };
 
-void idr_remove_uobj(struct ib_uobject *uobj);
-
 struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
 					struct ib_device *ib_dev,
 					int is_async);

diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
(file diff suppressed because it is too large)

diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c

@@ -52,6 +52,7 @@
 #include "uverbs.h"
 #include "core_priv.h"
+#include "rdma_core.h"
 
 MODULE_AUTHOR("Roland Dreier");
 MODULE_DESCRIPTION("InfiniBand userspace verbs access");
@ -214,140 +215,11 @@ void ib_uverbs_detach_umcast(struct ib_qp *qp,
} }
static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file, static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
struct ib_ucontext *context) struct ib_ucontext *context,
bool device_removed)
{ {
struct ib_uobject *uobj, *tmp;
context->closing = 1; context->closing = 1;
uverbs_cleanup_ucontext(context, device_removed);
list_for_each_entry_safe(uobj, tmp, &context->ah_list, list) {
struct ib_ah *ah = uobj->object;
idr_remove_uobj(uobj);
ib_destroy_ah(ah);
ib_rdmacg_uncharge(&uobj->cg_obj, context->device,
RDMACG_RESOURCE_HCA_OBJECT);
kfree(uobj);
}
/* Remove MWs before QPs, in order to support type 2A MWs. */
list_for_each_entry_safe(uobj, tmp, &context->mw_list, list) {
struct ib_mw *mw = uobj->object;
idr_remove_uobj(uobj);
uverbs_dealloc_mw(mw);
ib_rdmacg_uncharge(&uobj->cg_obj, context->device,
RDMACG_RESOURCE_HCA_OBJECT);
kfree(uobj);
}
list_for_each_entry_safe(uobj, tmp, &context->rule_list, list) {
struct ib_flow *flow_id = uobj->object;
idr_remove_uobj(uobj);
ib_destroy_flow(flow_id);
ib_rdmacg_uncharge(&uobj->cg_obj, context->device,
RDMACG_RESOURCE_HCA_OBJECT);
kfree(uobj);
}
list_for_each_entry_safe(uobj, tmp, &context->qp_list, list) {
struct ib_qp *qp = uobj->object;
struct ib_uqp_object *uqp =
container_of(uobj, struct ib_uqp_object, uevent.uobject);
idr_remove_uobj(uobj);
if (qp == qp->real_qp)
ib_uverbs_detach_umcast(qp, uqp);
ib_destroy_qp(qp);
ib_rdmacg_uncharge(&uobj->cg_obj, context->device,
RDMACG_RESOURCE_HCA_OBJECT);
ib_uverbs_release_uevent(file, &uqp->uevent);
kfree(uqp);
}
list_for_each_entry_safe(uobj, tmp, &context->rwq_ind_tbl_list, list) {
struct ib_rwq_ind_table *rwq_ind_tbl = uobj->object;
struct ib_wq **ind_tbl = rwq_ind_tbl->ind_tbl;
idr_remove_uobj(uobj);
ib_destroy_rwq_ind_table(rwq_ind_tbl);
kfree(ind_tbl);
kfree(uobj);
}
list_for_each_entry_safe(uobj, tmp, &context->wq_list, list) {
struct ib_wq *wq = uobj->object;
struct ib_uwq_object *uwq =
container_of(uobj, struct ib_uwq_object, uevent.uobject);
idr_remove_uobj(uobj);
ib_destroy_wq(wq);
ib_uverbs_release_uevent(file, &uwq->uevent);
kfree(uwq);
}
list_for_each_entry_safe(uobj, tmp, &context->srq_list, list) {
struct ib_srq *srq = uobj->object;
struct ib_uevent_object *uevent =
container_of(uobj, struct ib_uevent_object, uobject);
idr_remove_uobj(uobj);
ib_destroy_srq(srq);
ib_rdmacg_uncharge(&uobj->cg_obj, context->device,
RDMACG_RESOURCE_HCA_OBJECT);
ib_uverbs_release_uevent(file, uevent);
kfree(uevent);
}
list_for_each_entry_safe(uobj, tmp, &context->cq_list, list) {
struct ib_cq *cq = uobj->object;
struct ib_uverbs_event_file *ev_file = cq->cq_context;
struct ib_ucq_object *ucq =
container_of(uobj, struct ib_ucq_object, uobject);
idr_remove_uobj(uobj);
ib_destroy_cq(cq);
ib_rdmacg_uncharge(&uobj->cg_obj, context->device,
RDMACG_RESOURCE_HCA_OBJECT);
ib_uverbs_release_ucq(file, ev_file, ucq);
kfree(ucq);
}
list_for_each_entry_safe(uobj, tmp, &context->mr_list, list) {
struct ib_mr *mr = uobj->object;
idr_remove_uobj(uobj);
ib_dereg_mr(mr);
ib_rdmacg_uncharge(&uobj->cg_obj, context->device,
RDMACG_RESOURCE_HCA_OBJECT);
kfree(uobj);
}
mutex_lock(&file->device->xrcd_tree_mutex);
list_for_each_entry_safe(uobj, tmp, &context->xrcd_list, list) {
struct ib_xrcd *xrcd = uobj->object;
struct ib_uxrcd_object *uxrcd =
container_of(uobj, struct ib_uxrcd_object, uobject);
idr_remove_uobj(uobj);
ib_uverbs_dealloc_xrcd(file->device, xrcd,
file->ucontext ? RDMA_REMOVE_CLOSE :
RDMA_REMOVE_DRIVER_REMOVE);
kfree(uxrcd);
}
mutex_unlock(&file->device->xrcd_tree_mutex);
list_for_each_entry_safe(uobj, tmp, &context->pd_list, list) {
struct ib_pd *pd = uobj->object;
idr_remove_uobj(uobj);
ib_dealloc_pd(pd);
ib_rdmacg_uncharge(&uobj->cg_obj, context->device,
RDMACG_RESOURCE_HCA_OBJECT);
kfree(uobj);
}
put_pid(context->tgid); put_pid(context->tgid);
ib_rdmacg_uncharge(&context->cg_obj, context->device, ib_rdmacg_uncharge(&context->cg_obj, context->device,
@@ -592,7 +464,7 @@ void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr)
 	struct ib_uevent_object *uobj;
 
 	/* for XRC target qp's, check that qp is live */
-	if (!event->element.qp->uobject || !event->element.qp->uobject->live)
+	if (!event->element.qp->uobject)
 		return;
 
 	uobj = container_of(event->element.qp->uobject,
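This NULL check suffices without the live flag because, per point (3) of the commit message, ib_uverbs_open_qp now assigns qp->uobject only once the uobject is fully initialized. A hedged sketch of that ordering (abbreviated and partly hypothetical, since the uverbs_cmd.c diff is suppressed above):

/* Sketch of the tail of ib_uverbs_open_qp() after this patch: */
qp = ib_open_qp(xrcd, &attr);
if (IS_ERR(qp)) {
	ret = PTR_ERR(qp);
	goto err_put;			/* hypothetical error label */
}

obj->uevent.uobject.object = qp;
/*
 * This store is what makes the QP visible to
 * ib_uverbs_qp_event_handler(); until it happens the handler sees
 * qp->uobject == NULL and drops the event, closing the race that the
 * live flag used to guard.
 */
qp->uobject = &obj->uevent.uobject;
uobj_alloc_commit(&obj->uevent.uobject);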
@@ -1010,7 +882,7 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
 
 	mutex_lock(&file->cleanup_mutex);
 	if (file->ucontext) {
-		ib_uverbs_cleanup_ucontext(file, file->ucontext);
+		ib_uverbs_cleanup_ucontext(file, file->ucontext, false);
 		file->ucontext = NULL;
 	}
 	mutex_unlock(&file->cleanup_mutex);
@@ -1260,7 +1132,7 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
 		 * (e.g mmput).
 		 */
 		ib_dev->disassociate_ucontext(ucontext);
-		ib_uverbs_cleanup_ucontext(file, ucontext);
+		ib_uverbs_cleanup_ucontext(file, ucontext, true);
 	}
 
 	mutex_lock(&uverbs_dev->lists_mutex);
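Conceptually, the single uverbs_cleanup_ucontext() call replaces every removed per-type loop above: all uobjects now sit on one per-context list, and each carries a type that knows how to destroy it. A simplified sketch, not the rdma_core.c implementation (remove_one() stands in for the real per-type dispatch, and the real code also enforces a per-type destroy order, e.g. MWs before QPs and PDs last):

static void cleanup_ucontext_sketch(struct ib_ucontext *ucontext,
				    enum rdma_remove_reason reason)
{
	struct ib_uobject *uobj, *tmp;

	/* One list, one loop: type-specific teardown is dispatched per
	 * uobject instead of being open-coded per object class. */
	list_for_each_entry_safe(uobj, tmp, &ucontext->uobjects, list)
		remove_one(uobj, reason);	/* hypothetical dispatch */
}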

diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h

@@ -1377,17 +1377,6 @@ struct ib_rdmacg_object {
 struct ib_ucontext {
 	struct ib_device       *device;
 	struct ib_uverbs_file  *ufile;
-	struct list_head	pd_list;
-	struct list_head	mr_list;
-	struct list_head	mw_list;
-	struct list_head	cq_list;
-	struct list_head	qp_list;
-	struct list_head	srq_list;
-	struct list_head	ah_list;
-	struct list_head	xrcd_list;
-	struct list_head	rule_list;
-	struct list_head	wq_list;
-	struct list_head	rwq_ind_tbl_list;
 	int			closing;
 
 	/* locking the uobjects_list */
@@ -1426,10 +1415,8 @@ struct ib_uobject {
 	struct ib_rdmacg_object	cg_obj;		/* rdmacg object */
 	int			id;		/* index into kernel idr */
 	struct kref		ref;
-	struct rw_semaphore	mutex;		/* protects .live */
 	atomic_t		usecnt;		/* protects exclusive access */
 	struct rcu_head		rcu;		/* kfree_rcu() overhead */
 
-	int			live;
-
 	const struct uverbs_obj_type *type;
 };
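The removed rw_semaphore/live pair is subsumed by the usecnt atomic. A sketch of how one counter can serve both shared and exclusive access (illustrative; the actual helper lives in rdma_core.c and may differ): usecnt == 0 means unused, > 0 counts shared (read) users, and -1 marks exclusive ownership.

static int try_lock_uobject_sketch(struct ib_uobject *uobj, bool exclusive)
{
	if (exclusive)
		/* Exclusive access only when no one holds the object. */
		return atomic_cmpxchg(&uobj->usecnt, 0, -1) == 0 ? 0 : -EBUSY;

	for (;;) {
		int cur = atomic_read(&uobj->usecnt);

		if (cur == -1)		/* exclusively owned elsewhere */
			return -EBUSY;
		if (atomic_cmpxchg(&uobj->usecnt, cur, cur + 1) == cur)
			return 0;	/* shared access granted */
	}
}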

diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h

@@ -46,5 +46,68 @@ extern const struct uverbs_obj_idr_type uverbs_type_attrs_mr;
 extern const struct uverbs_obj_idr_type uverbs_type_attrs_mw;
 extern const struct uverbs_obj_idr_type uverbs_type_attrs_pd;
 extern const struct uverbs_obj_idr_type uverbs_type_attrs_xrcd;
+
+static inline struct ib_uobject *__uobj_get(const struct uverbs_obj_type *type,
+					    bool write,
+					    struct ib_ucontext *ucontext,
+					    int id)
+{
+	return rdma_lookup_get_uobject(type, ucontext, id, write);
+}
+
+#define uobj_get_type(_type) uverbs_type_attrs_##_type.type
+
+#define uobj_get_read(_type, _id, _ucontext)				\
+	__uobj_get(&(_type), false, _ucontext, _id)
+
+#define uobj_get_obj_read(_type, _id, _ucontext)			\
+({									\
+	struct ib_uobject *uobj =					\
+		__uobj_get(&uobj_get_type(_type),			\
+			   false, _ucontext, _id);			\
+									\
+	(struct ib_##_type *)(IS_ERR(uobj) ? NULL : uobj->object);	\
+})
+
+#define uobj_get_write(_type, _id, _ucontext)				\
+	__uobj_get(&(_type), true, _ucontext, _id)
+
+static inline void uobj_put_read(struct ib_uobject *uobj)
+{
+	rdma_lookup_put_uobject(uobj, false);
+}
+
+#define uobj_put_obj_read(_obj)						\
+	uobj_put_read((_obj)->uobject)
+
+static inline void uobj_put_write(struct ib_uobject *uobj)
+{
+	rdma_lookup_put_uobject(uobj, true);
+}
+
+static inline int __must_check uobj_remove_commit(struct ib_uobject *uobj)
+{
+	return rdma_remove_commit_uobject(uobj);
+}
+
+static inline void uobj_alloc_commit(struct ib_uobject *uobj)
+{
+	rdma_alloc_commit_uobject(uobj);
+}
+
+static inline void uobj_alloc_abort(struct ib_uobject *uobj)
+{
+	rdma_alloc_abort_uobject(uobj);
+}
+
+static inline struct ib_uobject *__uobj_alloc(const struct uverbs_obj_type *type,
+					      struct ib_ucontext *ucontext)
+{
+	return rdma_alloc_begin_uobject(type, ucontext);
+}
+
+#define uobj_alloc(_type, ucontext)	\
+	__uobj_alloc(&(_type), ucontext)
+
 #endif
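A hedged usage sketch for these wrappers, with an invented handler (the real converted handlers are in the suppressed uverbs_cmd.c diff above):

/* Invented example: allocate a PD uobject while reading an existing QP. */
static int example_handler(struct ib_uverbs_file *file, u32 qp_handle)
{
	struct ib_uobject *uobj;
	struct ib_qp *qp;

	/* Begin allocation; a commit or an abort must follow. */
	uobj = uobj_alloc(uobj_get_type(pd), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	/* Shared (read) lookup of an existing object by user handle. */
	qp = uobj_get_obj_read(qp, qp_handle, file->ucontext);
	if (!qp) {
		uobj_alloc_abort(uobj);		/* roll the slot back */
		return -EINVAL;
	}

	/* ... create the PD, set uobj->object, use qp ... */

	uobj_put_obj_read(qp);			/* drop the shared lock */
	uobj_alloc_commit(uobj);		/* publish the new handle */
	return 0;
}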