IB/iser: Centralize iser completion contexts

Introduce iser_comp, which centralizes all iser completion-related
items and is referenced by iser_device and by each ib_conn.

Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
Author:    Sagi Grimberg <sagig@mellanox.com>
Date:      2014-10-01 14:02:07 +03:00
Committer: Roland Dreier
Parent:    aea8f4df6d
Commit:    bf17554035
2 changed files with 83 additions and 86 deletions
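
In outline, the patch replaces per-index CQ bookkeeping in iser_device with an array of self-contained completion contexts. A minimal sketch of the data-structure change (field names are taken from the diff below; only the before/after framing is editorial):

    /* Before: parallel arrays indexed by completion vector, plus a
     * separately allocated iser_cq_desc so that CQ callbacks could
     * recover the vector index. */
    struct ib_cq          *rx_cq[ISER_MAX_CQ];
    struct ib_cq          *tx_cq[ISER_MAX_CQ];
    struct tasklet_struct  cq_tasklet[ISER_MAX_CQ];
    int                    cq_active_qps[ISER_MAX_CQ];

    /* After: one iser_comp per vector bundles all of the above and is
     * passed directly as the CQ and tasklet context, which lets
     * iser_cq_desc be removed entirely. */
    struct iser_comp {
            struct iser_device    *device;     /* owning device */
            struct ib_cq          *rx_cq;      /* RX completion queue */
            struct ib_cq          *tx_cq;      /* TX completion queue */
            struct tasklet_struct  tasklet;    /* polls both CQs in softirq */
            int                    active_qps; /* QPs bound to this context */
    };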

drivers/infiniband/ulp/iser/iscsi_iser.h

@@ -213,7 +213,6 @@ struct iser_data_buf {
 /* fwd declarations */
 struct iser_device;
-struct iser_cq_desc;
 struct iscsi_iser_task;
 struct iscsi_endpoint;
@@ -268,20 +267,34 @@ struct iser_conn;
 struct ib_conn;
 struct iscsi_iser_task;
 
+/**
+ * struct iser_comp - iSER completion context
+ *
+ * @device:     pointer to device handle
+ * @rx_cq:      RX completion queue
+ * @tx_cq:      TX completion queue
+ * @tasklet:    Tasklet handle
+ * @active_qps: Number of active QPs attached
+ *              to completion context
+ */
+struct iser_comp {
+        struct iser_device      *device;
+        struct ib_cq            *rx_cq;
+        struct ib_cq            *tx_cq;
+        struct tasklet_struct    tasklet;
+        int                      active_qps;
+};
+
 struct iser_device {
         struct ib_device        *ib_device;
         struct ib_pd            *pd;
         struct ib_device_attr    dev_attr;
-        struct ib_cq            *rx_cq[ISER_MAX_CQ];
-        struct ib_cq            *tx_cq[ISER_MAX_CQ];
         struct ib_mr            *mr;
-        struct tasklet_struct    cq_tasklet[ISER_MAX_CQ];
         struct ib_event_handler  event_handler;
         struct list_head         ig_list; /* entry in ig devices list */
         int                      refcount;
-        int                      cq_active_qps[ISER_MAX_CQ];
-        int                      cqs_used;
-        struct iser_cq_desc     *cq_desc;
+        int                      comps_used;
+        struct iser_comp         comps[ISER_MAX_CQ];
         int                      (*iser_alloc_rdma_reg_res)(struct ib_conn *ib_conn,
                                                             unsigned cmds_max);
         void                     (*iser_free_rdma_reg_res)(struct ib_conn *ib_conn);
@@ -327,6 +340,7 @@ struct fast_reg_descriptor {
  * @post_send_buf_count: post send counter
  * @rx_wr:               receive work request for batch posts
  * @device:              reference to iser device
+ * @comp:                iser completion context
  * @pi_support:          Indicate device T10-PI support
  * @lock:                protects fmr/fastreg pool
  * @union.fmr:
@@ -345,7 +359,7 @@ struct ib_conn {
         atomic_t             post_send_buf_count;
         struct ib_recv_wr    rx_wr[ISER_MIN_POSTED_RX];
         struct iser_device  *device;
-        int                  cq_index;
+        struct iser_comp    *comp;
         bool                 pi_support;
         spinlock_t           lock;
         union {
@@ -404,11 +418,6 @@ struct iser_page_vec {
         int data_size;
 };
 
-struct iser_cq_desc {
-        struct iser_device  *device;
-        int                  cq_index;
-};
-
 struct iser_global {
         struct mutex      device_list_mutex;/*                   */
         struct list_head  device_list;      /* all iSER devices */

drivers/infiniband/ulp/iser/iser_verbs.c

@@ -44,7 +44,7 @@
 
 static void iser_cq_tasklet_fn(unsigned long data);
 static void iser_cq_callback(struct ib_cq *cq, void *cq_context);
-static int iser_drain_tx_cq(struct iser_device *device, int cq_index);
+static int iser_drain_tx_cq(struct iser_comp *comp);
 
 static void iser_cq_event_callback(struct ib_event *cause, void *context)
 {
@ -72,7 +72,6 @@ static void iser_event_handler(struct ib_event_handler *handler,
*/ */
static int iser_create_device_ib_res(struct iser_device *device) static int iser_create_device_ib_res(struct iser_device *device)
{ {
struct iser_cq_desc *cq_desc;
struct ib_device_attr *dev_attr = &device->dev_attr; struct ib_device_attr *dev_attr = &device->dev_attr;
int ret, i; int ret, i;
@@ -102,51 +101,44 @@ static int iser_create_device_ib_res(struct iser_device *device)
                 return -1;
         }
 
-        device->cqs_used = min(ISER_MAX_CQ, device->ib_device->num_comp_vectors);
+        device->comps_used = min(ISER_MAX_CQ,
+                                 device->ib_device->num_comp_vectors);
         iser_info("using %d CQs, device %s supports %d vectors\n",
-                  device->cqs_used, device->ib_device->name,
+                  device->comps_used, device->ib_device->name,
                   device->ib_device->num_comp_vectors);
 
-        device->cq_desc = kmalloc(sizeof(struct iser_cq_desc) * device->cqs_used,
-                                  GFP_KERNEL);
-        if (device->cq_desc == NULL)
-                goto cq_desc_err;
-        cq_desc = device->cq_desc;
-
         device->pd = ib_alloc_pd(device->ib_device);
         if (IS_ERR(device->pd))
                 goto pd_err;
 
-        for (i = 0; i < device->cqs_used; i++) {
-                cq_desc[i].device   = device;
-                cq_desc[i].cq_index = i;
-
-                device->rx_cq[i] = ib_create_cq(device->ib_device,
-                                                iser_cq_callback,
-                                                iser_cq_event_callback,
-                                                (void *)&cq_desc[i],
-                                                ISER_MAX_RX_CQ_LEN, i);
-                if (IS_ERR(device->rx_cq[i])) {
-                        device->rx_cq[i] = NULL;
+        for (i = 0; i < device->comps_used; i++) {
+                struct iser_comp *comp = &device->comps[i];
+
+                comp->device = device;
+                comp->rx_cq = ib_create_cq(device->ib_device,
+                                           iser_cq_callback,
+                                           iser_cq_event_callback,
+                                           (void *)comp,
+                                           ISER_MAX_RX_CQ_LEN, i);
+                if (IS_ERR(comp->rx_cq)) {
+                        comp->rx_cq = NULL;
                         goto cq_err;
                 }
 
-                device->tx_cq[i] = ib_create_cq(device->ib_device,
-                                                NULL, iser_cq_event_callback,
-                                                (void *)&cq_desc[i],
-                                                ISER_MAX_TX_CQ_LEN, i);
-
-                if (IS_ERR(device->tx_cq[i])) {
-                        device->tx_cq[i] = NULL;
+                comp->tx_cq = ib_create_cq(device->ib_device, NULL,
+                                           iser_cq_event_callback,
+                                           (void *)comp,
+                                           ISER_MAX_TX_CQ_LEN, i);
+                if (IS_ERR(comp->tx_cq)) {
+                        comp->tx_cq = NULL;
                         goto cq_err;
                 }
 
-                if (ib_req_notify_cq(device->rx_cq[i], IB_CQ_NEXT_COMP))
+                if (ib_req_notify_cq(comp->rx_cq, IB_CQ_NEXT_COMP))
                         goto cq_err;
 
-                tasklet_init(&device->cq_tasklet[i],
-                             iser_cq_tasklet_fn,
-                             (unsigned long)&cq_desc[i]);
+                tasklet_init(&comp->tasklet, iser_cq_tasklet_fn,
+                             (unsigned long)comp);
         }
 
         device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
@@ -165,19 +157,19 @@ static int iser_create_device_ib_res(struct iser_device *device)
 handler_err:
         ib_dereg_mr(device->mr);
 dma_mr_err:
-        for (i = 0; i < device->cqs_used; i++)
-                tasklet_kill(&device->cq_tasklet[i]);
+        for (i = 0; i < device->comps_used; i++)
+                tasklet_kill(&device->comps[i].tasklet);
 cq_err:
-        for (i = 0; i < device->cqs_used; i++) {
-                if (device->tx_cq[i])
-                        ib_destroy_cq(device->tx_cq[i]);
-                if (device->rx_cq[i])
-                        ib_destroy_cq(device->rx_cq[i]);
+        for (i = 0; i < device->comps_used; i++) {
+                struct iser_comp *comp = &device->comps[i];
+
+                if (comp->tx_cq)
+                        ib_destroy_cq(comp->tx_cq);
+                if (comp->rx_cq)
+                        ib_destroy_cq(comp->rx_cq);
         }
         ib_dealloc_pd(device->pd);
 pd_err:
-        kfree(device->cq_desc);
-cq_desc_err:
         iser_err("failed to allocate an IB resource\n");
         return -1;
 }
@ -191,20 +183,20 @@ static void iser_free_device_ib_res(struct iser_device *device)
int i; int i;
BUG_ON(device->mr == NULL); BUG_ON(device->mr == NULL);
for (i = 0; i < device->cqs_used; i++) { for (i = 0; i < device->comps_used; i++) {
tasklet_kill(&device->cq_tasklet[i]); struct iser_comp *comp = &device->comps[i];
(void)ib_destroy_cq(device->tx_cq[i]);
(void)ib_destroy_cq(device->rx_cq[i]); tasklet_kill(&comp->tasklet);
device->tx_cq[i] = NULL; ib_destroy_cq(comp->tx_cq);
device->rx_cq[i] = NULL; ib_destroy_cq(comp->rx_cq);
comp->tx_cq = NULL;
comp->rx_cq = NULL;
} }
(void)ib_unregister_event_handler(&device->event_handler); (void)ib_unregister_event_handler(&device->event_handler);
(void)ib_dereg_mr(device->mr); (void)ib_dereg_mr(device->mr);
(void)ib_dealloc_pd(device->pd); (void)ib_dealloc_pd(device->pd);
kfree(device->cq_desc);
device->mr = NULL; device->mr = NULL;
device->pd = NULL; device->pd = NULL;
} }
@@ -456,19 +448,20 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
         mutex_lock(&ig.connlist_mutex);
         /* select the CQ with the minimal number of usages */
-        for (index = 0; index < device->cqs_used; index++)
-                if (device->cq_active_qps[index] <
-                    device->cq_active_qps[min_index])
+        for (index = 0; index < device->comps_used; index++) {
+                if (device->comps[index].active_qps <
+                    device->comps[min_index].active_qps)
                         min_index = index;
-        device->cq_active_qps[min_index]++;
-        ib_conn->cq_index = min_index;
+        }
+        ib_conn->comp = &device->comps[min_index];
+        ib_conn->comp->active_qps++;
         mutex_unlock(&ig.connlist_mutex);
         iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn);
 
         init_attr.event_handler = iser_qp_event_callback;
         init_attr.qp_context = (void *)ib_conn;
-        init_attr.send_cq = device->tx_cq[min_index];
-        init_attr.recv_cq = device->rx_cq[min_index];
+        init_attr.send_cq = ib_conn->comp->tx_cq;
+        init_attr.recv_cq = ib_conn->comp->rx_cq;
         init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS;
         init_attr.cap.max_send_sge = 2;
         init_attr.cap.max_recv_sge = 1;
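
For clarity, the selection logic in the hunk above restated as a standalone helper (hypothetical; the patch open-codes the loop rather than factoring it out):

    /* Hypothetical helper equivalent to the loop above: return the
     * completion context currently serving the fewest QPs. The caller
     * must hold ig.connlist_mutex so active_qps counts stay consistent. */
    static struct iser_comp *iser_least_loaded_comp(struct iser_device *device)
    {
            struct iser_comp *comp = &device->comps[0];
            int i;

            for (i = 1; i < device->comps_used; i++)
                    if (device->comps[i].active_qps < comp->active_qps)
                            comp = &device->comps[i];

            return comp;
    }

Binding the connection to a pointer (ib_conn->comp) rather than an index also lets teardown decrement active_qps without going back through the device.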
@@ -604,7 +597,7 @@ static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
         iser_free_rx_descriptors(iser_conn);
 
         if (ib_conn->qp != NULL) {
-                ib_conn->device->cq_active_qps[ib_conn->cq_index]--;
+                ib_conn->comp->active_qps--;
                 rdma_destroy_qp(ib_conn->cma_id);
                 ib_conn->qp = NULL;
         }
@@ -655,14 +648,13 @@ void iser_conn_release(struct iser_conn *iser_conn)
  */
 static void iser_poll_for_flush_errors(struct ib_conn *ib_conn)
 {
-        struct iser_device *device = ib_conn->device;
         int count = 0;
 
         while (ib_conn->post_recv_buf_count > 0 ||
                atomic_read(&ib_conn->post_send_buf_count) > 0) {
                 msleep(100);
                 if (atomic_read(&ib_conn->post_send_buf_count) > 0)
-                        iser_drain_tx_cq(device, ib_conn->cq_index);
+                        iser_drain_tx_cq(ib_conn->comp);
 
                 count++;
                 /* Don't flood with prints */
@@ -1189,9 +1181,9 @@ iser_handle_comp_error(struct iser_tx_desc *desc,
         kmem_cache_free(ig.desc_cache, desc);
 }
 
-static int iser_drain_tx_cq(struct iser_device *device, int cq_index)
+static int iser_drain_tx_cq(struct iser_comp *comp)
 {
-        struct ib_cq *cq = device->tx_cq[cq_index];
+        struct ib_cq *cq = comp->tx_cq;
         struct ib_wc wc;
         struct iser_tx_desc *tx_desc;
         struct ib_conn *ib_conn;
@@ -1222,20 +1214,18 @@ static int iser_drain_tx_cq(struct iser_device *device, int cq_index)
 
 static void iser_cq_tasklet_fn(unsigned long data)
 {
-        struct iser_cq_desc *cq_desc = (struct iser_cq_desc *)data;
-        struct iser_device *device = cq_desc->device;
-        int cq_index = cq_desc->cq_index;
-        struct ib_cq *cq = device->rx_cq[cq_index];
-        struct ib_wc wc;
-        struct iser_rx_desc *desc;
-        unsigned long xfer_len;
+        struct iser_comp *comp = (struct iser_comp *)data;
+        struct ib_cq *cq = comp->rx_cq;
+        struct ib_wc wc;
+        struct iser_rx_desc *desc;
+        unsigned long xfer_len;
         struct ib_conn *ib_conn;
         int completed_tx, completed_rx = 0;
 
         /* First do tx drain, so in a case where we have rx flushes and a successful
          * tx completion we will still go through completion error handling.
          */
-        completed_tx = iser_drain_tx_cq(device, cq_index);
+        completed_tx = iser_drain_tx_cq(comp);
 
         while (ib_poll_cq(cq, 1, &wc) == 1) {
                 desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id;
@@ -1257,7 +1247,7 @@ static void iser_cq_tasklet_fn(unsigned long data)
                 }
                 completed_rx++;
                 if (!(completed_rx & 63))
-                        completed_tx += iser_drain_tx_cq(device, cq_index);
+                        completed_tx += iser_drain_tx_cq(comp);
         }
         /* #warning "it is assumed here that arming CQ only once its empty" *
          * " would not cause interrupts to be missed" */
@@ -1268,11 +1258,9 @@ static void iser_cq_tasklet_fn(unsigned long data)
 
 static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
 {
-        struct iser_cq_desc *cq_desc = (struct iser_cq_desc *)cq_context;
-        struct iser_device *device = cq_desc->device;
-        int cq_index = cq_desc->cq_index;
+        struct iser_comp *comp = cq_context;
 
-        tasklet_schedule(&device->cq_tasklet[cq_index]);
+        tasklet_schedule(&comp->tasklet);
 }
 
 u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
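
To summarize the resulting completion path (a sketch assembled from the hunks above, not code from the patch):

    /* With iser_comp as the single context object, no index lookup into
     * device-wide arrays remains anywhere on the interrupt path:
     *
     *   HCA interrupt
     *     -> iser_cq_callback(cq, cq_context)   cq_context is the iser_comp,
     *          tasklet_schedule(&comp->tasklet)  set at ib_create_cq() time
     *
     *   softirq
     *     -> iser_cq_tasklet_fn((unsigned long)comp)
     *          iser_drain_tx_cq(comp)           poll comp->tx_cq first, so TX
     *          ib_poll_cq(comp->rx_cq, ...)     errors are handled even amid
     *                                           RX flushes; TX is re-drained
     *                                           every 64 RX completions
     */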