scsi: lpfc: Limit tracking of tgt queue depth in fast path

Performance is affected when the target queue depth is tracked: an atomic
counter is incremented on the submission path, which competes with its
decrement on the completion path.  In addition, multiple CPUs can
simultaneously manipulate this counter for the same ndlp.

Reduce the overhead by performing the per-target increment/decrement only
when the target queue depth is less than the overall adapter depth, i.e.
when the check is actually meaningful.

Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
commit 2a5b7d626e
parent 93a3922da4
Author: James Smart, 2018-07-31 17:23:22 -07:00
Committer: Martin K. Petersen
6 changed files with 74 additions and 53 deletions
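The change boils down to one gating pattern, sketched below as a minimal standalone C program. This is illustrative code, not lpfc source: names such as target, io_buf, check_qdepth, BUMP_QDEPTH and the depth values are invented stand-ins for ndlp->cmd_pending, lpfc_ndlp_check_qdepth and LPFC_BUMP_QDEPTH. The per-target pending counter is bumped on submission only when the target depth is tighter than the adapter-wide depth, and the buffer records the bump so the release path decrements only when a matching increment actually happened.

/* Minimal sketch of the bump/flag pairing introduced by this patch.
 * All identifiers are invented for illustration; none are lpfc code.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define ADAPTER_MAX_DEPTH 64	/* stand-in for max_cfg_param.max_xri */
#define BUMP_QDEPTH       0x2	/* mirrors the LPFC_BUMP_QDEPTH idea */

struct target {
	atomic_int cmd_pending;	/* I/Os in flight to this target */
	int cmd_qdepth;		/* configured per-target depth */
};

struct io_buf {
	unsigned int flags;
	struct target *tgt;
};

/* Tracking is meaningful only when the target depth is tighter than
 * the adapter-wide depth; otherwise the adapter limit throttles first. */
static bool check_qdepth(const struct target *tgt)
{
	return tgt->cmd_qdepth < ADAPTER_MAX_DEPTH;
}

/* Submission path: gate first, then bump and record the bump. */
static bool submit_io(struct target *tgt, struct io_buf *buf)
{
	if (check_qdepth(tgt)) {
		if (atomic_load(&tgt->cmd_pending) >= tgt->cmd_qdepth)
			return false;	/* target queue full: fail the I/O */
		atomic_fetch_add(&tgt->cmd_pending, 1);
		buf->flags |= BUMP_QDEPTH;
	}
	buf->tgt = tgt;
	return true;
}

/* Release path: decrement only if this buffer actually bumped. */
static void release_io(struct io_buf *buf)
{
	if ((buf->flags & BUMP_QDEPTH) && buf->tgt)
		atomic_fetch_sub(&buf->tgt->cmd_pending, 1);
	buf->flags &= ~BUMP_QDEPTH;
	buf->tgt = NULL;
}

int main(void)
{
	struct target tgt = { .cmd_pending = 0, .cmd_qdepth = 32 };
	struct io_buf buf = { 0 };

	if (submit_io(&tgt, &buf))
		printf("submitted, pending=%d\n", atomic_load(&tgt.cmd_pending));
	release_io(&buf);
	printf("released, pending=%d\n", atomic_load(&tgt.cmd_pending));
	return 0;
}

Recording the bump on the buffer keeps every decrement paired with its own increment, even though the gate may evaluate differently by the time the I/O completes.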

drivers/scsi/lpfc/lpfc_disc.h

@@ -150,6 +150,9 @@ struct lpfc_node_rrq {
unsigned long rrq_stop_time;
};
#define lpfc_ndlp_check_qdepth(phba, ndlp) \
(ndlp->cmd_qdepth < phba->sli4_hba.max_cfg_param.max_xri)
/* Defines for nlp_flag (uint32) */
#define NLP_IGNR_REG_CMPL 0x00000001 /* Rcvd rscn before we cmpl reg login */
#define NLP_REG_LOGIN_SEND 0x00000002 /* sent reglogin to adapter */

drivers/scsi/lpfc/lpfc_nportdisc.c

@@ -1985,12 +1985,6 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (bf_get_be32(prli_disc, nvpr))
ndlp->nlp_type |= NLP_NVME_DISCOVERY;
/* This node is an NVME target. Adjust the command
* queue depth on this node to not exceed the available
* xris.
*/
ndlp->cmd_qdepth = phba->sli4_hba.nvme_xri_max;
/*
* If prli_fba is set, the Target supports FirstBurst.
* If prli_fb_sz is 0, the FirstBurst size is unlimited,

drivers/scsi/lpfc/lpfc_nvme.c

@@ -1135,9 +1135,6 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
else
lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
if (ndlp && NLP_CHK_NODE_ACT(ndlp))
atomic_dec(&ndlp->cmd_pending);
/* Update stats and complete the IO. There is
* no need for dma unprep because the nvme_transport
* owns the dma address.
@@ -1546,17 +1543,19 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
/* The node is shared with FCP IO, make sure the IO pending count does
* not exceed the programmed depth.
*/
if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
!expedite) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6174 Fail IO, ndlp qdepth exceeded: "
"idx %d DID %x pend %d qdepth %d\n",
lpfc_queue_info->index, ndlp->nlp_DID,
atomic_read(&ndlp->cmd_pending),
ndlp->cmd_qdepth);
atomic_inc(&lport->xmt_fcp_qdepth);
ret = -EBUSY;
goto out_fail;
if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
!expedite) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6174 Fail IO, ndlp qdepth exceeded: "
"idx %d DID %x pend %d qdepth %d\n",
lpfc_queue_info->index, ndlp->nlp_DID,
atomic_read(&ndlp->cmd_pending),
ndlp->cmd_qdepth);
atomic_inc(&lport->xmt_fcp_qdepth);
ret = -EBUSY;
goto out_fail;
}
}
lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, expedite);
@@ -1614,8 +1613,6 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
goto out_free_nvme_buf;
}
atomic_inc(&ndlp->cmd_pending);
lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
lpfc_ncmd->cur_iocbq.sli4_xritag,
lpfc_queue_info->index, ndlp->nlp_DID);
@@ -1623,7 +1620,6 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq);
if (ret) {
atomic_inc(&lport->xmt_fcp_wqerr);
atomic_dec(&ndlp->cmd_pending);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6113 Fail IO, Could not issue WQE err %x "
"sid: x%x did: x%x oxid: x%x\n",
@@ -2378,6 +2374,11 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
lpfc_ncmd = lpfc_nvme_buf(phba);
}
spin_unlock_irqrestore(&phba->nvme_buf_list_get_lock, iflag);
if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_ncmd) {
atomic_inc(&ndlp->cmd_pending);
lpfc_ncmd->flags |= LPFC_BUMP_QDEPTH;
}
return lpfc_ncmd;
}
@@ -2396,7 +2397,13 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
{
unsigned long iflag = 0;
if ((lpfc_ncmd->flags & LPFC_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
atomic_dec(&lpfc_ncmd->ndlp->cmd_pending);
lpfc_ncmd->nonsg_phys = 0;
lpfc_ncmd->ndlp = NULL;
lpfc_ncmd->flags &= ~LPFC_BUMP_QDEPTH;
if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6310 XB release deferred for "

drivers/scsi/lpfc/lpfc_nvme.h

@@ -86,6 +86,7 @@ struct lpfc_nvme_buf {
uint16_t flags; /* TBD convert exch_busy to flags */
#define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */
#define LPFC_BUMP_QDEPTH 0x2 /* bumped queue depth counter */
uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */
uint16_t status; /* From IOCB Word 7- ulpStatus */
uint16_t cpu;

drivers/scsi/lpfc/lpfc_scsi.c

@@ -995,6 +995,11 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
spin_unlock(&phba->scsi_buf_list_put_lock);
}
spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
atomic_inc(&ndlp->cmd_pending);
lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
}
return lpfc_cmd;
}
/**
@@ -1044,6 +1049,11 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
if (!found)
return NULL;
if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
atomic_inc(&ndlp->cmd_pending);
lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
}
return lpfc_cmd;
}
/**
@@ -1134,7 +1144,10 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
atomic_dec(&psb->ndlp->cmd_pending);
psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
phba->lpfc_release_scsi_buf(phba, psb);
}
@@ -4130,7 +4143,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
spin_lock_irqsave(shost->host_lock, flags);
if (pnode && NLP_CHK_NODE_ACT(pnode)) {
atomic_dec(&pnode->cmd_pending);
if (pnode->cmd_qdepth >
atomic_read(&pnode->cmd_pending) &&
(atomic_read(&pnode->cmd_pending) >
@@ -4143,8 +4155,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
pnode->last_change_time = jiffies;
}
spin_unlock_irqrestore(shost->host_lock, flags);
} else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
atomic_dec(&pnode->cmd_pending);
}
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
@@ -4568,33 +4578,36 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
*/
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
goto out_tgt_busy;
if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
"3377 Target Queue Full, scsi Id:%d Qdepth:%d"
" Pending command:%d"
" WWNN:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x, "
" WWPN:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
ndlp->nlp_sid, ndlp->cmd_qdepth,
atomic_read(&ndlp->cmd_pending),
ndlp->nlp_nodename.u.wwn[0],
ndlp->nlp_nodename.u.wwn[1],
ndlp->nlp_nodename.u.wwn[2],
ndlp->nlp_nodename.u.wwn[3],
ndlp->nlp_nodename.u.wwn[4],
ndlp->nlp_nodename.u.wwn[5],
ndlp->nlp_nodename.u.wwn[6],
ndlp->nlp_nodename.u.wwn[7],
ndlp->nlp_portname.u.wwn[0],
ndlp->nlp_portname.u.wwn[1],
ndlp->nlp_portname.u.wwn[2],
ndlp->nlp_portname.u.wwn[3],
ndlp->nlp_portname.u.wwn[4],
ndlp->nlp_portname.u.wwn[5],
ndlp->nlp_portname.u.wwn[6],
ndlp->nlp_portname.u.wwn[7]);
goto out_tgt_busy;
if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
"3377 Target Queue Full, scsi Id:%d "
"Qdepth:%d Pending command:%d"
" WWNN:%02x:%02x:%02x:%02x:"
"%02x:%02x:%02x:%02x, "
" WWPN:%02x:%02x:%02x:%02x:"
"%02x:%02x:%02x:%02x",
ndlp->nlp_sid, ndlp->cmd_qdepth,
atomic_read(&ndlp->cmd_pending),
ndlp->nlp_nodename.u.wwn[0],
ndlp->nlp_nodename.u.wwn[1],
ndlp->nlp_nodename.u.wwn[2],
ndlp->nlp_nodename.u.wwn[3],
ndlp->nlp_nodename.u.wwn[4],
ndlp->nlp_nodename.u.wwn[5],
ndlp->nlp_nodename.u.wwn[6],
ndlp->nlp_nodename.u.wwn[7],
ndlp->nlp_portname.u.wwn[0],
ndlp->nlp_portname.u.wwn[1],
ndlp->nlp_portname.u.wwn[2],
ndlp->nlp_portname.u.wwn[3],
ndlp->nlp_portname.u.wwn[4],
ndlp->nlp_portname.u.wwn[5],
ndlp->nlp_portname.u.wwn[6],
ndlp->nlp_portname.u.wwn[7]);
goto out_tgt_busy;
}
}
atomic_inc(&ndlp->cmd_pending);
lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
if (lpfc_cmd == NULL) {
@@ -4612,6 +4625,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
*/
lpfc_cmd->pCmd = cmnd;
lpfc_cmd->rdata = rdata;
lpfc_cmd->ndlp = ndlp;
lpfc_cmd->timeout = 0;
lpfc_cmd->start_time = jiffies;
cmnd->host_scribble = (unsigned char *)lpfc_cmd;
@@ -4694,7 +4708,6 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
lpfc_release_scsi_buf(phba, lpfc_cmd);
out_host_busy:
atomic_dec(&ndlp->cmd_pending);
return SCSI_MLQUEUE_HOST_BUSY;
out_tgt_busy:
@@ -5047,6 +5060,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
lpfc_cmd->rdata = rdata;
lpfc_cmd->pCmd = cmnd;
lpfc_cmd->ndlp = pnode;
status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
task_mgmt_cmd);

drivers/scsi/lpfc/lpfc_scsi.h

@@ -134,11 +134,13 @@ struct lpfc_scsi_buf {
struct list_head list;
struct scsi_cmnd *pCmd;
struct lpfc_rport_data *rdata;
struct lpfc_nodelist *ndlp;
uint32_t timeout;
uint16_t flags; /* TBD convert exch_busy to flags */
#define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */
#define LPFC_SBUF_BUMP_QDEPTH 0x8 /* bumped queue depth counter */
uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */
uint16_t status; /* From IOCB Word 7- ulpStatus */
uint32_t result; /* From IOCB Word 4. */