scsi: push host_lock down into scsi_{host,target}_queue_ready

Prepare for not taking a host-wide lock in the dispatch path by pushing
the lock down into the places that actually need it.  Note that this
patch is just a preparation step, as it will actually increase lock
roundtrips and thus decrease performance on its own.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Webb Scales <webbnh@hp.com>
Acked-by: Jens Axboe <axboe@kernel.dk>
Tested-by: Bart Van Assche <bvanassche@acm.org>
Tested-by: Robert Elliott <elliott@hp.com>
This commit is contained in:
Christoph Hellwig 2014-01-22 14:36:32 +01:00
parent 3b5382c459
commit cf68d334dd

View file

@@ -1274,18 +1274,18 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
 /*
  * scsi_target_queue_ready: checks if there we can send commands to target
  * @sdev: scsi device on starget to check.
- *
- * Called with the host lock held.
  */
 static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
					   struct scsi_device *sdev)
 {
	struct scsi_target *starget = scsi_target(sdev);
+	int ret = 0;
 
+	spin_lock_irq(shost->host_lock);
	if (starget->single_lun) {
		if (starget->starget_sdev_user &&
		    starget->starget_sdev_user != sdev)
-			return 0;
+			goto out;
		starget->starget_sdev_user = sdev;
	}
@@ -1293,57 +1293,66 @@ static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
		/*
		 * unblock after target_blocked iterates to zero
		 */
-		if (--starget->target_blocked == 0) {
-			SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
-					 "unblocking target at zero depth\n"));
-		} else
-			return 0;
+		if (--starget->target_blocked != 0)
+			goto out;
+
+		SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
+				 "unblocking target at zero depth\n"));
	}
 
	if (scsi_target_is_busy(starget)) {
		list_move_tail(&sdev->starved_entry, &shost->starved_list);
-		return 0;
+		goto out;
	}
 
-	return 1;
+	scsi_target(sdev)->target_busy++;
+	ret = 1;
+out:
+	spin_unlock_irq(shost->host_lock);
+	return ret;
 }
 
 /*
  * scsi_host_queue_ready: if we can send requests to shost, return 1 else
  * return 0. We must end up running the queue again whenever 0 is
  * returned, else IO can hang.
- *
- * Called with host_lock held.
  */
 static inline int scsi_host_queue_ready(struct request_queue *q,
				   struct Scsi_Host *shost,
				   struct scsi_device *sdev)
 {
+	int ret = 0;
+
+	spin_lock_irq(shost->host_lock);
	if (scsi_host_in_recovery(shost))
-		return 0;
+		goto out;
	if (shost->host_busy == 0 && shost->host_blocked) {
		/*
		 * unblock after host_blocked iterates to zero
		 */
-		if (--shost->host_blocked == 0) {
-			SCSI_LOG_MLQUEUE(3,
-				shost_printk(KERN_INFO, shost,
-					     "unblocking host at zero depth\n"));
-		} else {
-			return 0;
-		}
+		if (--shost->host_blocked != 0)
+			goto out;
+
+		SCSI_LOG_MLQUEUE(3,
+			shost_printk(KERN_INFO, shost,
+				     "unblocking host at zero depth\n"));
	}
 
	if (scsi_host_is_busy(shost)) {
		if (list_empty(&sdev->starved_entry))
			list_add_tail(&sdev->starved_entry, &shost->starved_list);
-		return 0;
+		goto out;
	}
 
	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);
 
-	return 1;
+	shost->host_busy++;
+	ret = 1;
+out:
+	spin_unlock_irq(shost->host_lock);
+	return ret;
 }
/* /*
@@ -1524,7 +1533,7 @@ static void scsi_request_fn(struct request_queue *q)
		blk_start_request(req);
		sdev->device_busy++;
 
-		spin_unlock(q->queue_lock);
+		spin_unlock_irq(q->queue_lock);
		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
@@ -1534,7 +1543,6 @@ static void scsi_request_fn(struct request_queue *q)
			blk_dump_rq_flags(req, "foo");
			BUG();
		}
-		spin_lock(shost->host_lock);
 
		/*
		 * We hit this when the driver is using a host wide
@@ -1545,9 +1553,11 @@ static void scsi_request_fn(struct request_queue *q)
		 * a run when a tag is freed.
		 */
		if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
+			spin_lock_irq(shost->host_lock);
			if (list_empty(&sdev->starved_entry))
				list_add_tail(&sdev->starved_entry,
					      &shost->starved_list);
+			spin_unlock_irq(shost->host_lock);
			goto not_ready;
		}
 
@@ -1555,16 +1565,7 @@ static void scsi_request_fn(struct request_queue *q)
			goto not_ready;
 
		if (!scsi_host_queue_ready(q, shost, sdev))
-			goto not_ready;
-
-		scsi_target(sdev)->target_busy++;
-		shost->host_busy++;
-
-		/*
-		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
-		 * take the lock again.
-		 */
-		spin_unlock_irq(shost->host_lock);
+			goto host_not_ready;
 
		/*
		 * Finally, initialize any error handling parameters, and set up
@@ -1587,9 +1588,11 @@ static void scsi_request_fn(struct request_queue *q)
	return;
 
- not_ready:
+ host_not_ready:
+	spin_lock_irq(shost->host_lock);
+	scsi_target(sdev)->target_busy--;
	spin_unlock_irq(shost->host_lock);
-
+ not_ready:
	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.