From 74665016086615bbaa3fa6f83af410a0a4e029ee Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Wed, 22 Jan 2014 15:29:29 +0100
Subject: scsi: convert host_busy to atomic_t

Avoid taking the host-wide host_lock to check the per-host queue limit.
Instead we do an atomic_inc_return early on to grab our slot in the queue,
and if necessary decrement it after finishing all checks.

Signed-off-by: Christoph Hellwig
Reviewed-by: Martin K. Petersen
Reviewed-by: Hannes Reinecke
Reviewed-by: Webb Scales
Acked-by: Jens Axboe
Tested-by: Bart Van Assche
Tested-by: Robert Elliott
---
 drivers/scsi/scsi_lib.c | 74 ++++++++++++++++++++++++++++++-------------------
 1 file changed, 45 insertions(+), 29 deletions(-)

(limited to 'drivers/scsi/scsi_lib.c')

diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 0580711c2c5..d0bd7e0ab7a 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -292,14 +292,17 @@ void scsi_device_unbusy(struct scsi_device *sdev)
         struct scsi_target *starget = scsi_target(sdev);
         unsigned long flags;
 
-        spin_lock_irqsave(shost->host_lock, flags);
-        shost->host_busy--;
+        atomic_dec(&shost->host_busy);
         atomic_dec(&starget->target_busy);
+
         if (unlikely(scsi_host_in_recovery(shost) &&
-                     (shost->host_failed || shost->host_eh_scheduled)))
+                     (shost->host_failed || shost->host_eh_scheduled))) {
+                spin_lock_irqsave(shost->host_lock, flags);
                 scsi_eh_wakeup(shost);
-        spin_unlock(shost->host_lock);
-        spin_lock(sdev->request_queue->queue_lock);
+                spin_unlock_irqrestore(shost->host_lock, flags);
+        }
+
+        spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
         sdev->device_busy--;
         spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
 }
@@ -367,7 +370,8 @@ static inline int scsi_target_is_busy(struct scsi_target *starget)
 
 static inline int scsi_host_is_busy(struct Scsi_Host *shost)
 {
-        if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
+        if ((shost->can_queue > 0 &&
+             atomic_read(&shost->host_busy) >= shost->can_queue) ||
             shost->host_blocked || shost->host_self_blocked)
                 return 1;
 
@@ -1334,38 +1338,54 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
                                    struct Scsi_Host *shost,
                                    struct scsi_device *sdev)
 {
-        int ret = 0;
-
-        spin_lock_irq(shost->host_lock);
+        unsigned int busy;
 
         if (scsi_host_in_recovery(shost))
-                goto out;
-        if (shost->host_busy == 0 && shost->host_blocked) {
+                return 0;
+
+        busy = atomic_inc_return(&shost->host_busy) - 1;
+        if (shost->host_blocked) {
+                if (busy)
+                        goto starved;
+
                 /*
                  * unblock after host_blocked iterates to zero
                  */
-                if (--shost->host_blocked != 0)
-                        goto out;
+                spin_lock_irq(shost->host_lock);
+                if (--shost->host_blocked != 0) {
+                        spin_unlock_irq(shost->host_lock);
+                        goto out_dec;
+                }
+                spin_unlock_irq(shost->host_lock);
 
                 SCSI_LOG_MLQUEUE(3,
                         shost_printk(KERN_INFO, shost,
                                      "unblocking host at zero depth\n"));
         }
-        if (scsi_host_is_busy(shost)) {
-                if (list_empty(&sdev->starved_entry))
-                        list_add_tail(&sdev->starved_entry, &shost->starved_list);
-                goto out;
-        }
+
+        if (shost->can_queue > 0 && busy >= shost->can_queue)
+                goto starved;
+        if (shost->host_self_blocked)
+                goto starved;
 
         /* We're OK to process the command, so we can't be starved */
-        if (!list_empty(&sdev->starved_entry))
-                list_del_init(&sdev->starved_entry);
+        if (!list_empty(&sdev->starved_entry)) {
+                spin_lock_irq(shost->host_lock);
+                if (!list_empty(&sdev->starved_entry))
+                        list_del_init(&sdev->starved_entry);
+                spin_unlock_irq(shost->host_lock);
+        }
 
-        shost->host_busy++;
-        ret = 1;
-out:
+        return 1;
+
+starved:
+        spin_lock_irq(shost->host_lock);
+        if (list_empty(&sdev->starved_entry))
+                list_add_tail(&sdev->starved_entry, &shost->starved_list);
         spin_unlock_irq(shost->host_lock);
-        return ret;
+out_dec:
+        atomic_dec(&shost->host_busy);
+        return 0;
 }
 
 /*
@@ -1429,12 +1449,8 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
          * with the locks as normal issue path does.
          */
         sdev->device_busy++;
-        spin_unlock(sdev->request_queue->queue_lock);
-        spin_lock(shost->host_lock);
-        shost->host_busy++;
+        atomic_inc(&shost->host_busy);
         atomic_inc(&starget->target_busy);
-        spin_unlock(shost->host_lock);
-        spin_lock(sdev->request_queue->queue_lock);
 
         blk_complete_request(req);
 }
--
cgit v1.2.3-70-g09d2
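
The core of the change is the optimistic pattern the commit message describes: take a slot with an atomic increment first, then give it back if a later check fails, so the common dispatch path never touches the host-wide lock. Below is a minimal user-space sketch of that pattern, not kernel code: it uses C11 atomics in place of the kernel's atomic_t helpers, and the names (fake_host, fake_host_queue_ready, fake_host_queue_done) are illustrative only.

/*
 * Sketch of "increment first, back out on failure", assuming a host
 * structure with a busy counter and an optional queue depth limit.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_host {
        atomic_uint  busy;       /* commands currently outstanding */
        unsigned int can_queue;  /* 0 means "no limit" */
};

/* Returns true if the caller may dispatch a command. */
static bool fake_host_queue_ready(struct fake_host *host)
{
        /* Grab a slot first; fetch_add returns the value *before* the add. */
        unsigned int busy = atomic_fetch_add(&host->busy, 1);

        if (host->can_queue > 0 && busy >= host->can_queue) {
                /* Over the limit: give the slot back and report busy. */
                atomic_fetch_sub(&host->busy, 1);
                return false;
        }
        return true;
}

static void fake_host_queue_done(struct fake_host *host)
{
        atomic_fetch_sub(&host->busy, 1);  /* command finished */
}

int main(void)
{
        struct fake_host host = { .can_queue = 2 };
        atomic_init(&host.busy, 0);

        printf("%d\n", fake_host_queue_ready(&host));  /* 1 */
        printf("%d\n", fake_host_queue_ready(&host));  /* 1 */
        printf("%d\n", fake_host_queue_ready(&host));  /* 0: queue full */
        fake_host_queue_done(&host);
        printf("%d\n", fake_host_queue_ready(&host));  /* 1 again */
        return 0;
}

One difference from the patch: atomic_fetch_add() returns the pre-increment value directly, while the kernel's atomic_inc_return() returns the value after the increment, which is why scsi_host_queue_ready() computes atomic_inc_return(&shost->host_busy) - 1 to recover the old count.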