| author | James Smart <James.Smart@Emulex.Com> | 2008-06-14 22:52:53 -0400 |
|---|---|---|
| committer | James Bottomley <James.Bottomley@HansenPartnership.com> | 2008-07-12 08:22:28 -0500 |
| commit | 5e9d9b8276980fc5dfa88ce34f6ec88ce3026232 (patch) | |
| tree | 30b495edab629068f929a32f88a66ad705687f34 /drivers/scsi/lpfc/lpfc_hbadisc.c | |
| parent | 0d2b6b83030d6a88cbf7db57f84f2daf0e0b251b (diff) | |
[SCSI] lpfc 8.2.7 : Rework the worker thread
Rework the worker thread to make it more efficient.
Use finer-grained notification of pending work so that less time is
spent checking wakeup conditions. Also make other general cleanups.
Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
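The "finer-grained notification" in this patch replaces a wakeup predicate that re-scanned every vport and ring under a spinlock with a single pending-work flag (LPFC_DATA_READY) that producers set before waking the worker. As a rough userspace analogue of that pattern (illustrative only, not lpfc code; all names below are made up), the scheme looks like this:

```c
/* Userspace analogue of the reworked notification scheme: producers set a
 * single "data ready" flag and signal once; the worker sleeps until the flag
 * is set instead of re-scanning every work source on each wakeup.
 * Build with: cc -pthread example.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t waitq = PTHREAD_COND_INITIALIZER;
static atomic_int data_ready;   /* analogue of LPFC_DATA_READY in data_flags */
static atomic_int should_stop;  /* analogue of kthread_should_stop() */

static void worker_wake_up(void)
{
	pthread_mutex_lock(&lock);
	atomic_store(&data_ready, 1);   /* mark pending work ... */
	pthread_cond_signal(&waitq);    /* ... then wake the worker */
	pthread_mutex_unlock(&lock);
}

static void *do_work(void *arg)
{
	(void)arg;
	for (;;) {
		pthread_mutex_lock(&lock);
		/* Sleep until a producer flags pending work or asks us to
		 * stop; atomic_exchange() is the test_and_clear_bit()
		 * analogue. */
		while (!atomic_exchange(&data_ready, 0) &&
		       !atomic_load(&should_stop))
			pthread_cond_wait(&waitq, &lock);
		pthread_mutex_unlock(&lock);

		if (atomic_load(&should_stop))
			break;
		puts("worker: handling queued events");
	}
	return NULL;
}

int main(void)
{
	pthread_t thr;

	pthread_create(&thr, NULL, do_work, NULL);
	worker_wake_up();               /* a producer posts work */

	pthread_mutex_lock(&lock);      /* later: request shutdown */
	atomic_store(&should_stop, 1);
	pthread_cond_signal(&waitq);
	pthread_mutex_unlock(&lock);

	pthread_join(thr, NULL);
	return 0;
}
```

The payoff is that the worker's wait condition becomes a cheap test-and-clear of one flag rather than a lock-protected walk over all work sources, which is what the removed check_work_wait_done() below used to do.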
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_hbadisc.c')
-rw-r--r-- | drivers/scsi/lpfc/lpfc_hbadisc.c | 93 |
1 file changed, 23 insertions(+), 70 deletions(-)
```diff
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index f3dc19dfac5..ba4873c9e2c 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -153,11 +153,11 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
 	 * count until this queued work is done
 	 */
 	evtp->evt_arg1 = lpfc_nlp_get(ndlp);
-	evtp->evt = LPFC_EVT_DEV_LOSS;
-	list_add_tail(&evtp->evt_listp, &phba->work_list);
-	if (phba->work_wait)
-		wake_up(phba->work_wait);
-
+	if (evtp->evt_arg1) {
+		evtp->evt = LPFC_EVT_DEV_LOSS;
+		list_add_tail(&evtp->evt_listp, &phba->work_list);
+		lpfc_worker_wake_up(phba);
+	}
 	spin_unlock_irq(&phba->hbalock);
 
 	return;
@@ -276,14 +276,6 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
 		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
 }
 
-
-void
-lpfc_worker_wake_up(struct lpfc_hba *phba)
-{
-	wake_up(phba->work_wait);
-	return;
-}
-
 static void
 lpfc_work_list_done(struct lpfc_hba *phba)
 {
@@ -429,6 +421,8 @@ lpfc_work_done(struct lpfc_hba *phba)
 		    || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
 			if (pring->flag & LPFC_STOP_IOCB_EVENT) {
 				pring->flag |= LPFC_DEFERRED_RING_EVENT;
+				/* Set the lpfc data pending flag */
+				set_bit(LPFC_DATA_READY, &phba->data_flags);
 			} else {
 				pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
 				lpfc_sli_handle_slow_ring_event(phba, pring,
@@ -459,69 +453,29 @@ lpfc_work_done(struct lpfc_hba *phba)
 	lpfc_work_list_done(phba);
 }
 
-static int
-check_work_wait_done(struct lpfc_hba *phba)
-{
-	struct lpfc_vport *vport;
-	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
-	int rc = 0;
-
-	spin_lock_irq(&phba->hbalock);
-	list_for_each_entry(vport, &phba->port_list, listentry) {
-		if (vport->work_port_events) {
-			rc = 1;
-			break;
-		}
-	}
-	if (rc || phba->work_ha || (!list_empty(&phba->work_list)) ||
-	    kthread_should_stop() || pring->flag & LPFC_DEFERRED_RING_EVENT) {
-		rc = 1;
-		phba->work_found++;
-	} else
-		phba->work_found = 0;
-	spin_unlock_irq(&phba->hbalock);
-	return rc;
-}
-
-
 int
 lpfc_do_work(void *p)
 {
 	struct lpfc_hba *phba = p;
 	int rc;
-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(work_waitq);
 
 	set_user_nice(current, -20);
-	phba->work_wait = &work_waitq;
-	phba->work_found = 0;
+	phba->data_flags = 0;
 
 	while (1) {
-
-		rc = wait_event_interruptible(work_waitq,
-					      check_work_wait_done(phba));
-
+		/* wait and check worker queue activities */
+		rc = wait_event_interruptible(phba->work_waitq,
					(test_and_clear_bit(LPFC_DATA_READY,
							    &phba->data_flags)
					 || kthread_should_stop()));
 		BUG_ON(rc);
 
 		if (kthread_should_stop())
 			break;
 
+		/* Attend pending lpfc data processing */
 		lpfc_work_done(phba);
-
-		/* If there is alot of slow ring work, like during link up
-		 * check_work_wait_done() may cause this thread to not give
-		 * up the CPU for very long periods of time. This may cause
-		 * soft lockups or other problems. To avoid these situations
-		 * give up the CPU here after LPFC_MAX_WORKER_ITERATION
-		 * consecutive iterations.
-		 */
-		if (phba->work_found >= LPFC_MAX_WORKER_ITERATION) {
-			phba->work_found = 0;
-			schedule();
-		}
 	}
-	spin_lock_irq(&phba->hbalock);
-	phba->work_wait = NULL;
-	spin_unlock_irq(&phba->hbalock);
 	return 0;
 }
 
@@ -551,10 +505,10 @@ lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
 
 	spin_lock_irqsave(&phba->hbalock, flags);
 	list_add_tail(&evtp->evt_listp, &phba->work_list);
-	if (phba->work_wait)
-		lpfc_worker_wake_up(phba);
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 
+	lpfc_worker_wake_up(phba);
+
 	return 1;
 }
 
@@ -2636,21 +2590,20 @@ lpfc_disc_timeout(unsigned long ptr)
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
 	struct lpfc_hba *phba = vport->phba;
+	uint32_t tmo_posted;
 	unsigned long flags = 0;
 
 	if (unlikely(!phba))
 		return;
 
-	if ((vport->work_port_events & WORKER_DISC_TMO) == 0) {
-		spin_lock_irqsave(&vport->work_port_lock, flags);
+	spin_lock_irqsave(&vport->work_port_lock, flags);
+	tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
+	if (!tmo_posted)
 		vport->work_port_events |= WORKER_DISC_TMO;
-		spin_unlock_irqrestore(&vport->work_port_lock, flags);
+	spin_unlock_irqrestore(&vport->work_port_lock, flags);
 
-		spin_lock_irqsave(&phba->hbalock, flags);
-		if (phba->work_wait)
-			lpfc_worker_wake_up(phba);
-		spin_unlock_irqrestore(&phba->hbalock, flags);
-	}
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
 
 	return;
 }
```
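This file's diff removes the old lpfc_worker_wake_up() and adds callers of a new one, but the replacement definition lives outside this diffstat-limited view. Judging from the LPFC_DATA_READY / work_waitq usage in lpfc_do_work() above, the new helper presumably looks like the following sketch (inferred, not taken from the patch):

```c
/* Sketch only: the new lpfc_worker_wake_up() is defined outside this
 * file's diff, so this body is inferred from the callers above and the
 * wait condition in lpfc_do_work(), not quoted from the patch.
 */
void
lpfc_worker_wake_up(struct lpfc_hba *phba)
{
	/* Mark work pending before waking the thread, matching the
	 * test_and_clear_bit(LPFC_DATA_READY, ...) in lpfc_do_work(). */
	set_bit(LPFC_DATA_READY, &phba->data_flags);
	wake_up(&phba->work_waitq);
}
```

Note also that lpfc_workq_post_event() now calls the helper after dropping hbalock: because the pending state is carried by the atomic LPFC_DATA_READY bit rather than by phba->work_wait, which could only be read safely under the lock, the wake-up no longer has to happen inside the spinlock's critical section.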