| author | Vasu Dev <vasu.dev@intel.com> | 2009-02-27 10:54:57 -0800 |
|---|---|---|
| committer | James Bottomley <James.Bottomley@HansenPartnership.com> | 2009-03-06 15:37:49 -0600 |
| commit | bc0e17f691085315ae9303eb5b0883fe16dfe6b1 (patch) | |
| tree | 53138e80aec810604d4eca4626e4f8df65376ccc /drivers/scsi/fcoe | |
| parent | a7e84f2b83f17f8f11da34ccef3ba5a862dc0182 (diff) | |
[SCSI] libfc, fcoe: fixed locking issues with lport->lp_mutex around lport->link_status
fcoe_xmit could call fc_pause when the pending skb queue length exceeded
FCOE_MAX_QUEUE_DEPTH. fc_pause then tried to grab lport->lp_mutex to change
lport->link_status, which had these issues (sketched below):
1. fcoe_xmit is called with bottom halves disabled, so grabbing
lport->lp_mutex there caused "BUG: scheduling while atomic".
2. fc_linkup and fc_linkdown call the lport_enter functions with
lport->lp_mutex held, and those enter functions in turn call fcoe_xmit to
send lport-related FC frames, e.g. fc_linkup => fc_lport_enter_flogi to send
the FLOGI request. Grabbing the same lport->lp_mutex again in fc_pause from
fcoe_xmit would then deadlock.
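For illustration, here is a minimal sketch of the two failure modes. The
names carry a demo_ prefix because they are simplified stand-ins, not the
actual libfc/fcoe implementations:

```c
/* Simplified stand-ins only -- not the real libfc/fcoe sources. */
#include <linux/mutex.h>
#include <linux/types.h>

#define DEMO_FC_PAUSE	(1 << 1)

struct demo_lport {
	struct mutex	lp_mutex;
	u16		link_status;	/* old-style FC_LINK_UP/FC_PAUSE bitmask */
};

/* Old helper (since removed): needs lp_mutex just to flip one bit. */
static void demo_fc_pause(struct demo_lport *lp)
{
	mutex_lock(&lp->lp_mutex);		/* may sleep */
	lp->link_status |= DEMO_FC_PAUSE;
	mutex_unlock(&lp->lp_mutex);
}

/* Transmit path, entered with bottom halves disabled. */
static void demo_fcoe_xmit(struct demo_lport *lp, bool queue_full)
{
	if (queue_full)
		demo_fc_pause(lp);	/* issue 1: sleeping lock in atomic
					 * (bh-disabled) context */
}

/* fc_linkup() -> fc_lport_enter_flogi() -> fcoe_xmit() of the FLOGI frame */
static void demo_fc_linkup(struct demo_lport *lp)
{
	mutex_lock(&lp->lp_mutex);
	demo_fcoe_xmit(lp, true);	/* issue 2: demo_fc_pause() tries to
					 * re-take lp_mutex -> self-deadlock */
	mutex_unlock(&lp->lp_mutex);
}
```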
The lport->lp_mutex was used to set FC_PAUSE in the fcoe_xmit path, but the
FC_PAUSE bit was not used anywhere besides being set and cleared in
lport->link_status. A separate qfull field in fc_lport is used instead,
which eliminates the need for lport->lp_mutex to track the
pending-queue-full condition and in turn avoids the two locking issues
described above.
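Condensed from the diff below, the congestion check at the tail of
fcoe_xmit() becomes a plain store. The wrapper function name here is only
for illustration; it does not exist in the tree:

```c
/* Illustrative condensation of the fcoe_xmit() tail after this patch;
 * the surrounding transmit logic is omitted and the helper name is made up.
 * rc is the return value of the attempted transmit. */
static void fcoe_xmit_queue_check(struct fc_lport *lp, struct fcoe_softc *fc,
				  struct sk_buff *skb, int rc)
{
	if (rc) {
		/* transmit attempt failed: park the skb on the pending queue */
		fcoe_insert_wait_queue(lp, skb);
		if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
			lp->qfull = 1;	/* plain flag: no lp_mutex, safe with
					 * bottom halves disabled */
	}
}
```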
Also added a check of lp->qfull in fc_fcp_lport_queue_ready, so that scsi-ml
gets SCSI_MLQUEUE_HOST_BUSY and stops sending more commands while lp->qfull
is set.
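The fc_fcp.c side of this is outside the diffstat shown here (it is limited
to drivers/scsi/fcoe), so the following is only a sketch of how the
queue-ready check plausibly looks with the new flag:

```c
/* Sketch only: the real change lives in drivers/scsi/libfc/fc_fcp.c,
 * which is not part of the diff shown on this page. */
static inline int fc_fcp_lport_queue_ready(struct fc_lport *lp)
{
	/* Ready only when the lport is up and its pending queue is not full. */
	return (lp->state == LPORT_ST_READY) && lp->link_up && !lp->qfull;
}
```

fc_queuecommand() can then return SCSI_MLQUEUE_HOST_BUSY whenever this check
fails, so scsi-ml backs off until the pending queue drains and lp->qfull is
cleared again.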
This patch eliminates FC_LINK_UP and FC_PAUSE and uses dedicated fields in
fc_lport instead, which simplifies all the related conditional code.
Also removed the fc_pause and fc_unpause functions; fcoe now uses the newly
added lport->qfull directly.
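The corresponding fc_lport change lives in include/scsi/libfc.h, which is
also outside this diffstat; assuming simple flag fields, the shape is
roughly:

```c
/* Rough sketch of the new fields in struct fc_lport (include/scsi/libfc.h,
 * not shown in this diff); the exact types/widths are an assumption. */
struct fc_lport {
	/* ... existing members ... */
	u8	link_up;	/* replaces the FC_LINK_UP bit in link_status */
	u8	qfull;		/* replaces FC_PAUSE: pending queue is full */
	/* ... existing members ... */
};
```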
Signed-off-by: Vasu Dev <vasu.dev@intel.com>
Signed-off-by: Robert Love <robert.w.love@intel.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Diffstat (limited to 'drivers/scsi/fcoe')
| -rw-r--r-- | drivers/scsi/fcoe/fcoe_sw.c | 6 |
| -rw-r--r-- | drivers/scsi/fcoe/libfcoe.c | 41 |
2 files changed, 20 insertions, 27 deletions
diff --git a/drivers/scsi/fcoe/fcoe_sw.c b/drivers/scsi/fcoe/fcoe_sw.c
index dc4cd5e2576..cf83675a0fb 100644
--- a/drivers/scsi/fcoe/fcoe_sw.c
+++ b/drivers/scsi/fcoe/fcoe_sw.c
@@ -116,7 +116,8 @@ static int fcoe_sw_lport_config(struct fc_lport *lp)
 {
 	int i = 0;
 
-	lp->link_status = 0;
+	lp->link_up = 0;
+	lp->qfull = 0;
 	lp->max_retry_count = 3;
 	lp->e_d_tov = 2 * 1000;	/* FC-FS default */
 	lp->r_a_tov = 2 * 2 * 1000;
@@ -181,9 +182,8 @@ static int fcoe_sw_netdev_config(struct fc_lport *lp, struct net_device *netdev)
 	if (fc_set_mfs(lp, mfs))
 		return -EINVAL;
 
-	lp->link_status = ~FC_PAUSE & ~FC_LINK_UP;
 	if (!fcoe_link_ok(lp))
-		lp->link_status |= FC_LINK_UP;
+		lp->link_up = 1;
 
 	/* offload features support */
 	if (fc->real_dev->features & NETIF_F_SG)
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index e419f486cdb..296071043f5 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -504,7 +504,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
 	if (rc) {
 		fcoe_insert_wait_queue(lp, skb);
 		if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
-			fc_pause(lp);
+			lp->qfull = 1;
 	}
 
 	return 0;
@@ -718,7 +718,7 @@ static void fcoe_recv_flogi(struct fcoe_softc *fc, struct fc_frame *fp, u8 *sa)
  * fcoe_watchdog - fcoe timer callback
  * @vp:
  *
- * This checks the pending queue length for fcoe and put fcoe to be paused state
+ * This checks the pending queue length for fcoe and set lport qfull
  * if the FCOE_MAX_QUEUE_DEPTH is reached. This is done for all fc_lport on the
  * fcoe_hostlist.
  *
@@ -728,17 +728,17 @@ void fcoe_watchdog(ulong vp)
 {
 	struct fc_lport *lp;
 	struct fcoe_softc *fc;
-	int paused = 0;
+	int qfilled = 0;
 
 	read_lock(&fcoe_hostlist_lock);
 	list_for_each_entry(fc, &fcoe_hostlist, list) {
 		lp = fc->lp;
 		if (lp) {
 			if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
-				paused = 1;
+				qfilled = 1;
 			if (fcoe_check_wait_queue(lp) < FCOE_MAX_QUEUE_DEPTH) {
-				if (paused)
-					fc_unpause(lp);
+				if (qfilled)
+					lp->qfull = 0;
 			}
 		}
 	}
@@ -767,8 +767,7 @@ void fcoe_watchdog(ulong vp)
  **/
 static int fcoe_check_wait_queue(struct fc_lport *lp)
 {
-	int rc, unpause = 0;
-	int paused = 0;
+	int rc;
 	struct sk_buff *skb;
 	struct fcoe_softc *fc;
 
@@ -776,10 +775,10 @@ static int fcoe_check_wait_queue(struct fc_lport *lp)
 	spin_lock_bh(&fc->fcoe_pending_queue.lock);
 
 	/*
-	 * is this interface paused?
+	 * if interface pending queue full then set qfull in lport.
 	 */
 	if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
-		paused = 1;
+		lp->qfull = 1;
 	if (fc->fcoe_pending_queue.qlen) {
 		while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
 			spin_unlock_bh(&fc->fcoe_pending_queue.lock);
@@ -791,11 +790,9 @@ static int fcoe_check_wait_queue(struct fc_lport *lp)
 			spin_lock_bh(&fc->fcoe_pending_queue.lock);
 		}
 		if (fc->fcoe_pending_queue.qlen < FCOE_MAX_QUEUE_DEPTH)
-			unpause = 1;
+			lp->qfull = 0;
 	}
 	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
-	if ((unpause) && (paused))
-		fc_unpause(lp);
 	return fc->fcoe_pending_queue.qlen;
 }
 
@@ -873,7 +870,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
 	struct net_device *real_dev = ptr;
 	struct fcoe_softc *fc;
 	struct fcoe_dev_stats *stats;
-	u16 new_status;
+	u32 new_link_up;
 	u32 mfs;
 	int rc = NOTIFY_OK;
 
@@ -890,17 +887,15 @@ static int fcoe_device_notification(struct notifier_block *notifier,
 		goto out;
 	}
 
-	new_status = lp->link_status;
+	new_link_up = lp->link_up;
 	switch (event) {
 	case NETDEV_DOWN:
 	case NETDEV_GOING_DOWN:
-		new_status &= ~FC_LINK_UP;
+		new_link_up = 0;
 		break;
 	case NETDEV_UP:
 	case NETDEV_CHANGE:
-		new_status &= ~FC_LINK_UP;
-		if (!fcoe_link_ok(lp))
-			new_status |= FC_LINK_UP;
+		new_link_up = !fcoe_link_ok(lp);
 		break;
 	case NETDEV_CHANGEMTU:
 		mfs = fc->real_dev->mtu -
@@ -908,17 +903,15 @@ static int fcoe_device_notification(struct notifier_block *notifier,
 			sizeof(struct fcoe_crc_eof));
 		if (mfs >= FC_MIN_MAX_FRAME)
 			fc_set_mfs(lp, mfs);
-		new_status &= ~FC_LINK_UP;
-		if (!fcoe_link_ok(lp))
-			new_status |= FC_LINK_UP;
+		new_link_up = !fcoe_link_ok(lp);
 		break;
 	case NETDEV_REGISTER:
 		break;
 	default:
 		FC_DBG("unknown event %ld call", event);
 	}
-	if (lp->link_status != new_status) {
-		if ((new_status & FC_LINK_UP) == FC_LINK_UP)
+	if (lp->link_up != new_link_up) {
+		if (new_link_up)
 			fc_linkup(lp);
 		else {
 			stats = lp->dev_stats[smp_processor_id()];