Diffstat (limited to 'drivers/scsi/libfc')
-rw-r--r--   drivers/scsi/libfc/Makefile    |    4
-rw-r--r--   drivers/scsi/libfc/fc_disc.c   |   85
-rw-r--r--   drivers/scsi/libfc/fc_elsct.c  |   79
-rw-r--r--   drivers/scsi/libfc/fc_exch.c   |  932
-rw-r--r--   drivers/scsi/libfc/fc_fcp.c    | 1044
-rw-r--r--   drivers/scsi/libfc/fc_frame.c  |   13
-rw-r--r--   drivers/scsi/libfc/fc_libfc.c  |  134
-rw-r--r--   drivers/scsi/libfc/fc_libfc.h  |  112
-rw-r--r--   drivers/scsi/libfc/fc_lport.c  |  855
-rw-r--r--   drivers/scsi/libfc/fc_npiv.c   |  161
-rw-r--r--   drivers/scsi/libfc/fc_rport.c  |  409
11 files changed, 2539 insertions, 1289 deletions
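Among other cleanups, this patch adds fc_exch_mgr_list_clone() and fc_vport_id_lookup() for NPIV support and keeps the exchange-layer entry points routed through the lport transport template via fc_exch_init(). The snippet below is a minimal illustrative sketch, not part of the diff, of how a VN_Port setup path (such as the new fc_npiv.c, whose body is not shown in this hunk) might use those two helpers; the wrapper function name and error-handling shape are assumptions.

/*
 * Illustrative sketch only: wiring up a new NPIV VN_Port lport so it
 * shares the physical port's exchange managers. example_vn_port_setup()
 * is a hypothetical caller; fc_exch_init() and fc_exch_mgr_list_clone()
 * are the helpers visible in this patch.
 */
static int example_vn_port_setup(struct fc_lport *phys_lport,
				 struct fc_lport *vn_lport)
{
	int rc;

	/* Fill in the default exchange-layer handlers in vn_lport->tt. */
	rc = fc_exch_init(vn_lport);
	if (rc)
		return rc;

	/*
	 * Take a reference on every exchange manager anchored to the
	 * physical port so the VN_Port allocates exchanges from the
	 * same pools.
	 */
	rc = fc_exch_mgr_list_clone(phys_lport, vn_lport);
	if (rc)
		return rc;	/* -ENOMEM; clone already removed its anchors */

	return 0;
}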
diff --git a/drivers/scsi/libfc/Makefile b/drivers/scsi/libfc/Makefile index 55f982de3a9..4bb23ac86a5 100644 --- a/drivers/scsi/libfc/Makefile +++ b/drivers/scsi/libfc/Makefile @@ -3,10 +3,12 @@ obj-$(CONFIG_LIBFC) += libfc.o libfc-objs := \ + fc_libfc.o \ fc_disc.o \ fc_exch.o \ fc_elsct.o \ fc_frame.o \ fc_lport.o \ fc_rport.o \ - fc_fcp.o + fc_fcp.o \ + fc_npiv.o diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index c48799e9dd8..9b0a5192a96 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -40,6 +40,8 @@ #include <scsi/libfc.h> +#include "fc_libfc.h" + #define FC_DISC_RETRY_LIMIT 3 /* max retries */ #define FC_DISC_RETRY_DELAY 500UL /* (msecs) delay */ @@ -51,8 +53,8 @@ static int fc_disc_single(struct fc_lport *, struct fc_disc_port *); static void fc_disc_restart(struct fc_disc *); /** - * fc_disc_stop_rports() - delete all the remote ports associated with the lport - * @disc: The discovery job to stop rports on + * fc_disc_stop_rports() - Delete all the remote ports associated with the lport + * @disc: The discovery job to stop remote ports on * * Locking Note: This function expects that the lport mutex is locked before * calling it. @@ -72,9 +74,9 @@ void fc_disc_stop_rports(struct fc_disc *disc) /** * fc_disc_recv_rscn_req() - Handle Registered State Change Notification (RSCN) - * @sp: Current sequence of the RSCN exchange - * @fp: RSCN Frame - * @lport: Fibre Channel host port instance + * @sp: The sequence of the RSCN exchange + * @fp: The RSCN frame + * @lport: The local port that the request will be sent on * * Locking Note: This function expects that the disc_mutex is locked * before it is called. @@ -183,9 +185,9 @@ reject: /** * fc_disc_recv_req() - Handle incoming requests - * @sp: Current sequence of the request exchange - * @fp: The frame - * @lport: The FC local port + * @sp: The sequence of the request exchange + * @fp: The request frame + * @lport: The local port receiving the request * * Locking Note: This function is called from the EM and will lock * the disc_mutex before calling the handler for the @@ -213,7 +215,7 @@ static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp, /** * fc_disc_restart() - Restart discovery - * @lport: FC discovery context + * @disc: The discovery object to be restarted * * Locking Note: This function expects that the disc mutex * is already locked. @@ -240,9 +242,9 @@ static void fc_disc_restart(struct fc_disc *disc) } /** - * fc_disc_start() - Fibre Channel Target discovery - * @lport: FC local port - * @disc_callback: function to be called when discovery is complete + * fc_disc_start() - Start discovery on a local port + * @lport: The local port to have discovery started on + * @disc_callback: Callback function to be called when discovery is complete */ static void fc_disc_start(void (*disc_callback)(struct fc_lport *, enum fc_disc_event), @@ -263,8 +265,8 @@ static void fc_disc_start(void (*disc_callback)(struct fc_lport *, /** * fc_disc_done() - Discovery has been completed - * @disc: FC discovery context - * @event: discovery completion status + * @disc: The discovery context + * @event: The discovery completion status * * Locking Note: This function expects that the disc mutex is locked before * it is called. The discovery callback is then made with the lock released, @@ -284,8 +286,8 @@ static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event) } /* - * Go through all remote ports. 
If they were found in the latest - * discovery, reverify or log them in. Otherwise, log them out. + * Go through all remote ports. If they were found in the latest + * discovery, reverify or log them in. Otherwise, log them out. * Skip ports which were never discovered. These are the dNS port * and ports which were created by PLOGI. */ @@ -305,8 +307,8 @@ static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event) /** * fc_disc_error() - Handle error on dNS request - * @disc: FC discovery context - * @fp: The frame pointer + * @disc: The discovery context + * @fp: The error code encoded as a frame pointer */ static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp) { @@ -342,7 +344,7 @@ static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp) /** * fc_disc_gpn_ft_req() - Send Get Port Names by FC-4 type (GPN_FT) request - * @lport: FC discovery context + * @lport: The discovery context * * Locking Note: This function expects that the disc_mutex is locked * before it is called. @@ -368,17 +370,17 @@ static void fc_disc_gpn_ft_req(struct fc_disc *disc) if (lport->tt.elsct_send(lport, 0, fp, FC_NS_GPN_FT, fc_disc_gpn_ft_resp, - disc, lport->e_d_tov)) + disc, 3 * lport->r_a_tov)) return; err: - fc_disc_error(disc, fp); + fc_disc_error(disc, NULL); } /** * fc_disc_gpn_ft_parse() - Parse the body of the dNS GPN_FT response. - * @lport: Fibre Channel host port instance - * @buf: GPN_FT response buffer - * @len: size of response buffer + * @lport: The local port the GPN_FT was received on + * @buf: The GPN_FT response buffer + * @len: The size of response buffer * * Goes through the list of IDs and names resulting from a request. */ @@ -477,10 +479,8 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len) } /** - * fc_disc_timeout() - Retry handler for the disc component - * @work: Structure holding disc obj that needs retry discovery - * - * Handle retry of memory allocation for remote ports. + * fc_disc_timeout() - Handler for discovery timeouts + * @work: Structure holding discovery context that needs to retry discovery */ static void fc_disc_timeout(struct work_struct *work) { @@ -494,9 +494,9 @@ static void fc_disc_timeout(struct work_struct *work) /** * fc_disc_gpn_ft_resp() - Handle a response frame from Get Port Names (GPN_FT) - * @sp: Current sequence of GPN_FT exchange - * @fp: response frame - * @lp_arg: Fibre Channel host port instance + * @sp: The sequence that the GPN_FT response was received on + * @fp: The GPN_FT response frame + * @lp_arg: The discovery context * * Locking Note: This function is called without disc mutex held, and * should do all its processing with the mutex held @@ -567,9 +567,9 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp, /** * fc_disc_gpn_id_resp() - Handle a response frame from Get Port Names (GPN_ID) - * @sp: exchange sequence - * @fp: response frame - * @rdata_arg: remote port private data + * @sp: The sequence the GPN_ID is on + * @fp: The response frame + * @rdata_arg: The remote port that sent the GPN_ID response * * Locking Note: This function is called without disc mutex held. 
*/ @@ -637,7 +637,7 @@ out: /** * fc_disc_gpn_id_req() - Send Get Port Names by ID (GPN_ID) request - * @lport: local port + * @lport: The local port to initiate discovery on * @rdata: remote port private data * * Locking Note: This function expects that the disc_mutex is locked @@ -654,7 +654,8 @@ static int fc_disc_gpn_id_req(struct fc_lport *lport, if (!fp) return -ENOMEM; if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, FC_NS_GPN_ID, - fc_disc_gpn_id_resp, rdata, lport->e_d_tov)) + fc_disc_gpn_id_resp, rdata, + 3 * lport->r_a_tov)) return -ENOMEM; kref_get(&rdata->kref); return 0; @@ -662,8 +663,8 @@ static int fc_disc_gpn_id_req(struct fc_lport *lport, /** * fc_disc_single() - Discover the directory information for a single target - * @lport: local port - * @dp: The port to rediscover + * @lport: The local port the remote port is associated with + * @dp: The port to rediscover * * Locking Note: This function expects that the disc_mutex is locked * before it is called. @@ -681,7 +682,7 @@ static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp) /** * fc_disc_stop() - Stop discovery for a given lport - * @lport: The lport that discovery should stop for + * @lport: The local port that discovery should stop on */ void fc_disc_stop(struct fc_lport *lport) { @@ -695,7 +696,7 @@ void fc_disc_stop(struct fc_lport *lport) /** * fc_disc_stop_final() - Stop discovery for a given lport - * @lport: The lport that discovery should stop for + * @lport: The lport that discovery should stop on * * This function will block until discovery has been * completely stopped and all rports have been deleted. @@ -707,8 +708,8 @@ void fc_disc_stop_final(struct fc_lport *lport) } /** - * fc_disc_init() - Initialize the discovery block - * @lport: FC local port + * fc_disc_init() - Initialize the discovery layer for a local port + * @lport: The local port that needs the discovery layer to be initialized */ int fc_disc_init(struct fc_lport *lport) { diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c index 5cfa68732e9..53748724f2c 100644 --- a/drivers/scsi/libfc/fc_elsct.c +++ b/drivers/scsi/libfc/fc_elsct.c @@ -28,17 +28,22 @@ #include <scsi/libfc.h> #include <scsi/fc_encode.h> -/* - * fc_elsct_send - sends ELS/CT frame +/** + * fc_elsct_send() - Send an ELS or CT frame + * @lport: The local port to send the frame on + * @did: The destination ID for the frame + * @fp: The frame to be sent + * @op: The operational code + * @resp: The callback routine when the response is received + * @arg: The argument to pass to the response callback routine + * @timer_msec: The timeout period for the frame (in msecs) */ -static struct fc_seq *fc_elsct_send(struct fc_lport *lport, - u32 did, - struct fc_frame *fp, - unsigned int op, - void (*resp)(struct fc_seq *, - struct fc_frame *fp, - void *arg), - void *arg, u32 timer_msec) +struct fc_seq *fc_elsct_send(struct fc_lport *lport, u32 did, + struct fc_frame *fp, unsigned int op, + void (*resp)(struct fc_seq *, + struct fc_frame *, + void *), + void *arg, u32 timer_msec) { enum fc_rctl r_ctl; enum fc_fh_type fh_type; @@ -53,15 +58,22 @@ static struct fc_seq *fc_elsct_send(struct fc_lport *lport, did = FC_FID_DIR_SERV; } - if (rc) + if (rc) { + fc_frame_free(fp); return NULL; + } fc_fill_fc_hdr(fp, r_ctl, did, fc_host_port_id(lport->host), fh_type, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); return lport->tt.exch_seq_send(lport, fp, resp, NULL, arg, timer_msec); } +EXPORT_SYMBOL(fc_elsct_send); +/** + * fc_elsct_init() - 
Initialize the ELS/CT layer + * @lport: The local port to initialize the ELS/CT layer for + */ int fc_elsct_init(struct fc_lport *lport) { if (!lport->tt.elsct_send) @@ -72,12 +84,15 @@ int fc_elsct_init(struct fc_lport *lport) EXPORT_SYMBOL(fc_elsct_init); /** - * fc_els_resp_type() - return string describing ELS response for debug. - * @fp: frame pointer with possible error code. + * fc_els_resp_type() - Return a string describing the ELS response + * @fp: The frame pointer or possible error code */ const char *fc_els_resp_type(struct fc_frame *fp) { const char *msg; + struct fc_frame_header *fh; + struct fc_ct_hdr *ct; + if (IS_ERR(fp)) { switch (-PTR_ERR(fp)) { case FC_NO_ERR: @@ -94,15 +109,41 @@ const char *fc_els_resp_type(struct fc_frame *fp) break; } } else { - switch (fc_frame_payload_op(fp)) { - case ELS_LS_ACC: - msg = "accept"; + fh = fc_frame_header_get(fp); + switch (fh->fh_type) { + case FC_TYPE_ELS: + switch (fc_frame_payload_op(fp)) { + case ELS_LS_ACC: + msg = "accept"; + break; + case ELS_LS_RJT: + msg = "reject"; + break; + default: + msg = "response unknown ELS"; + break; + } break; - case ELS_LS_RJT: - msg = "reject"; + case FC_TYPE_CT: + ct = fc_frame_payload_get(fp, sizeof(*ct)); + if (ct) { + switch (ntohs(ct->ct_cmd)) { + case FC_FS_ACC: + msg = "CT accept"; + break; + case FC_FS_RJT: + msg = "CT reject"; + break; + default: + msg = "response unknown CT"; + break; + } + } else { + msg = "short CT response"; + } break; default: - msg = "response unknown ELS"; + msg = "response not ELS or CT"; break; } } diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index c1c15748220..19d711cb938 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -32,10 +32,13 @@ #include <scsi/libfc.h> #include <scsi/fc_encode.h> +#include "fc_libfc.h" + u16 fc_cpu_mask; /* cpu mask for possible cpus */ EXPORT_SYMBOL(fc_cpu_mask); static u16 fc_cpu_order; /* 2's power to represent total possible cpus */ -static struct kmem_cache *fc_em_cachep; /* cache for exchanges */ +static struct kmem_cache *fc_em_cachep; /* cache for exchanges */ +struct workqueue_struct *fc_exch_workqueue; /* * Structure and function definitions for managing Fibre Channel Exchanges @@ -50,35 +53,46 @@ static struct kmem_cache *fc_em_cachep; /* cache for exchanges */ * fc_seq holds the state for an individual sequence. */ -/* - * Per cpu exchange pool +/** + * struct fc_exch_pool - Per cpu exchange pool + * @next_index: Next possible free exchange index + * @total_exches: Total allocated exchanges + * @lock: Exch pool lock + * @ex_list: List of exchanges * * This structure manages per cpu exchanges in array of exchange pointers. * This array is allocated followed by struct fc_exch_pool memory for * assigned range of exchanges to per cpu pool. */ struct fc_exch_pool { - u16 next_index; /* next possible free exchange index */ - u16 total_exches; /* total allocated exchanges */ - spinlock_t lock; /* exch pool lock */ - struct list_head ex_list; /* allocated exchanges list */ + u16 next_index; + u16 total_exches; + spinlock_t lock; + struct list_head ex_list; }; -/* - * Exchange manager. +/** + * struct fc_exch_mgr - The Exchange Manager (EM). 
+ * @class: Default class for new sequences + * @kref: Reference counter + * @min_xid: Minimum exchange ID + * @max_xid: Maximum exchange ID + * @ep_pool: Reserved exchange pointers + * @pool_max_index: Max exch array index in exch pool + * @pool: Per cpu exch pool + * @stats: Statistics structure * * This structure is the center for creating exchanges and sequences. * It manages the allocation of exchange IDs. */ struct fc_exch_mgr { - enum fc_class class; /* default class for sequences */ - struct kref kref; /* exchange mgr reference count */ - u16 min_xid; /* min exchange ID */ - u16 max_xid; /* max exchange ID */ - struct list_head ex_list; /* allocated exchanges list */ - mempool_t *ep_pool; /* reserve ep's */ - u16 pool_max_index; /* max exch array index in exch pool */ - struct fc_exch_pool *pool; /* per cpu exch pool */ + enum fc_class class; + struct kref kref; + u16 min_xid; + u16 max_xid; + mempool_t *ep_pool; + u16 pool_max_index; + struct fc_exch_pool *pool; /* * currently exchange mgr stats are updated but not used. @@ -96,6 +110,18 @@ struct fc_exch_mgr { }; #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq) +/** + * struct fc_exch_mgr_anchor - primary structure for list of EMs + * @ema_list: Exchange Manager Anchor list + * @mp: Exchange Manager associated with this anchor + * @match: Routine to determine if this anchor's EM should be used + * + * When walking the list of anchors the match routine will be called + * for each anchor to determine if that EM should be used. The last + * anchor in the list will always match to handle any exchanges not + * handled by other EMs. The non-default EMs would be added to the + * anchor list by HW that provides FCoE offloads. + */ struct fc_exch_mgr_anchor { struct list_head ema_list; struct fc_exch_mgr *mp; @@ -108,7 +134,6 @@ static void fc_seq_ls_rjt(struct fc_seq *, enum fc_els_rjt_reason, enum fc_els_rjt_explan); static void fc_exch_els_rec(struct fc_seq *, struct fc_frame *); static void fc_exch_els_rrq(struct fc_seq *, struct fc_frame *); -static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp); /* * Internal implementation notes. @@ -196,6 +221,15 @@ static char *fc_exch_rctl_names[] = FC_RCTL_NAMES_INIT; #define FC_TABLE_SIZE(x) (sizeof(x) / sizeof(x[0])) +/** + * fc_exch_name_lookup() - Lookup name by opcode + * @op: Opcode to be looked up + * @table: Opcode/name table + * @max_index: Index not to be exceeded + * + * This routine is used to determine a human-readable string identifying + * a R_CTL opcode. + */ static inline const char *fc_exch_name_lookup(unsigned int op, char **table, unsigned int max_index) { @@ -208,25 +242,34 @@ static inline const char *fc_exch_name_lookup(unsigned int op, char **table, return name; } +/** + * fc_exch_rctl_name() - Wrapper routine for fc_exch_name_lookup() + * @op: The opcode to be looked up + */ static const char *fc_exch_rctl_name(unsigned int op) { return fc_exch_name_lookup(op, fc_exch_rctl_names, FC_TABLE_SIZE(fc_exch_rctl_names)); } -/* - * Hold an exchange - keep it from being freed. +/** + * fc_exch_hold() - Increment an exchange's reference count + * @ep: Echange to be held */ -static void fc_exch_hold(struct fc_exch *ep) +static inline void fc_exch_hold(struct fc_exch *ep) { atomic_inc(&ep->ex_refcnt); } -/* - * setup fc hdr by initializing few more FC header fields and sof/eof. 
- * Initialized fields by this func: - * - fh_ox_id, fh_rx_id, fh_seq_id, fh_seq_cnt - * - sof and eof +/** + * fc_exch_setup_hdr() - Initialize a FC header by initializing some fields + * and determine SOF and EOF. + * @ep: The exchange to that will use the header + * @fp: The frame whose header is to be modified + * @f_ctl: F_CTL bits that will be used for the frame header + * + * The fields initialized by this routine are: fh_ox_id, fh_rx_id, + * fh_seq_id, fh_seq_cnt and the SOF and EOF. */ static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp, u32 f_ctl) @@ -243,7 +286,7 @@ static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp, if (fc_sof_needs_ack(ep->class)) fr_eof(fp) = FC_EOF_N; /* - * Form f_ctl. + * From F_CTL. * The number of fill bytes to make the length a 4-byte * multiple is the low order 2-bits of the f_ctl. * The fill itself will have been cleared by the frame @@ -273,10 +316,12 @@ static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp, fh->fh_seq_cnt = htons(ep->seq.cnt); } - -/* - * Release a reference to an exchange. - * If the refcnt goes to zero and the exchange is complete, it is freed. +/** + * fc_exch_release() - Decrement an exchange's reference count + * @ep: Exchange to be released + * + * If the reference count reaches zero and the exchange is complete, + * it is freed. */ static void fc_exch_release(struct fc_exch *ep) { @@ -291,6 +336,10 @@ static void fc_exch_release(struct fc_exch *ep) } } +/** + * fc_exch_done_locked() - Complete an exchange with the exchange lock held + * @ep: The exchange that is complete + */ static int fc_exch_done_locked(struct fc_exch *ep) { int rc = 1; @@ -315,6 +364,15 @@ static int fc_exch_done_locked(struct fc_exch *ep) return rc; } +/** + * fc_exch_ptr_get() - Return an exchange from an exchange pool + * @pool: Exchange Pool to get an exchange from + * @index: Index of the exchange within the pool + * + * Use the index to get an exchange from within an exchange pool. exches + * will point to an array of exchange pointers. The index will select + * the exchange within the array. + */ static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool, u16 index) { @@ -322,12 +380,22 @@ static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool, return exches[index]; } +/** + * fc_exch_ptr_set() - Assign an exchange to a slot in an exchange pool + * @pool: The pool to assign the exchange to + * @index: The index in the pool where the exchange will be assigned + * @ep: The exchange to assign to the pool + */ static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index, struct fc_exch *ep) { ((struct fc_exch **)(pool + 1))[index] = ep; } +/** + * fc_exch_delete() - Delete an exchange + * @ep: The exchange to be deleted + */ static void fc_exch_delete(struct fc_exch *ep) { struct fc_exch_pool *pool; @@ -343,8 +411,14 @@ static void fc_exch_delete(struct fc_exch *ep) fc_exch_release(ep); /* drop hold for exch in mp */ } -/* - * Internal version of fc_exch_timer_set - used with lock held. +/** + * fc_exch_timer_set_locked() - Start a timer for an exchange w/ the + * the exchange lock held + * @ep: The exchange whose timer will start + * @timer_msec: The timeout period + * + * Used for upper level protocols to time out the exchange. + * The timer is cancelled when it fires or when the exchange completes. 
*/ static inline void fc_exch_timer_set_locked(struct fc_exch *ep, unsigned int timer_msec) @@ -354,17 +428,15 @@ static inline void fc_exch_timer_set_locked(struct fc_exch *ep, FC_EXCH_DBG(ep, "Exchange timer armed\n"); - if (schedule_delayed_work(&ep->timeout_work, - msecs_to_jiffies(timer_msec))) + if (queue_delayed_work(fc_exch_workqueue, &ep->timeout_work, + msecs_to_jiffies(timer_msec))) fc_exch_hold(ep); /* hold for timer */ } -/* - * Set timer for an exchange. - * The time is a minimum delay in milliseconds until the timer fires. - * Used for upper level protocols to time out the exchange. - * The timer is cancelled when it fires or when the exchange completes. - * Returns non-zero if a timer couldn't be allocated. +/** + * fc_exch_timer_set() - Lock the exchange and set the timer + * @ep: The exchange whose timer will start + * @timer_msec: The timeout period */ static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec) { @@ -373,7 +445,115 @@ static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec) spin_unlock_bh(&ep->ex_lock); } -int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec) +/** + * fc_seq_send() - Send a frame using existing sequence/exchange pair + * @lport: The local port that the exchange will be sent on + * @sp: The sequence to be sent + * @fp: The frame to be sent on the exchange + */ +static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, + struct fc_frame *fp) +{ + struct fc_exch *ep; + struct fc_frame_header *fh = fc_frame_header_get(fp); + int error; + u32 f_ctl; + + ep = fc_seq_exch(sp); + WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT); + + f_ctl = ntoh24(fh->fh_f_ctl); + fc_exch_setup_hdr(ep, fp, f_ctl); + + /* + * update sequence count if this frame is carrying + * multiple FC frames when sequence offload is enabled + * by LLD. + */ + if (fr_max_payload(fp)) + sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)), + fr_max_payload(fp)); + else + sp->cnt++; + + /* + * Send the frame. + */ + error = lport->tt.frame_send(lport, fp); + + /* + * Update the exchange and sequence flags, + * assuming all frames for the sequence have been sent. + * We can only be called to send once for each sequence. + */ + spin_lock_bh(&ep->ex_lock); + ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ; /* not first seq */ + if (f_ctl & (FC_FC_END_SEQ | FC_FC_SEQ_INIT)) + ep->esb_stat &= ~ESB_ST_SEQ_INIT; + spin_unlock_bh(&ep->ex_lock); + return error; +} + +/** + * fc_seq_alloc() - Allocate a sequence for a given exchange + * @ep: The exchange to allocate a new sequence for + * @seq_id: The sequence ID to be used + * + * We don't support multiple originated sequences on the same exchange. + * By implication, any previously originated sequence on this exchange + * is complete, and we reallocate the same sequence. 
+ */ +static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id) +{ + struct fc_seq *sp; + + sp = &ep->seq; + sp->ssb_stat = 0; + sp->cnt = 0; + sp->id = seq_id; + return sp; +} + +/** + * fc_seq_start_next_locked() - Allocate a new sequence on the same + * exchange as the supplied sequence + * @sp: The sequence/exchange to get a new sequence for + */ +static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp) +{ + struct fc_exch *ep = fc_seq_exch(sp); + + sp = fc_seq_alloc(ep, ep->seq_id++); + FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n", + ep->f_ctl, sp->id); + return sp; +} + +/** + * fc_seq_start_next() - Lock the exchange and get a new sequence + * for a given sequence/exchange pair + * @sp: The sequence/exchange to get a new exchange for + */ +static struct fc_seq *fc_seq_start_next(struct fc_seq *sp) +{ + struct fc_exch *ep = fc_seq_exch(sp); + + spin_lock_bh(&ep->ex_lock); + sp = fc_seq_start_next_locked(sp); + spin_unlock_bh(&ep->ex_lock); + + return sp; +} + +/** + * fc_seq_exch_abort() - Abort an exchange and sequence + * @req_sp: The sequence to be aborted + * @timer_msec: The period of time to wait before aborting + * + * Generally called because of a timeout or an abort from the upper layer. + */ +static int fc_seq_exch_abort(const struct fc_seq *req_sp, + unsigned int timer_msec) { struct fc_seq *sp; struct fc_exch *ep; @@ -422,11 +602,10 @@ int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec) error = -ENOBUFS; return error; } -EXPORT_SYMBOL(fc_seq_exch_abort); -/* - * Exchange timeout - handle exchange timer expiration. - * The timer will have been cancelled before this is called. +/** + * fc_exch_timeout() - Handle exchange timer expiration + * @work: The work_struct identifying the exchange that timed out */ static void fc_exch_timeout(struct work_struct *work) { @@ -474,28 +653,10 @@ done: fc_exch_release(ep); } -/* - * Allocate a sequence. - * - * We don't support multiple originated sequences on the same exchange. - * By implication, any previously originated sequence on this exchange - * is complete, and we reallocate the same sequence. - */ -static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id) -{ - struct fc_seq *sp; - - sp = &ep->seq; - sp->ssb_stat = 0; - sp->cnt = 0; - sp->id = seq_id; - return sp; -} - /** - * fc_exch_em_alloc() - allocate an exchange from a specified EM. - * @lport: ptr to the local port - * @mp: ptr to the exchange manager + * fc_exch_em_alloc() - Allocate an exchange from a specified EM. + * @lport: The local port that the exchange is for + * @mp: The exchange manager that will allocate the exchange * * Returns pointer to allocated fc_exch with exch lock held. */ @@ -563,16 +724,18 @@ err: } /** - * fc_exch_alloc() - allocate an exchange. - * @lport: ptr to the local port - * @fp: ptr to the FC frame + * fc_exch_alloc() - Allocate an exchange from an EM on a + * local port's list of EMs. + * @lport: The local port that will own the exchange + * @fp: The FC frame that the exchange will be for * - * This function walks the list of the exchange manager(EM) - * anchors to select a EM for new exchange allocation. The - * EM is selected having either a NULL match function pointer - * or call to match function returning true. + * This function walks the list of exchange manager(EM) + * anchors to select an EM for a new exchange allocation. The + * EM is selected when a NULL match function pointer is encountered + * or when a call to a match function returns true. 
*/ -struct fc_exch *fc_exch_alloc(struct fc_lport *lport, struct fc_frame *fp) +static struct fc_exch *fc_exch_alloc(struct fc_lport *lport, + struct fc_frame *fp) { struct fc_exch_mgr_anchor *ema; struct fc_exch *ep; @@ -586,10 +749,11 @@ struct fc_exch *fc_exch_alloc(struct fc_lport *lport, struct fc_frame *fp) } return NULL; } -EXPORT_SYMBOL(fc_exch_alloc); -/* - * Lookup and hold an exchange. +/** + * fc_exch_find() - Lookup and hold an exchange + * @mp: The exchange manager to lookup the exchange from + * @xid: The XID of the exchange to look up */ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid) { @@ -609,7 +773,13 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid) return ep; } -void fc_exch_done(struct fc_seq *sp) + +/** + * fc_exch_done() - Indicate that an exchange/sequence tuple is complete and + * the memory allocated for the related objects may be freed. + * @sp: The sequence that has completed + */ +static void fc_exch_done(struct fc_seq *sp) { struct fc_exch *ep = fc_seq_exch(sp); int rc; @@ -620,10 +790,13 @@ void fc_exch_done(struct fc_seq *sp) if (!rc) fc_exch_delete(ep); } -EXPORT_SYMBOL(fc_exch_done); -/* - * Allocate a new exchange as responder. +/** + * fc_exch_resp() - Allocate a new exchange for a response frame + * @lport: The local port that the exchange was for + * @mp: The exchange manager to allocate the exchange from + * @fp: The response frame + * * Sets the responder ID in the frame header. */ static struct fc_exch *fc_exch_resp(struct fc_lport *lport, @@ -664,8 +837,13 @@ static struct fc_exch *fc_exch_resp(struct fc_lport *lport, return ep; } -/* - * Find a sequence for receive where the other end is originating the sequence. +/** + * fc_seq_lookup_recip() - Find a sequence where the other end + * originated the sequence + * @lport: The local port that the frame was sent to + * @mp: The Exchange Manager to lookup the exchange from + * @fp: The frame associated with the sequence we're looking for + * * If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold * on the ep that should be released by the caller. */ @@ -771,10 +949,12 @@ rel: return reject; } -/* - * Find the sequence for a frame being received. - * We originated the sequence, so it should be found. - * We may or may not have originated the exchange. +/** + * fc_seq_lookup_orig() - Find a sequence where this end + * originated the sequence + * @mp: The Exchange Manager to lookup the exchange from + * @fp: The frame associated with the sequence we're looking for + * * Does not hold the sequence for the caller. */ static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp, @@ -806,8 +986,12 @@ static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp, return sp; } -/* - * Set addresses for an exchange. +/** + * fc_exch_set_addr() - Set the source and destination IDs for an exchange + * @ep: The exchange to set the addresses for + * @orig_id: The originator's ID + * @resp_id: The responder's ID + * * Note this must be done before the first sequence of the exchange is sent. */ static void fc_exch_set_addr(struct fc_exch *ep, @@ -823,76 +1007,15 @@ static void fc_exch_set_addr(struct fc_exch *ep, } } -static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp) -{ - struct fc_exch *ep = fc_seq_exch(sp); - - sp = fc_seq_alloc(ep, ep->seq_id++); - FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n", - ep->f_ctl, sp->id); - return sp; -} -/* - * Allocate a new sequence on the same exchange as the supplied sequence. - * This will never return NULL. 
+/** + * fc_seq_els_rsp_send() - Send an ELS response using infomation from + * the existing sequence/exchange. + * @sp: The sequence/exchange to get information from + * @els_cmd: The ELS command to be sent + * @els_data: The ELS data to be sent */ -struct fc_seq *fc_seq_start_next(struct fc_seq *sp) -{ - struct fc_exch *ep = fc_seq_exch(sp); - - spin_lock_bh(&ep->ex_lock); - sp = fc_seq_start_next_locked(sp); - spin_unlock_bh(&ep->ex_lock); - - return sp; -} -EXPORT_SYMBOL(fc_seq_start_next); - -int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp, struct fc_frame *fp) -{ - struct fc_exch *ep; - struct fc_frame_header *fh = fc_frame_header_get(fp); - int error; - u32 f_ctl; - - ep = fc_seq_exch(sp); - WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT); - - f_ctl = ntoh24(fh->fh_f_ctl); - fc_exch_setup_hdr(ep, fp, f_ctl); - - /* - * update sequence count if this frame is carrying - * multiple FC frames when sequence offload is enabled - * by LLD. - */ - if (fr_max_payload(fp)) - sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)), - fr_max_payload(fp)); - else - sp->cnt++; - - /* - * Send the frame. - */ - error = lp->tt.frame_send(lp, fp); - - /* - * Update the exchange and sequence flags, - * assuming all frames for the sequence have been sent. - * We can only be called to send once for each sequence. - */ - spin_lock_bh(&ep->ex_lock); - ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ; /* not first seq */ - if (f_ctl & (FC_FC_END_SEQ | FC_FC_SEQ_INIT)) - ep->esb_stat &= ~ESB_ST_SEQ_INIT; - spin_unlock_bh(&ep->ex_lock); - return error; -} -EXPORT_SYMBOL(fc_seq_send); - -void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd, - struct fc_seq_els_data *els_data) +static void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd, + struct fc_seq_els_data *els_data) { switch (els_cmd) { case ELS_LS_RJT: @@ -911,10 +1034,13 @@ void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd, FC_EXCH_DBG(fc_seq_exch(sp), "Invalid ELS CMD:%x\n", els_cmd); } } -EXPORT_SYMBOL(fc_seq_els_rsp_send); -/* - * Send a sequence, which is also the last sequence in the exchange. +/** + * fc_seq_send_last() - Send a sequence that is the last in the exchange + * @sp: The sequence that is to be sent + * @fp: The frame that will be sent on the sequence + * @rctl: The R_CTL information to be sent + * @fh_type: The frame header type */ static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp, enum fc_rctl rctl, enum fc_fh_type fh_type) @@ -928,9 +1054,12 @@ static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp, fc_seq_send(ep->lp, sp, fp); } -/* +/** + * fc_seq_send_ack() - Send an acknowledgement that we've received a frame + * @sp: The sequence to send the ACK on + * @rx_fp: The received frame that is being acknoledged + * * Send ACK_1 (or equiv.) indicating we received something. - * The frame we're acking is supplied. */ static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp) { @@ -938,14 +1067,14 @@ static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp) struct fc_frame_header *rx_fh; struct fc_frame_header *fh; struct fc_exch *ep = fc_seq_exch(sp); - struct fc_lport *lp = ep->lp; + struct fc_lport *lport = ep->lp; unsigned int f_ctl; /* * Don't send ACKs for class 3. 
*/ if (fc_sof_needs_ack(fr_sof(rx_fp))) { - fp = fc_frame_alloc(lp, 0); + fp = fc_frame_alloc(lport, 0); if (!fp) return; @@ -980,12 +1109,16 @@ static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp) else fr_eof(fp) = FC_EOF_N; - (void) lp->tt.frame_send(lp, fp); + lport->tt.frame_send(lport, fp); } } -/* - * Send BLS Reject. +/** + * fc_exch_send_ba_rjt() - Send BLS Reject + * @rx_fp: The frame being rejected + * @reason: The reason the frame is being rejected + * @explan: The explaination for the rejection + * * This is for rejecting BA_ABTS only. */ static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp, @@ -996,11 +1129,11 @@ static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp, struct fc_frame_header *rx_fh; struct fc_frame_header *fh; struct fc_ba_rjt *rp; - struct fc_lport *lp; + struct fc_lport *lport; unsigned int f_ctl; - lp = fr_dev(rx_fp); - fp = fc_frame_alloc(lp, sizeof(*rp)); + lport = fr_dev(rx_fp); + fp = fc_frame_alloc(lport, sizeof(*rp)); if (!fp) return; fh = fc_frame_header_get(fp); @@ -1045,13 +1178,17 @@ static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp, if (fc_sof_needs_ack(fr_sof(fp))) fr_eof(fp) = FC_EOF_N; - (void) lp->tt.frame_send(lp, fp); + lport->tt.frame_send(lport, fp); } -/* - * Handle an incoming ABTS. This would be for target mode usually, - * but could be due to lost FCP transfer ready, confirm or RRQ. - * We always handle this as an exchange abort, ignoring the parameter. +/** + * fc_exch_recv_abts() - Handle an incoming ABTS + * @ep: The exchange the abort was on + * @rx_fp: The ABTS frame + * + * This would be for target mode usually, but could be due to lost + * FCP transfer ready, confirm or RRQ. We always handle this as an + * exchange abort, ignoring the parameter. */ static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp) { @@ -1100,10 +1237,14 @@ free: fc_frame_free(rx_fp); } -/* - * Handle receive where the other end is originating the sequence. 
+/** + * fc_exch_recv_req() - Handler for an incoming request where is other + * end is originating the sequence + * @lport: The local port that received the request + * @mp: The EM that the exchange is on + * @fp: The request frame */ -static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp, +static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp, struct fc_frame *fp) { struct fc_frame_header *fh = fc_frame_header_get(fp); @@ -1114,8 +1255,17 @@ static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp, u32 f_ctl; enum fc_pf_rjt_reason reject; + /* We can have the wrong fc_lport at this point with NPIV, which is a + * problem now that we know a new exchange needs to be allocated + */ + lport = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id)); + if (!lport) { + fc_frame_free(fp); + return; + } + fr_seq(fp) = NULL; - reject = fc_seq_lookup_recip(lp, mp, fp); + reject = fc_seq_lookup_recip(lport, mp, fp); if (reject == FC_RJT_NONE) { sp = fr_seq(fp); /* sequence will be held */ ep = fc_seq_exch(sp); @@ -1138,17 +1288,21 @@ static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp, if (ep->resp) ep->resp(sp, fp, ep->arg); else - lp->tt.lport_recv(lp, sp, fp); + lport->tt.lport_recv(lport, sp, fp); fc_exch_release(ep); /* release from lookup */ } else { - FC_LPORT_DBG(lp, "exch/seq lookup failed: reject %x\n", reject); + FC_LPORT_DBG(lport, "exch/seq lookup failed: reject %x\n", + reject); fc_frame_free(fp); } } -/* - * Handle receive where the other end is originating the sequence in - * response to our exchange. +/** + * fc_exch_recv_seq_resp() - Handler for an incoming response where the other + * end is the originator of the sequence that is a + * response to our initial exchange + * @mp: The EM that the exchange is on + * @fp: The response frame */ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) { @@ -1239,8 +1393,11 @@ out: fc_frame_free(fp); } -/* - * Handle receive for a sequence where other end is responding to our sequence. +/** + * fc_exch_recv_resp() - Handler for a sequence where other end is + * responding to our sequence + * @mp: The EM that the exchange is on + * @fp: The response frame */ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) { @@ -1256,9 +1413,13 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) fc_frame_free(fp); } -/* - * Handle the response to an ABTS for exchange or sequence. - * This can be BA_ACC or BA_RJT. +/** + * fc_exch_abts_resp() - Handler for a response to an ABT + * @ep: The exchange that the frame is on + * @fp: The response frame + * + * This response would be to an ABTS cancelling an exchange or sequence. + * The response can be either BA_ACC or BA_RJT */ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp) { @@ -1333,9 +1494,12 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp) } -/* - * Receive BLS sequence. - * This is always a sequence initiated by the remote side. +/** + * fc_exch_recv_bls() - Handler for a BLS sequence + * @mp: The EM that the exchange is on + * @fp: The request frame + * + * The BLS frame is always a sequence initiated by the remote side. * We may be either the originator or recipient of the exchange. 
*/ static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp) @@ -1392,8 +1556,10 @@ static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp) fc_exch_release(ep); /* release hold taken by fc_exch_find */ } -/* - * Accept sequence with LS_ACC. +/** + * fc_seq_ls_acc() - Accept sequence with LS_ACC + * @req_sp: The request sequence + * * If this fails due to allocation or transmit congestion, assume the * originator will repeat the sequence. */ @@ -1413,8 +1579,12 @@ static void fc_seq_ls_acc(struct fc_seq *req_sp) } } -/* - * Reject sequence with ELS LS_RJT. +/** + * fc_seq_ls_rjt() - Reject a sequence with ELS LS_RJT + * @req_sp: The request sequence + * @reason: The reason the sequence is being rejected + * @explan: The explaination for the rejection + * * If this fails due to allocation or transmit congestion, assume the * originator will repeat the sequence. */ @@ -1437,6 +1607,10 @@ static void fc_seq_ls_rjt(struct fc_seq *req_sp, enum fc_els_rjt_reason reason, } } +/** + * fc_exch_reset() - Reset an exchange + * @ep: The exchange to be reset + */ static void fc_exch_reset(struct fc_exch *ep) { struct fc_seq *sp; @@ -1446,12 +1620,6 @@ static void fc_exch_reset(struct fc_exch *ep) spin_lock_bh(&ep->ex_lock); ep->state |= FC_EX_RST_CLEANUP; - /* - * we really want to call del_timer_sync, but cannot due - * to the lport calling with the lport lock held (some resp - * functions can also grab the lport lock which could cause - * a deadlock). - */ if (cancel_delayed_work(&ep->timeout_work)) atomic_dec(&ep->ex_refcnt); /* drop hold for timer */ resp = ep->resp; @@ -1471,16 +1639,16 @@ static void fc_exch_reset(struct fc_exch *ep) } /** - * fc_exch_pool_reset() - Resets an per cpu exches pool. - * @lport: ptr to the local port - * @pool: ptr to the per cpu exches pool - * @sid: source FC ID - * @did: destination FC ID + * fc_exch_pool_reset() - Reset a per cpu exchange pool + * @lport: The local port that the exchange pool is on + * @pool: The exchange pool to be reset + * @sid: The source ID + * @did: The destination ID * - * Resets an per cpu exches pool, releasing its all sequences - * and exchanges. If sid is non-zero, then reset only exchanges - * we sourced from that FID. If did is non-zero, reset only - * exchanges destined to that FID. + * Resets a per cpu exches pool, releasing all of its sequences + * and exchanges. If sid is non-zero then reset only exchanges + * we sourced from the local port's FID. If did is non-zero then + * only reset exchanges destined for the local port's FID. */ static void fc_exch_pool_reset(struct fc_lport *lport, struct fc_exch_pool *pool, @@ -1514,15 +1682,15 @@ restart: } /** - * fc_exch_mgr_reset() - Resets all EMs of a lport - * @lport: ptr to the local port - * @sid: source FC ID - * @did: destination FC ID + * fc_exch_mgr_reset() - Reset all EMs of a local port + * @lport: The local port whose EMs are to be reset + * @sid: The source ID + * @did: The destination ID * - * Reset all EMs of a lport, releasing its all sequences and - * exchanges. If sid is non-zero, then reset only exchanges - * we sourced from that FID. If did is non-zero, reset only - * exchanges destined to that FID. + * Reset all EMs associated with a given local port. Release all + * sequences and exchanges. If sid is non-zero then reset only the + * exchanges sent from the local port's FID. If did is non-zero then + * reset only exchanges destined for the local port's FID. 
*/ void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did) { @@ -1538,8 +1706,11 @@ void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did) } EXPORT_SYMBOL(fc_exch_mgr_reset); -/* - * Handle incoming ELS REC - Read Exchange Concise. +/** + * fc_exch_els_rec() - Handler for ELS REC (Read Exchange Concise) requests + * @sp: The sequence the REC is on + * @rfp: The REC frame + * * Note that the requesting port may be different than the S_ID in the request. */ static void fc_exch_els_rec(struct fc_seq *sp, struct fc_frame *rfp) @@ -1621,10 +1792,11 @@ reject: fc_frame_free(rfp); } -/* - * Handle response from RRQ. - * Not much to do here, really. - * Should report errors. +/** + * fc_exch_rrq_resp() - Handler for RRQ responses + * @sp: The sequence that the RRQ is on + * @fp: The RRQ frame + * @arg: The exchange that the RRQ is on * * TODO: fix error handler. */ @@ -1664,21 +1836,99 @@ cleanup: fc_exch_release(aborted_ep); } -/* - * Send ELS RRQ - Reinstate Recovery Qualifier. + +/** + * fc_exch_seq_send() - Send a frame using a new exchange and sequence + * @lport: The local port to send the frame on + * @fp: The frame to be sent + * @resp: The response handler for this request + * @destructor: The destructor for the exchange + * @arg: The argument to be passed to the response handler + * @timer_msec: The timeout period for the exchange + * + * The frame pointer with some of the header's fields must be + * filled before calling this routine, those fields are: + * + * - routing control + * - FC port did + * - FC port sid + * - FC header type + * - frame control + * - parameter or relative offset + */ +static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport, + struct fc_frame *fp, + void (*resp)(struct fc_seq *, + struct fc_frame *fp, + void *arg), + void (*destructor)(struct fc_seq *, + void *), + void *arg, u32 timer_msec) +{ + struct fc_exch *ep; + struct fc_seq *sp = NULL; + struct fc_frame_header *fh; + int rc = 1; + + ep = fc_exch_alloc(lport, fp); + if (!ep) { + fc_frame_free(fp); + return NULL; + } + ep->esb_stat |= ESB_ST_SEQ_INIT; + fh = fc_frame_header_get(fp); + fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id)); + ep->resp = resp; + ep->destructor = destructor; + ep->arg = arg; + ep->r_a_tov = FC_DEF_R_A_TOV; + ep->lp = lport; + sp = &ep->seq; + + ep->fh_type = fh->fh_type; /* save for possbile timeout handling */ + ep->f_ctl = ntoh24(fh->fh_f_ctl); + fc_exch_setup_hdr(ep, fp, ep->f_ctl); + sp->cnt++; + + if (ep->xid <= lport->lro_xid) + fc_fcp_ddp_setup(fr_fsp(fp), ep->xid); + + if (unlikely(lport->tt.frame_send(lport, fp))) + goto err; + + if (timer_msec) + fc_exch_timer_set_locked(ep, timer_msec); + ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not first seq */ + + if (ep->f_ctl & FC_FC_SEQ_INIT) + ep->esb_stat &= ~ESB_ST_SEQ_INIT; + spin_unlock_bh(&ep->ex_lock); + return sp; +err: + rc = fc_exch_done_locked(ep); + spin_unlock_bh(&ep->ex_lock); + if (!rc) + fc_exch_delete(ep); + return NULL; +} + +/** + * fc_exch_rrq() - Send an ELS RRQ (Reinstate Recovery Qualifier) command + * @ep: The exchange to send the RRQ on + * * This tells the remote port to stop blocking the use of * the exchange and the seq_cnt range. 
*/ static void fc_exch_rrq(struct fc_exch *ep) { - struct fc_lport *lp; + struct fc_lport *lport; struct fc_els_rrq *rrq; struct fc_frame *fp; u32 did; - lp = ep->lp; + lport = ep->lp; - fp = fc_frame_alloc(lp, sizeof(*rrq)); + fp = fc_frame_alloc(lport, sizeof(*rrq)); if (!fp) goto retry; @@ -1694,10 +1944,11 @@ static void fc_exch_rrq(struct fc_exch *ep) did = ep->sid; fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did, - fc_host_port_id(lp->host), FC_TYPE_ELS, + fc_host_port_id(lport->host), FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); - if (fc_exch_seq_send(lp, fp, fc_exch_rrq_resp, NULL, ep, lp->e_d_tov)) + if (fc_exch_seq_send(lport, fp, fc_exch_rrq_resp, NULL, ep, + lport->e_d_tov)) return; retry: @@ -1714,12 +1965,14 @@ retry: } -/* - * Handle incoming ELS RRQ - Reset Recovery Qualifier. +/** + * fc_exch_els_rrq() - Handler for ELS RRQ (Reset Recovery Qualifier) requests + * @sp: The sequence that the RRQ is on + * @fp: The RRQ frame */ static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp) { - struct fc_exch *ep; /* request or subject exchange */ + struct fc_exch *ep = NULL; /* request or subject exchange */ struct fc_els_rrq *rp; u32 sid; u16 xid; @@ -1769,17 +2022,24 @@ static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp) * Send LS_ACC. */ fc_seq_ls_acc(sp); - fc_frame_free(fp); - return; + goto out; unlock_reject: spin_unlock_bh(&ep->ex_lock); - fc_exch_release(ep); /* drop hold from fc_exch_find */ reject: fc_seq_ls_rjt(sp, ELS_RJT_LOGIC, explan); +out: fc_frame_free(fp); + if (ep) + fc_exch_release(ep); /* drop hold from fc_exch_find */ } +/** + * fc_exch_mgr_add() - Add an exchange manager to a local port's list of EMs + * @lport: The local port to add the exchange manager to + * @mp: The exchange manager to be added to the local port + * @match: The match routine that indicates when this EM should be used + */ struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport, struct fc_exch_mgr *mp, bool (*match)(struct fc_frame *)) @@ -1799,6 +2059,10 @@ struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport, } EXPORT_SYMBOL(fc_exch_mgr_add); +/** + * fc_exch_mgr_destroy() - Destroy an exchange manager + * @kref: The reference to the EM to be destroyed + */ static void fc_exch_mgr_destroy(struct kref *kref) { struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref); @@ -1808,6 +2072,10 @@ static void fc_exch_mgr_destroy(struct kref *kref) kfree(mp); } +/** + * fc_exch_mgr_del() - Delete an EM from a local port's list + * @ema: The exchange manager anchor identifying the EM to be deleted + */ void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema) { /* remove EM anchor from EM anchors list */ @@ -1817,7 +2085,35 @@ void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema) } EXPORT_SYMBOL(fc_exch_mgr_del); -struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, +/** + * fc_exch_mgr_list_clone() - Share all exchange manager objects + * @src: Source lport to clone exchange managers from + * @dst: New lport that takes references to all the exchange managers + */ +int fc_exch_mgr_list_clone(struct fc_lport *src, struct fc_lport *dst) +{ + struct fc_exch_mgr_anchor *ema, *tmp; + + list_for_each_entry(ema, &src->ema_list, ema_list) { + if (!fc_exch_mgr_add(dst, ema->mp, ema->match)) + goto err; + } + return 0; +err: + list_for_each_entry_safe(ema, tmp, &dst->ema_list, ema_list) + fc_exch_mgr_del(ema); + return -ENOMEM; +} + +/** + * fc_exch_mgr_alloc() - Allocate an exchange manager + * @lport: The local port that the new 
EM will be associated with + * @class: The default FC class for new exchanges + * @min_xid: The minimum XID for exchanges from the new EM + * @max_xid: The maximum XID for exchanges from the new EM + * @match: The match routine for the new EM + */ +struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport, enum fc_class class, u16 min_xid, u16 max_xid, bool (*match)(struct fc_frame *)) @@ -1830,7 +2126,7 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN || (min_xid & fc_cpu_mask) != 0) { - FC_LPORT_DBG(lp, "Invalid min_xid 0x:%x and max_xid 0x:%x\n", + FC_LPORT_DBG(lport, "Invalid min_xid 0x:%x and max_xid 0x:%x\n", min_xid, max_xid); return NULL; } @@ -1873,7 +2169,7 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, } kref_init(&mp->kref); - if (!fc_exch_mgr_add(lp, mp, match)) { + if (!fc_exch_mgr_add(lport, mp, match)) { free_percpu(mp->pool); goto free_mempool; } @@ -1894,76 +2190,26 @@ free_mp: } EXPORT_SYMBOL(fc_exch_mgr_alloc); +/** + * fc_exch_mgr_free() - Free all exchange managers on a local port + * @lport: The local port whose EMs are to be freed + */ void fc_exch_mgr_free(struct fc_lport *lport) { struct fc_exch_mgr_anchor *ema, *next; + flush_workqueue(fc_exch_workqueue); list_for_each_entry_safe(ema, next, &lport->ema_list, ema_list) fc_exch_mgr_del(ema); } EXPORT_SYMBOL(fc_exch_mgr_free); - -struct fc_seq *fc_exch_seq_send(struct fc_lport *lp, - struct fc_frame *fp, - void (*resp)(struct fc_seq *, - struct fc_frame *fp, - void *arg), - void (*destructor)(struct fc_seq *, void *), - void *arg, u32 timer_msec) -{ - struct fc_exch *ep; - struct fc_seq *sp = NULL; - struct fc_frame_header *fh; - int rc = 1; - - ep = fc_exch_alloc(lp, fp); - if (!ep) { - fc_frame_free(fp); - return NULL; - } - ep->esb_stat |= ESB_ST_SEQ_INIT; - fh = fc_frame_header_get(fp); - fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id)); - ep->resp = resp; - ep->destructor = destructor; - ep->arg = arg; - ep->r_a_tov = FC_DEF_R_A_TOV; - ep->lp = lp; - sp = &ep->seq; - - ep->fh_type = fh->fh_type; /* save for possbile timeout handling */ - ep->f_ctl = ntoh24(fh->fh_f_ctl); - fc_exch_setup_hdr(ep, fp, ep->f_ctl); - sp->cnt++; - - if (ep->xid <= lp->lro_xid) - fc_fcp_ddp_setup(fr_fsp(fp), ep->xid); - - if (unlikely(lp->tt.frame_send(lp, fp))) - goto err; - - if (timer_msec) - fc_exch_timer_set_locked(ep, timer_msec); - ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not first seq */ - - if (ep->f_ctl & FC_FC_SEQ_INIT) - ep->esb_stat &= ~ESB_ST_SEQ_INIT; - spin_unlock_bh(&ep->ex_lock); - return sp; -err: - rc = fc_exch_done_locked(ep); - spin_unlock_bh(&ep->ex_lock); - if (!rc) - fc_exch_delete(ep); - return NULL; -} -EXPORT_SYMBOL(fc_exch_seq_send); - -/* - * Receive a frame +/** + * fc_exch_recv() - Handler for received frames + * @lport: The local port the frame was received on + * @fp: The received frame */ -void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp) +void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp) { struct fc_frame_header *fh = fc_frame_header_get(fp); struct fc_exch_mgr_anchor *ema; @@ -1971,8 +2217,8 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp) u16 oxid; /* lport lock ? 
*/ - if (!lp || lp->state == LPORT_ST_DISABLED) { - FC_LPORT_DBG(lp, "Receiving frames for an lport that " + if (!lport || lport->state == LPORT_ST_DISABLED) { + FC_LPORT_DBG(lport, "Receiving frames for an lport that " "has not been initialized correctly\n"); fc_frame_free(fp); return; @@ -1981,7 +2227,7 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp) f_ctl = ntoh24(fh->fh_f_ctl); oxid = ntohs(fh->fh_ox_id); if (f_ctl & FC_FC_EX_CTX) { - list_for_each_entry(ema, &lp->ema_list, ema_list) { + list_for_each_entry(ema, &lport->ema_list, ema_list) { if ((oxid >= ema->mp->min_xid) && (oxid <= ema->mp->max_xid)) { found = 1; @@ -1990,13 +2236,13 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp) } if (!found) { - FC_LPORT_DBG(lp, "Received response for out " + FC_LPORT_DBG(lport, "Received response for out " "of range oxid:%hx\n", oxid); fc_frame_free(fp); return; } } else - ema = list_entry(lp->ema_list.prev, typeof(*ema), ema_list); + ema = list_entry(lport->ema_list.prev, typeof(*ema), ema_list); /* * If frame is marked invalid, just drop it. @@ -2015,37 +2261,56 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp) else if (f_ctl & FC_FC_SEQ_CTX) fc_exch_recv_resp(ema->mp, fp); else - fc_exch_recv_req(lp, ema->mp, fp); + fc_exch_recv_req(lport, ema->mp, fp); break; default: - FC_LPORT_DBG(lp, "dropping invalid frame (eof %x)", fr_eof(fp)); + FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)", + fr_eof(fp)); fc_frame_free(fp); } } EXPORT_SYMBOL(fc_exch_recv); -int fc_exch_init(struct fc_lport *lp) +/** + * fc_exch_init() - Initialize the exchange layer for a local port + * @lport: The local port to initialize the exchange layer for + */ +int fc_exch_init(struct fc_lport *lport) { - if (!lp->tt.seq_start_next) - lp->tt.seq_start_next = fc_seq_start_next; + if (!lport->tt.seq_start_next) + lport->tt.seq_start_next = fc_seq_start_next; - if (!lp->tt.exch_seq_send) - lp->tt.exch_seq_send = fc_exch_seq_send; + if (!lport->tt.exch_seq_send) + lport->tt.exch_seq_send = fc_exch_seq_send; - if (!lp->tt.seq_send) - lp->tt.seq_send = fc_seq_send; + if (!lport->tt.seq_send) + lport->tt.seq_send = fc_seq_send; - if (!lp->tt.seq_els_rsp_send) - lp->tt.seq_els_rsp_send = fc_seq_els_rsp_send; + if (!lport->tt.seq_els_rsp_send) + lport->tt.seq_els_rsp_send = fc_seq_els_rsp_send; - if (!lp->tt.exch_done) - lp->tt.exch_done = fc_exch_done; + if (!lport->tt.exch_done) + lport->tt.exch_done = fc_exch_done; - if (!lp->tt.exch_mgr_reset) - lp->tt.exch_mgr_reset = fc_exch_mgr_reset; + if (!lport->tt.exch_mgr_reset) + lport->tt.exch_mgr_reset = fc_exch_mgr_reset; - if (!lp->tt.seq_exch_abort) - lp->tt.seq_exch_abort = fc_seq_exch_abort; + if (!lport->tt.seq_exch_abort) + lport->tt.seq_exch_abort = fc_seq_exch_abort; + + return 0; +} +EXPORT_SYMBOL(fc_exch_init); + +/** + * fc_setup_exch_mgr() - Setup an exchange manager + */ +int fc_setup_exch_mgr() +{ + fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch), + 0, SLAB_HWCACHE_ALIGN, NULL); + if (!fc_em_cachep) + return -ENOMEM; /* * Initialize fc_cpu_mask and fc_cpu_order. 
The @@ -2069,20 +2334,17 @@ int fc_exch_init(struct fc_lport *lp) } fc_cpu_mask--; - return 0; -} -EXPORT_SYMBOL(fc_exch_init); - -int fc_setup_exch_mgr(void) -{ - fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch), - 0, SLAB_HWCACHE_ALIGN, NULL); - if (!fc_em_cachep) + fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue"); + if (!fc_exch_workqueue) return -ENOMEM; return 0; } -void fc_destroy_exch_mgr(void) +/** + * fc_destroy_exch_mgr() - Destroy an exchange manager + */ +void fc_destroy_exch_mgr() { + destroy_workqueue(fc_exch_workqueue); kmem_cache_destroy(fc_em_cachep); } diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 59a4408b27b..c4b58d042f6 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -39,15 +39,9 @@ #include <scsi/libfc.h> #include <scsi/fc_encode.h> -MODULE_AUTHOR("Open-FCoE.org"); -MODULE_DESCRIPTION("libfc"); -MODULE_LICENSE("GPL v2"); +#include "fc_libfc.h" -unsigned int fc_debug_logging; -module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR); -MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); - -static struct kmem_cache *scsi_pkt_cachep; +struct kmem_cache *scsi_pkt_cachep; /* SRB state definitions */ #define FC_SRB_FREE 0 /* cmd is free */ @@ -58,7 +52,6 @@ static struct kmem_cache *scsi_pkt_cachep; #define FC_SRB_DISCONTIG (1 << 4) /* non-sequential data recvd */ #define FC_SRB_COMPL (1 << 5) /* fc_io_compl has been run */ #define FC_SRB_FCP_PROCESSING_TMO (1 << 6) /* timer function processing */ -#define FC_SRB_NOMEM (1 << 7) /* dropped to out of mem */ #define FC_SRB_READ (1 << 1) #define FC_SRB_WRITE (1 << 0) @@ -73,10 +66,20 @@ static struct kmem_cache *scsi_pkt_cachep; #define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status) #define CMD_RESID_LEN(Cmnd) ((Cmnd)->SCp.buffers_residual) +/** + * struct fc_fcp_internal - FCP layer internal data + * @scsi_pkt_pool: Memory pool to draw FCP packets from + * @scsi_pkt_queue: Current FCP packets + * @last_can_queue_ramp_down_time: ramp down time + * @last_can_queue_ramp_up_time: ramp up time + * @max_can_queue: max can_queue size + */ struct fc_fcp_internal { - mempool_t *scsi_pkt_pool; + mempool_t *scsi_pkt_pool; struct list_head scsi_pkt_queue; - u8 throttled; + unsigned long last_can_queue_ramp_down_time; + unsigned long last_can_queue_ramp_up_time; + int max_can_queue; }; #define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv) @@ -90,9 +93,9 @@ static void fc_fcp_recv(struct fc_seq *, struct fc_frame *, void *); static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *); static void fc_fcp_complete_locked(struct fc_fcp_pkt *); static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *); -static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp); +static void fc_fcp_error(struct fc_fcp_pkt *, struct fc_frame *); static void fc_timeout_error(struct fc_fcp_pkt *); -static void fc_fcp_timeout(unsigned long data); +static void fc_fcp_timeout(unsigned long); static void fc_fcp_rec(struct fc_fcp_pkt *); static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *); static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *); @@ -124,6 +127,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *); #define FC_SCSI_TM_TOV (10 * HZ) #define FC_SCSI_REC_TOV (2 * HZ) #define FC_HOST_RESET_TIMEOUT (30 * HZ) +#define FC_CAN_QUEUE_PERIOD (60 * HZ) #define FC_MAX_ERROR_CNT 5 #define FC_MAX_RECOV_RETRY 3 @@ -131,23 +135,22 @@ static void 
fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *); #define FC_FCP_DFLT_QUEUE_DEPTH 32 /** - * fc_fcp_pkt_alloc - allocation routine for scsi_pkt packet - * @lp: fc lport struct - * @gfp: gfp flags for allocation + * fc_fcp_pkt_alloc() - Allocate a fcp_pkt + * @lport: The local port that the FCP packet is for + * @gfp: GFP flags for allocation * - * This is used by upper layer scsi driver. - * Return Value : scsi_pkt structure or null on allocation failure. - * Context : call from process context. no locking required. + * Return value: fcp_pkt structure or null on allocation failure. + * Context: Can be called from process context, no lock is required. */ -static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lp, gfp_t gfp) +static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp) { - struct fc_fcp_internal *si = fc_get_scsi_internal(lp); + struct fc_fcp_internal *si = fc_get_scsi_internal(lport); struct fc_fcp_pkt *fsp; fsp = mempool_alloc(si->scsi_pkt_pool, gfp); if (fsp) { memset(fsp, 0, sizeof(*fsp)); - fsp->lp = lp; + fsp->lp = lport; atomic_set(&fsp->ref_cnt, 1); init_timer(&fsp->timer); INIT_LIST_HEAD(&fsp->list); @@ -157,12 +160,11 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lp, gfp_t gfp) } /** - * fc_fcp_pkt_release() - release hold on scsi_pkt packet - * @fsp: fcp packet struct + * fc_fcp_pkt_release() - Release hold on a fcp_pkt + * @fsp: The FCP packet to be released * - * This is used by upper layer scsi driver. - * Context : call from process and interrupt context. - * no locking required + * Context: Can be called from process or interrupt context, + * no lock is required. */ static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp) { @@ -173,20 +175,25 @@ static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp) } } +/** + * fc_fcp_pkt_hold() - Hold a fcp_pkt + * @fsp: The FCP packet to be held + */ static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp) { atomic_inc(&fsp->ref_cnt); } /** - * fc_fcp_pkt_destory() - release hold on scsi_pkt packet - * @seq: exchange sequence - * @fsp: fcp packet struct + * fc_fcp_pkt_destory() - Release hold on a fcp_pkt + * @seq: The sequence that the FCP packet is on (required by destructor API) + * @fsp: The FCP packet to be released + * + * This routine is called by a destructor callback in the exch_seq_send() + * routine of the libfc Transport Template. The 'struct fc_seq' is a required + * argument even though it is not used by this routine. * - * Release hold on scsi_pkt packet set to keep scsi_pkt - * till EM layer exch resource is not freed. - * Context : called from from EM layer. - * no locking required + * Context: No locking required. */ static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp) { @@ -194,10 +201,10 @@ static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp) } /** - * fc_fcp_lock_pkt() - lock a packet and get a ref to it. - * @fsp: fcp packet + * fc_fcp_lock_pkt() - Lock a fcp_pkt and increase its reference count + * @fsp: The FCP packet to be locked and incremented * - * We should only return error if we return a command to scsi-ml before + * We should only return error if we return a command to SCSI-ml before * getting a response. This can happen in cases where we send a abort, but * do not wait for the response and the abort and command can be passing * each other on the wire/network-layer. 
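
The hold/release helpers and fc_fcp_lock_pkt() above are what keep a fcp_pkt alive while an abort and its response may still be crossing on the wire: the lock helper only hands out a locked reference while the command has not yet completed. A minimal user-space sketch of that pattern, with invented names and C11 atomics plus a pthread mutex standing in for the kernel's atomic_t and spinlock:

/* Illustrative sketch, not part of the patch. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

#define PKT_DONE 0x1            /* stands in for FC_SRB_COMPL */

struct pkt {
	atomic_int ref_cnt;     /* last reference frees the packet */
	pthread_mutex_t lock;   /* protects state */
	unsigned int state;
};

static void pkt_hold(struct pkt *p)
{
	atomic_fetch_add(&p->ref_cnt, 1);
}

static void pkt_release(struct pkt *p)
{
	if (atomic_fetch_sub(&p->ref_cnt, 1) == 1)
		free(p);        /* mempool_free() in the real code */
}

/* Take the lock and a reference; fail if the command already completed,
 * mirroring the "abort and response may pass each other" note above. */
static int lock_pkt(struct pkt *p)
{
	pthread_mutex_lock(&p->lock);
	if (p->state & PKT_DONE) {
		pthread_mutex_unlock(&p->lock);
		return -1;
	}
	pkt_hold(p);
	return 0;
}
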
@@ -222,18 +229,33 @@ static inline int fc_fcp_lock_pkt(struct fc_fcp_pkt *fsp) return 0; } +/** + * fc_fcp_unlock_pkt() - Release a fcp_pkt's lock and decrement its + * reference count + * @fsp: The FCP packet to be unlocked and decremented + */ static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp) { spin_unlock_bh(&fsp->scsi_pkt_lock); fc_fcp_pkt_release(fsp); } +/** + * fc_fcp_timer_set() - Start a timer for a fcp_pkt + * @fsp: The FCP packet to start a timer for + * @delay: The timeout period for the timer + */ static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay) { if (!(fsp->state & FC_SRB_COMPL)) mod_timer(&fsp->timer, jiffies + delay); } +/** + * fc_fcp_send_abort() - Send an abort for exchanges associated with a + * fcp_pkt + * @fsp: The FCP packet to abort exchanges on + */ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp) { if (!fsp->seq_ptr) @@ -243,9 +265,14 @@ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp) return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0); } -/* - * Retry command. - * An abort isn't needed. +/** + * fc_fcp_retry_cmd() - Retry a fcp_pkt + * @fsp: The FCP packet to be retried + * + * Sets the status code to be FC_ERROR and then calls + * fc_fcp_complete_locked() which in turn calls fc_io_compl(). + * fc_io_compl() will notify the SCSI-ml that the I/O is done. + * The SCSI-ml will retry the command. */ static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp) { @@ -260,64 +287,146 @@ static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp) fc_fcp_complete_locked(fsp); } -/* - * fc_fcp_ddp_setup - calls to LLD's ddp_setup to set up DDP - * transfer for a read I/O indicated by the fc_fcp_pkt. - * @fsp: ptr to the fc_fcp_pkt - * - * This is called in exch_seq_send() when we have a newly allocated - * exchange with a valid exchange id to setup ddp. 
- * - * returns: none +/** + * fc_fcp_ddp_setup() - Calls a LLD's ddp_setup routine to set up DDP context + * @fsp: The FCP packet that will manage the DDP frames + * @xid: The XID that will be used for the DDP exchange */ void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid) { - struct fc_lport *lp; + struct fc_lport *lport; if (!fsp) return; - lp = fsp->lp; + lport = fsp->lp; if ((fsp->req_flags & FC_SRB_READ) && - (lp->lro_enabled) && (lp->tt.ddp_setup)) { - if (lp->tt.ddp_setup(lp, xid, scsi_sglist(fsp->cmd), - scsi_sg_count(fsp->cmd))) + (lport->lro_enabled) && (lport->tt.ddp_setup)) { + if (lport->tt.ddp_setup(lport, xid, scsi_sglist(fsp->cmd), + scsi_sg_count(fsp->cmd))) fsp->xfer_ddp = xid; } } -EXPORT_SYMBOL(fc_fcp_ddp_setup); -/* - * fc_fcp_ddp_done - calls to LLD's ddp_done to release any - * DDP related resources for this I/O if it is initialized - * as a ddp transfer - * @fsp: ptr to the fc_fcp_pkt - * - * returns: none +/** + * fc_fcp_ddp_done() - Calls a LLD's ddp_done routine to release any + * DDP related resources for a fcp_pkt + * @fsp: The FCP packet that DDP had been used on */ static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp) { - struct fc_lport *lp; + struct fc_lport *lport; if (!fsp) return; - lp = fsp->lp; - if (fsp->xfer_ddp && lp->tt.ddp_done) { - fsp->xfer_len = lp->tt.ddp_done(lp, fsp->xfer_ddp); - fsp->xfer_ddp = 0; + if (fsp->xfer_ddp == FC_XID_UNKNOWN) + return; + + lport = fsp->lp; + if (lport->tt.ddp_done) { + fsp->xfer_len = lport->tt.ddp_done(lport, fsp->xfer_ddp); + fsp->xfer_ddp = FC_XID_UNKNOWN; } } +/** + * fc_fcp_can_queue_ramp_up() - increases can_queue + * @lport: lport to ramp up can_queue + * + * Locking notes: Called with Scsi_Host lock held + */ +static void fc_fcp_can_queue_ramp_up(struct fc_lport *lport) +{ + struct fc_fcp_internal *si = fc_get_scsi_internal(lport); + int can_queue; + + if (si->last_can_queue_ramp_up_time && + (time_before(jiffies, si->last_can_queue_ramp_up_time + + FC_CAN_QUEUE_PERIOD))) + return; + + if (time_before(jiffies, si->last_can_queue_ramp_down_time + + FC_CAN_QUEUE_PERIOD)) + return; + + si->last_can_queue_ramp_up_time = jiffies; + + can_queue = lport->host->can_queue << 1; + if (can_queue >= si->max_can_queue) { + can_queue = si->max_can_queue; + si->last_can_queue_ramp_down_time = 0; + } + lport->host->can_queue = can_queue; + shost_printk(KERN_ERR, lport->host, "libfc: increased " + "can_queue to %d.\n", can_queue); +} + +/** + * fc_fcp_can_queue_ramp_down() - reduces can_queue + * @lport: lport to reduce can_queue + * + * If we are getting memory allocation failures, then we may + * be trying to execute too many commands. We let the running + * commands complete or timeout, then try again with a reduced + * can_queue. Eventually we will hit the point where we run + * on all reserved structs. + * + * Locking notes: Called with Scsi_Host lock held + */ +static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport) +{ + struct fc_fcp_internal *si = fc_get_scsi_internal(lport); + int can_queue; + + if (si->last_can_queue_ramp_down_time && + (time_before(jiffies, si->last_can_queue_ramp_down_time + + FC_CAN_QUEUE_PERIOD))) + return; + + si->last_can_queue_ramp_down_time = jiffies; + + can_queue = lport->host->can_queue; + can_queue >>= 1; + if (!can_queue) + can_queue = 1; + lport->host->can_queue = can_queue; + shost_printk(KERN_ERR, lport->host, "libfc: Could not allocate frame.\n" + "Reducing can_queue to %d.\n", can_queue); +} /* - * Receive SCSI data from target. 
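
The two ramp helpers above replace the old one-shot throttle: an allocation failure halves can_queue, later completions double it back toward the original value, and a shared 60-second hold-off (FC_CAN_QUEUE_PERIOD) keeps the two directions from oscillating. A simplified sketch of that hysteresis, using plain integers in place of jiffies and invented names:

/* Illustrative sketch, not part of the patch. */
#define CAN_QUEUE_PERIOD 60            /* seconds, like FC_CAN_QUEUE_PERIOD */

struct queue_state {
	long last_ramp_down;           /* 0 means "no ramp down pending" */
	long last_ramp_up;
	int can_queue;
	int max_can_queue;
};

static void ramp_down(struct queue_state *q, long now)
{
	/* at most one reduction per period */
	if (q->last_ramp_down && now < q->last_ramp_down + CAN_QUEUE_PERIOD)
		return;
	q->last_ramp_down = now;
	q->can_queue >>= 1;            /* halve, but never reach zero */
	if (!q->can_queue)
		q->can_queue = 1;
}

static void ramp_up(struct queue_state *q, long now)
{
	/* at most one increase per period ... */
	if (q->last_ramp_up && now < q->last_ramp_up + CAN_QUEUE_PERIOD)
		return;
	/* ... and not while a recent ramp down is still settling */
	if (now < q->last_ramp_down + CAN_QUEUE_PERIOD)
		return;
	q->last_ramp_up = now;
	q->can_queue <<= 1;            /* double, capped at the original value */
	if (q->can_queue >= q->max_can_queue) {
		q->can_queue = q->max_can_queue;
		q->last_ramp_down = 0; /* fully recovered */
	}
}
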
- * Called after receiving solicited data. + * fc_fcp_frame_alloc() - Allocates fc_frame structure and buffer. + * @lport: fc lport struct + * @len: payload length + * + * Allocates fc_frame structure and buffer but if fails to allocate + * then reduce can_queue. + */ +static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport, + size_t len) +{ + struct fc_frame *fp; + unsigned long flags; + + fp = fc_frame_alloc(lport, len); + if (!fp) { + spin_lock_irqsave(lport->host->host_lock, flags); + fc_fcp_can_queue_ramp_down(lport); + spin_unlock_irqrestore(lport->host->host_lock, flags); + } + return fp; +} + +/** + * fc_fcp_recv_data() - Handler for receiving SCSI-FCP data from a target + * @fsp: The FCP packet the data is on + * @fp: The data frame */ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp) { struct scsi_cmnd *sc = fsp->cmd; - struct fc_lport *lp = fsp->lp; + struct fc_lport *lport = fsp->lp; struct fcoe_dev_stats *stats; struct fc_frame_header *fh; size_t start_offset; @@ -327,7 +436,7 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp) size_t len; void *buf; struct scatterlist *sg; - size_t remaining; + u32 nents; fh = fc_frame_header_get(fp); offset = ntohl(fh->fh_parm_offset); @@ -351,65 +460,29 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp) if (offset != fsp->xfer_len) fsp->state |= FC_SRB_DISCONTIG; - crc = 0; - if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) - crc = crc32(~0, (u8 *) fh, sizeof(*fh)); - sg = scsi_sglist(sc); - remaining = len; - - while (remaining > 0 && sg) { - size_t off; - void *page_addr; - size_t sg_bytes; + nents = scsi_sg_count(sc); - if (offset >= sg->length) { - offset -= sg->length; - sg = sg_next(sg); - continue; - } - sg_bytes = min(remaining, sg->length - offset); - - /* - * The scatterlist item may be bigger than PAGE_SIZE, - * but we are limited to mapping PAGE_SIZE at a time. - */ - off = offset + sg->offset; - sg_bytes = min(sg_bytes, (size_t) - (PAGE_SIZE - (off & ~PAGE_MASK))); - page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT), - KM_SOFTIRQ0); - if (!page_addr) - break; /* XXX panic? */ - - if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) - crc = crc32(crc, buf, sg_bytes); - memcpy((char *)page_addr + (off & ~PAGE_MASK), buf, - sg_bytes); - - kunmap_atomic(page_addr, KM_SOFTIRQ0); - buf += sg_bytes; - offset += sg_bytes; - remaining -= sg_bytes; - copy_len += sg_bytes; - } - - if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) { + if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED)) { + copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents, + &offset, KM_SOFTIRQ0, NULL); + } else { + crc = crc32(~0, (u8 *) fh, sizeof(*fh)); + copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents, + &offset, KM_SOFTIRQ0, &crc); buf = fc_frame_payload_get(fp, 0); - if (len % 4) { + if (len % 4) crc = crc32(crc, buf + len, 4 - (len % 4)); - len += 4 - (len % 4); - } if (~crc != le32_to_cpu(fr_crc(fp))) { crc_err: - stats = fc_lport_get_stats(lp); + stats = fc_lport_get_stats(lport); stats->ErrorFrames++; /* FIXME - per cpu count, not total count! */ if (stats->InvalidCRCCount++ < 5) printk(KERN_WARNING "libfc: CRC error on data " "frame for port (%6x)\n", - fc_host_port_id(lp->host)); + fc_host_port_id(lport->host)); /* * Assume the frame is total garbage. * We may have copied it over the good part @@ -437,18 +510,17 @@ crc_err: } /** - * fc_fcp_send_data() - Send SCSI data to target. 
- * @fsp: ptr to fc_fcp_pkt - * @sp: ptr to this sequence - * @offset: starting offset for this data request - * @seq_blen: the burst length for this data request + * fc_fcp_send_data() - Send SCSI data to a target + * @fsp: The FCP packet the data is on + * @sp: The sequence the data is to be sent on + * @offset: The starting offset for this data request + * @seq_blen: The burst length for this data request * * Called after receiving a Transfer Ready data descriptor. - * if LLD is capable of seq offload then send down seq_blen - * size of data in single frame, otherwise send multiple FC - * frames of max FC frame payload supported by target port. - * - * Returns : 0 for success. + * If the LLD is capable of sequence offload then send down the + * seq_blen ammount of data in single frame, otherwise send + * multiple frames of the maximum frame payload supported by + * the target port. */ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, size_t offset, size_t seq_blen) @@ -457,16 +529,18 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, struct scsi_cmnd *sc; struct scatterlist *sg; struct fc_frame *fp = NULL; - struct fc_lport *lp = fsp->lp; + struct fc_lport *lport = fsp->lp; + struct page *page; size_t remaining; size_t t_blen; size_t tlen; size_t sg_bytes; size_t frame_offset, fh_parm_offset; + size_t off; int error; void *data = NULL; void *page_addr; - int using_sg = lp->sg_supp; + int using_sg = lport->sg_supp; u32 f_ctl; WARN_ON(seq_blen <= 0); @@ -488,10 +562,10 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, * to max FC frame payload previously set in fsp->max_payload. */ t_blen = fsp->max_payload; - if (lp->seq_offload) { - t_blen = min(seq_blen, (size_t)lp->lso_max); + if (lport->seq_offload) { + t_blen = min(seq_blen, (size_t)lport->lso_max); FC_FCP_DBG(fsp, "fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n", - fsp, seq_blen, lp->lso_max, t_blen); + fsp, seq_blen, lport->lso_max, t_blen); } WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD); @@ -503,7 +577,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, remaining = seq_blen; fh_parm_offset = frame_offset = offset; tlen = 0; - seq = lp->tt.seq_start_next(seq); + seq = lport->tt.seq_start_next(seq); f_ctl = FC_FC_REL_OFF; WARN_ON(!seq); @@ -525,43 +599,34 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, */ if (tlen % 4) using_sg = 0; - if (using_sg) { - fp = _fc_frame_alloc(lp, 0); - if (!fp) - return -ENOMEM; - } else { - fp = fc_frame_alloc(lp, tlen); - if (!fp) - return -ENOMEM; + fp = fc_frame_alloc(lport, using_sg ? 0 : tlen); + if (!fp) + return -ENOMEM; - data = (void *)(fr_hdr(fp)) + - sizeof(struct fc_frame_header); - } + data = fc_frame_header_get(fp) + 1; fh_parm_offset = frame_offset; fr_max_payload(fp) = fsp->max_payload; } + + off = offset + sg->offset; sg_bytes = min(tlen, sg->length - offset); + sg_bytes = min(sg_bytes, + (size_t) (PAGE_SIZE - (off & ~PAGE_MASK))); + page = sg_page(sg) + (off >> PAGE_SHIFT); if (using_sg) { - get_page(sg_page(sg)); + get_page(page); skb_fill_page_desc(fp_skb(fp), skb_shinfo(fp_skb(fp))->nr_frags, - sg_page(sg), sg->offset + offset, - sg_bytes); + page, off & ~PAGE_MASK, sg_bytes); fp_skb(fp)->data_len += sg_bytes; fr_len(fp) += sg_bytes; fp_skb(fp)->truesize += PAGE_SIZE; } else { - size_t off = offset + sg->offset; - /* * The scatterlist item may be bigger than PAGE_SIZE, * but we must not cross pages inside the kmap. 
*/ - sg_bytes = min(sg_bytes, (size_t) (PAGE_SIZE - - (off & ~PAGE_MASK))); - page_addr = kmap_atomic(sg_page(sg) + - (off >> PAGE_SHIFT), - KM_SOFTIRQ0); + page_addr = kmap_atomic(page, KM_SOFTIRQ0); memcpy(data, (char *)page_addr + (off & ~PAGE_MASK), sg_bytes); kunmap_atomic(page_addr, KM_SOFTIRQ0); @@ -572,7 +637,8 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, tlen -= sg_bytes; remaining -= sg_bytes; - if (tlen) + if ((skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN) && + (tlen)) continue; /* @@ -589,7 +655,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, /* * send fragment using for a sequence. */ - error = lp->tt.seq_send(lp, seq, fp); + error = lport->tt.seq_send(lport, seq, fp); if (error) { WARN_ON(1); /* send error should be rare */ fc_fcp_retry_cmd(fsp); @@ -601,6 +667,11 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, return 0; } +/** + * fc_fcp_abts_resp() - Send an ABTS response + * @fsp: The FCP packet that is being aborted + * @fp: The response frame + */ static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) { int ba_done = 1; @@ -637,46 +708,13 @@ static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) } /** - * fc_fcp_reduce_can_queue() - drop can_queue - * @lp: lport to drop queueing for - * - * If we are getting memory allocation failures, then we may - * be trying to execute too many commands. We let the running - * commands complete or timeout, then try again with a reduced - * can_queue. Eventually we will hit the point where we run - * on all reserved structs. - */ -static void fc_fcp_reduce_can_queue(struct fc_lport *lp) -{ - struct fc_fcp_internal *si = fc_get_scsi_internal(lp); - unsigned long flags; - int can_queue; - - spin_lock_irqsave(lp->host->host_lock, flags); - if (si->throttled) - goto done; - si->throttled = 1; - - can_queue = lp->host->can_queue; - can_queue >>= 1; - if (!can_queue) - can_queue = 1; - lp->host->can_queue = can_queue; - shost_printk(KERN_ERR, lp->host, "libfc: Could not allocate frame.\n" - "Reducing can_queue to %d.\n", can_queue); -done: - spin_unlock_irqrestore(lp->host->host_lock, flags); -} - -/** - * fc_fcp_recv() - Reveive FCP frames + * fc_fcp_recv() - Reveive an FCP frame * @seq: The sequence the frame is on - * @fp: The FC frame + * @fp: The received frame * @arg: The related FCP packet * - * Return : None - * Context : called from Soft IRQ context - * can not called holding list lock + * Context: Called from Soft IRQ context. Can not be called + * holding the FCP packet list lock. 
*/ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg) { @@ -687,8 +725,10 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg) u8 r_ctl; int rc = 0; - if (IS_ERR(fp)) - goto errout; + if (IS_ERR(fp)) { + fc_fcp_error(fsp, fp); + return; + } fh = fc_frame_header_get(fp); r_ctl = fh->fh_r_ctl; @@ -721,8 +761,6 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg) (size_t) ntohl(dd->ft_burst_len)); if (!rc) seq->rec_data = fsp->xfer_len; - else if (rc == -ENOMEM) - fsp->state |= FC_SRB_NOMEM; } else if (r_ctl == FC_RCTL_DD_SOL_DATA) { /* * received a DATA frame @@ -742,13 +780,13 @@ unlock: fc_fcp_unlock_pkt(fsp); out: fc_frame_free(fp); -errout: - if (IS_ERR(fp)) - fc_fcp_error(fsp, fp); - else if (rc == -ENOMEM) - fc_fcp_reduce_can_queue(lport); } +/** + * fc_fcp_resp() - Handler for FCP responses + * @fsp: The FCP packet the response is for + * @fp: The response frame + */ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) { struct fc_frame_header *fh; @@ -862,15 +900,16 @@ err: } /** - * fc_fcp_complete_locked() - complete processing of a fcp packet - * @fsp: fcp packet + * fc_fcp_complete_locked() - Complete processing of a fcp_pkt with the + * fcp_pkt lock held + * @fsp: The FCP packet to be completed * * This function may sleep if a timer is pending. The packet lock must be * held, and the host lock must not be held. */ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp) { - struct fc_lport *lp = fsp->lp; + struct fc_lport *lport = fsp->lp; struct fc_seq *seq; struct fc_exch *ep; u32 f_ctl; @@ -901,8 +940,8 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp) struct fc_frame *conf_frame; struct fc_seq *csp; - csp = lp->tt.seq_start_next(seq); - conf_frame = fc_frame_alloc(fsp->lp, 0); + csp = lport->tt.seq_start_next(seq); + conf_frame = fc_fcp_frame_alloc(fsp->lp, 0); if (conf_frame) { f_ctl = FC_FC_SEQ_INIT; f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ; @@ -910,43 +949,48 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp) fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL, ep->did, ep->sid, FC_TYPE_FCP, f_ctl, 0); - lp->tt.seq_send(lp, csp, conf_frame); + lport->tt.seq_send(lport, csp, conf_frame); } } - lp->tt.exch_done(seq); + lport->tt.exch_done(seq); } fc_io_compl(fsp); } +/** + * fc_fcp_cleanup_cmd() - Cancel the active exchange on a fcp_pkt + * @fsp: The FCP packet whose exchanges should be canceled + * @error: The reason for the cancellation + */ static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error) { - struct fc_lport *lp = fsp->lp; + struct fc_lport *lport = fsp->lp; if (fsp->seq_ptr) { - lp->tt.exch_done(fsp->seq_ptr); + lport->tt.exch_done(fsp->seq_ptr); fsp->seq_ptr = NULL; } fsp->status_code = error; } /** - * fc_fcp_cleanup_each_cmd() - Cleanup active commads - * @lp: logical port - * @id: target id - * @lun: lun - * @error: fsp status code + * fc_fcp_cleanup_each_cmd() - Cancel all exchanges on a local port + * @lport: The local port whose exchanges should be canceled + * @id: The target's ID + * @lun: The LUN + * @error: The reason for cancellation * * If lun or id is -1, they are ignored. 
*/ -static void fc_fcp_cleanup_each_cmd(struct fc_lport *lp, unsigned int id, +static void fc_fcp_cleanup_each_cmd(struct fc_lport *lport, unsigned int id, unsigned int lun, int error) { - struct fc_fcp_internal *si = fc_get_scsi_internal(lp); + struct fc_fcp_internal *si = fc_get_scsi_internal(lport); struct fc_fcp_pkt *fsp; struct scsi_cmnd *sc_cmd; unsigned long flags; - spin_lock_irqsave(lp->host->host_lock, flags); + spin_lock_irqsave(lport->host->host_lock, flags); restart: list_for_each_entry(fsp, &si->scsi_pkt_queue, list) { sc_cmd = fsp->cmd; @@ -957,7 +1001,7 @@ restart: continue; fc_fcp_pkt_hold(fsp); - spin_unlock_irqrestore(lp->host->host_lock, flags); + spin_unlock_irqrestore(lport->host->host_lock, flags); if (!fc_fcp_lock_pkt(fsp)) { fc_fcp_cleanup_cmd(fsp, error); @@ -966,35 +1010,36 @@ restart: } fc_fcp_pkt_release(fsp); - spin_lock_irqsave(lp->host->host_lock, flags); + spin_lock_irqsave(lport->host->host_lock, flags); /* * while we dropped the lock multiple pkts could * have been released, so we have to start over. */ goto restart; } - spin_unlock_irqrestore(lp->host->host_lock, flags); + spin_unlock_irqrestore(lport->host->host_lock, flags); } -static void fc_fcp_abort_io(struct fc_lport *lp) +/** + * fc_fcp_abort_io() - Abort all FCP-SCSI exchanges on a local port + * @lport: The local port whose exchanges are to be aborted + */ +static void fc_fcp_abort_io(struct fc_lport *lport) { - fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_HRD_ERROR); + fc_fcp_cleanup_each_cmd(lport, -1, -1, FC_HRD_ERROR); } /** - * fc_fcp_pkt_send() - send a fcp packet to the lower level. - * @lp: fc lport - * @fsp: fc packet. + * fc_fcp_pkt_send() - Send a fcp_pkt + * @lport: The local port to send the FCP packet on + * @fsp: The FCP packet to send * - * This is called by upper layer protocol. - * Return : zero for success and -1 for failure - * Context : called from queuecommand which can be called from process - * or scsi soft irq. - * Locks : called with the host lock and irqs disabled. + * Return: Zero for success and -1 for failure + * Locks: Called with the host lock and irqs disabled. 
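
fc_fcp_cleanup_each_cmd() above uses a classic restart-the-scan idiom: it takes a reference, drops the host lock to cancel one packet, and then rescans from the head because the queue may have changed while the lock was released. A user-space sketch of the same idiom, assuming a pthread mutex and a hypothetical singly linked list:

/* Illustrative sketch, not part of the patch. */
#include <pthread.h>
#include <stdbool.h>

struct node {
	struct node *next;
	bool matches;       /* stands in for the id/lun comparison */
	bool cleaned;
};

static void clean_one(struct node *n) { n->cleaned = true; }

static void clean_each(struct node **head, pthread_mutex_t *lock)
{
	struct node *n;

	pthread_mutex_lock(lock);
restart:
	for (n = *head; n; n = n->next) {
		if (!n->matches || n->cleaned)
			continue;
		/* The real code takes a reference here so the packet cannot
		 * be freed while the lock is dropped. */
		pthread_mutex_unlock(lock);
		clean_one(n);
		pthread_mutex_lock(lock);
		/* Others may have changed the list while the lock was
		 * dropped, so start over from the head. */
		goto restart;
	}
	pthread_mutex_unlock(lock);
}
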
*/ -static int fc_fcp_pkt_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp) +static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp) { - struct fc_fcp_internal *si = fc_get_scsi_internal(lp); + struct fc_fcp_internal *si = fc_get_scsi_internal(lport); int rc; fsp->cmd->SCp.ptr = (char *)fsp; @@ -1006,16 +1051,22 @@ static int fc_fcp_pkt_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp) memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len); list_add_tail(&fsp->list, &si->scsi_pkt_queue); - spin_unlock_irq(lp->host->host_lock); - rc = lp->tt.fcp_cmd_send(lp, fsp, fc_fcp_recv); - spin_lock_irq(lp->host->host_lock); + spin_unlock_irq(lport->host->host_lock); + rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv); + spin_lock_irq(lport->host->host_lock); if (rc) list_del(&fsp->list); return rc; } -static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp, +/** + * fc_fcp_cmd_send() - Send a FCP command + * @lport: The local port to send the command on + * @fsp: The FCP packet the command is on + * @resp: The handler for the response + */ +static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp, void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg)) @@ -1023,14 +1074,14 @@ static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp, struct fc_frame *fp; struct fc_seq *seq; struct fc_rport *rport; - struct fc_rport_libfc_priv *rp; + struct fc_rport_libfc_priv *rpriv; const size_t len = sizeof(fsp->cdb_cmd); int rc = 0; if (fc_fcp_lock_pkt(fsp)) return 0; - fp = fc_frame_alloc(lp, sizeof(fsp->cdb_cmd)); + fp = fc_fcp_frame_alloc(lport, sizeof(fsp->cdb_cmd)); if (!fp) { rc = -1; goto unlock; @@ -1040,15 +1091,15 @@ static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp, fr_fsp(fp) = fsp; rport = fsp->rport; fsp->max_payload = rport->maxframe_size; - rp = rport->dd_data; + rpriv = rport->dd_data; fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id, - fc_host_port_id(rp->local_port->host), FC_TYPE_FCP, + fc_host_port_id(rpriv->local_port->host), FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); - seq = lp->tt.exch_seq_send(lp, fp, resp, fc_fcp_pkt_destroy, fsp, 0); + seq = lport->tt.exch_seq_send(lport, fp, resp, fc_fcp_pkt_destroy, + fsp, 0); if (!seq) { - fc_frame_free(fp); rc = -1; goto unlock; } @@ -1065,8 +1116,10 @@ unlock: return rc; } -/* - * transport error handler +/** + * fc_fcp_error() - Handler for FCP layer errors + * @fsp: The FCP packet the error is on + * @fp: The frame that has errored */ static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) { @@ -1091,11 +1144,13 @@ unlock: fc_fcp_unlock_pkt(fsp); } -/* - * Scsi abort handler- calls to send an abort - * and then wait for abort completion +/** + * fc_fcp_pkt_abort() - Abort a fcp_pkt + * @fsp: The FCP packet to abort on + * + * Called to send an abort and then wait for abort completion */ -static int fc_fcp_pkt_abort(struct fc_lport *lp, struct fc_fcp_pkt *fsp) +static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp) { int rc = FAILED; @@ -1122,14 +1177,15 @@ static int fc_fcp_pkt_abort(struct fc_lport *lp, struct fc_fcp_pkt *fsp) return rc; } -/* - * Retry LUN reset after resource allocation failed. 
+/** + * fc_lun_reset_send() - Send LUN reset command + * @data: The FCP packet that identifies the LUN to be reset */ static void fc_lun_reset_send(unsigned long data) { struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data; - struct fc_lport *lp = fsp->lp; - if (lp->tt.fcp_cmd_send(lp, fsp, fc_tm_done)) { + struct fc_lport *lport = fsp->lp; + if (lport->tt.fcp_cmd_send(lport, fsp, fc_tm_done)) { if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY) return; if (fc_fcp_lock_pkt(fsp)) @@ -1140,11 +1196,15 @@ static void fc_lun_reset_send(unsigned long data) } } -/* - * Scsi device reset handler- send a LUN RESET to the device - * and wait for reset reply +/** + * fc_lun_reset() - Send a LUN RESET command to a device + * and wait for the reply + * @lport: The local port to sent the comand on + * @fsp: The FCP packet that identifies the LUN to be reset + * @id: The SCSI command ID + * @lun: The LUN ID to be reset */ -static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp, +static int fc_lun_reset(struct fc_lport *lport, struct fc_fcp_pkt *fsp, unsigned int id, unsigned int lun) { int rc; @@ -1172,14 +1232,14 @@ static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp, spin_lock_bh(&fsp->scsi_pkt_lock); if (fsp->seq_ptr) { - lp->tt.exch_done(fsp->seq_ptr); + lport->tt.exch_done(fsp->seq_ptr); fsp->seq_ptr = NULL; } fsp->wait_for_comp = 0; spin_unlock_bh(&fsp->scsi_pkt_lock); if (!rc) { - FC_SCSI_DBG(lp, "lun reset failed\n"); + FC_SCSI_DBG(lport, "lun reset failed\n"); return FAILED; } @@ -1187,13 +1247,16 @@ static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp, if (fsp->cdb_status != FCP_TMF_CMPL) return FAILED; - FC_SCSI_DBG(lp, "lun reset to lun %u completed\n", lun); - fc_fcp_cleanup_each_cmd(lp, id, lun, FC_CMD_ABORTED); + FC_SCSI_DBG(lport, "lun reset to lun %u completed\n", lun); + fc_fcp_cleanup_each_cmd(lport, id, lun, FC_CMD_ABORTED); return SUCCESS; } -/* - * Task Managment response handler +/** + * fc_tm_done() - Task Managment response handler + * @seq: The sequence that the response is on + * @fp: The response frame + * @arg: The FCP packet the response is for */ static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg) { @@ -1230,34 +1293,31 @@ static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg) fc_fcp_unlock_pkt(fsp); } -static void fc_fcp_cleanup(struct fc_lport *lp) +/** + * fc_fcp_cleanup() - Cleanup all FCP exchanges on a local port + * @lport: The local port to be cleaned up + */ +static void fc_fcp_cleanup(struct fc_lport *lport) { - fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_ERROR); + fc_fcp_cleanup_each_cmd(lport, -1, -1, FC_ERROR); } -/* - * fc_fcp_timeout: called by OS timer function. - * - * The timer has been inactivated and must be reactivated if desired - * using fc_fcp_timer_set(). - * - * Algorithm: - * - * If REC is supported, just issue it, and return. The REC exchange will - * complete or time out, and recovery can continue at that point. - * - * Otherwise, if the response has been received without all the data, - * it has been ER_TIMEOUT since the response was received. +/** + * fc_fcp_timeout() - Handler for fcp_pkt timeouts + * @data: The FCP packet that has timed out * - * If the response has not been received, - * we see if data was received recently. If it has been, we continue waiting, - * otherwise, we abort the command. + * If REC is supported then just issue it and return. The REC exchange will + * complete or time out and recovery can continue at that point. 
Otherwise, + * if the response has been received without all the data it has been + * ER_TIMEOUT since the response was received. If the response has not been + * received we see if data was received recently. If it has been then we + * continue waiting, otherwise, we abort the command. */ static void fc_fcp_timeout(unsigned long data) { struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data; struct fc_rport *rport = fsp->rport; - struct fc_rport_libfc_priv *rp = rport->dd_data; + struct fc_rport_libfc_priv *rpriv = rport->dd_data; if (fc_fcp_lock_pkt(fsp)) return; @@ -1267,7 +1327,7 @@ static void fc_fcp_timeout(unsigned long data) fsp->state |= FC_SRB_FCP_PROCESSING_TMO; - if (rp->flags & FC_RP_FLAGS_REC_SUPPORTED) + if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED) fc_fcp_rec(fsp); else if (time_after_eq(fsp->last_pkt_time + (FC_SCSI_ER_TIMEOUT / 2), jiffies)) @@ -1281,39 +1341,40 @@ unlock: fc_fcp_unlock_pkt(fsp); } -/* - * Send a REC ELS request +/** + * fc_fcp_rec() - Send a REC ELS request + * @fsp: The FCP packet to send the REC request on */ static void fc_fcp_rec(struct fc_fcp_pkt *fsp) { - struct fc_lport *lp; + struct fc_lport *lport; struct fc_frame *fp; struct fc_rport *rport; - struct fc_rport_libfc_priv *rp; + struct fc_rport_libfc_priv *rpriv; - lp = fsp->lp; + lport = fsp->lp; rport = fsp->rport; - rp = rport->dd_data; - if (!fsp->seq_ptr || rp->rp_state != RPORT_ST_READY) { + rpriv = rport->dd_data; + if (!fsp->seq_ptr || rpriv->rp_state != RPORT_ST_READY) { fsp->status_code = FC_HRD_ERROR; fsp->io_status = 0; fc_fcp_complete_locked(fsp); return; } - fp = fc_frame_alloc(lp, sizeof(struct fc_els_rec)); + fp = fc_fcp_frame_alloc(lport, sizeof(struct fc_els_rec)); if (!fp) goto retry; fr_seq(fp) = fsp->seq_ptr; fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id, - fc_host_port_id(rp->local_port->host), FC_TYPE_ELS, + fc_host_port_id(rpriv->local_port->host), FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); - if (lp->tt.elsct_send(lp, rport->port_id, fp, ELS_REC, fc_fcp_rec_resp, - fsp, jiffies_to_msecs(FC_SCSI_REC_TOV))) { + if (lport->tt.elsct_send(lport, rport->port_id, fp, ELS_REC, + fc_fcp_rec_resp, fsp, + jiffies_to_msecs(FC_SCSI_REC_TOV))) { fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */ return; } - fc_frame_free(fp); retry: if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV); @@ -1321,12 +1382,16 @@ retry: fc_timeout_error(fsp); } -/* - * Receive handler for REC ELS frame - * if it is a reject then let the scsi layer to handle - * the timeout. if it is a LS_ACC then if the io was not completed - * then set the timeout and return otherwise complete the exchange - * and tell the scsi layer to restart the I/O. +/** + * fc_fcp_rec_resp() - Handler for REC ELS responses + * @seq: The sequence the response is on + * @fp: The response frame + * @arg: The FCP packet the response is on + * + * If the response is a reject then the scsi layer will handle + * the timeout. If the response is a LS_ACC then if the I/O was not completed + * set the timeout and return. If the I/O was completed then complete the + * exchange and tell the SCSI layer. 
*/ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) { @@ -1338,7 +1403,7 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) u32 offset; enum dma_data_direction data_dir; enum fc_rctl r_ctl; - struct fc_rport_libfc_priv *rp; + struct fc_rport_libfc_priv *rpriv; if (IS_ERR(fp)) { fc_fcp_rec_error(fsp, fp); @@ -1361,13 +1426,13 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) /* fall through */ case ELS_RJT_UNSUP: FC_FCP_DBG(fsp, "device does not support REC\n"); - rp = fsp->rport->dd_data; + rpriv = fsp->rport->dd_data; /* * if we do not spport RECs or got some bogus * reason then resetup timer so we check for * making progress. */ - rp->flags &= ~FC_RP_FLAGS_REC_SUPPORTED; + rpriv->flags &= ~FC_RP_FLAGS_REC_SUPPORTED; fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT); break; case ELS_RJT_LOGIC: @@ -1464,8 +1529,10 @@ out: fc_frame_free(fp); } -/* - * Handle error response or timeout for REC exchange. +/** + * fc_fcp_rec_error() - Handler for REC errors + * @fsp: The FCP packet the error is on + * @fp: The REC frame */ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) { @@ -1504,10 +1571,9 @@ out: fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */ } -/* - * Time out error routine: - * abort's the I/O close the exchange and - * send completion notification to scsi layer +/** + * fc_timeout_error() - Handler for fcp_pkt timeouts + * @fsp: The FCP packt that has timed out */ static void fc_timeout_error(struct fc_fcp_pkt *fsp) { @@ -1521,16 +1587,18 @@ static void fc_timeout_error(struct fc_fcp_pkt *fsp) fc_fcp_send_abort(fsp); } -/* - * Sequence retransmission request. +/** + * fc_fcp_srr() - Send a SRR request (Sequence Retransmission Request) + * @fsp: The FCP packet the SRR is to be sent on + * @r_ctl: The R_CTL field for the SRR request * This is called after receiving status but insufficient data, or * when expecting status but the request has timed out. 
*/ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset) { - struct fc_lport *lp = fsp->lp; + struct fc_lport *lport = fsp->lp; struct fc_rport *rport; - struct fc_rport_libfc_priv *rp; + struct fc_rport_libfc_priv *rpriv; struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr); struct fc_seq *seq; struct fcp_srr *srr; @@ -1538,12 +1606,13 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset) u8 cdb_op; rport = fsp->rport; - rp = rport->dd_data; + rpriv = rport->dd_data; cdb_op = fsp->cdb_cmd.fc_cdb[0]; - if (!(rp->flags & FC_RP_FLAGS_RETRY) || rp->rp_state != RPORT_ST_READY) + if (!(rpriv->flags & FC_RP_FLAGS_RETRY) || + rpriv->rp_state != RPORT_ST_READY) goto retry; /* shouldn't happen */ - fp = fc_frame_alloc(lp, sizeof(*srr)); + fp = fc_fcp_frame_alloc(lport, sizeof(*srr)); if (!fp) goto retry; @@ -1556,15 +1625,14 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset) srr->srr_rel_off = htonl(offset); fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id, - fc_host_port_id(rp->local_port->host), FC_TYPE_FCP, + fc_host_port_id(rpriv->local_port->host), FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); - seq = lp->tt.exch_seq_send(lp, fp, fc_fcp_srr_resp, NULL, - fsp, jiffies_to_msecs(FC_SCSI_REC_TOV)); - if (!seq) { - fc_frame_free(fp); + seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp, NULL, + fsp, jiffies_to_msecs(FC_SCSI_REC_TOV)); + if (!seq) goto retry; - } + fsp->recov_seq = seq; fsp->xfer_len = offset; fsp->xfer_contig_end = offset; @@ -1575,8 +1643,11 @@ retry: fc_fcp_retry_cmd(fsp); } -/* - * Handle response from SRR. +/** + * fc_fcp_srr_resp() - Handler for SRR response + * @seq: The sequence the SRR is on + * @fp: The SRR frame + * @arg: The FCP packet the SRR is on */ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) { @@ -1622,6 +1693,11 @@ out: fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */ } +/** + * fc_fcp_srr_error() - Handler for SRR errors + * @fsp: The FCP packet that the SRR error is on + * @fp: The SRR frame + */ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) { if (fc_fcp_lock_pkt(fsp)) @@ -1646,31 +1722,36 @@ out: fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */ } -static inline int fc_fcp_lport_queue_ready(struct fc_lport *lp) +/** + * fc_fcp_lport_queue_ready() - Determine if the lport and it's queue is ready + * @lport: The local port to be checked + */ +static inline int fc_fcp_lport_queue_ready(struct fc_lport *lport) { /* lock ? */ - return (lp->state == LPORT_ST_READY) && lp->link_up && !lp->qfull; + return (lport->state == LPORT_ST_READY) && + lport->link_up && !lport->qfull; } /** - * fc_queuecommand - The queuecommand function of the scsi template - * @cmd: struct scsi_cmnd to be executed - * @done: Callback function to be called when cmd is completed + * fc_queuecommand() - The queuecommand function of the SCSI template + * @cmd: The scsi_cmnd to be executed + * @done: The callback function to be called when the scsi_cmnd is complete * - * this is the i/o strategy routine, called by the scsi layer - * this routine is called with holding the host_lock. + * This is the i/o strategy routine, called by the SCSI layer. This routine + * is called with the host_lock held. 
*/ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) { - struct fc_lport *lp; + struct fc_lport *lport; struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); struct fc_fcp_pkt *fsp; - struct fc_rport_libfc_priv *rp; + struct fc_rport_libfc_priv *rpriv; int rval; int rc = 0; struct fcoe_dev_stats *stats; - lp = shost_priv(sc_cmd->device->host); + lport = shost_priv(sc_cmd->device->host); rval = fc_remote_port_chkready(rport); if (rval) { @@ -1689,14 +1770,16 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) goto out; } - rp = rport->dd_data; + rpriv = rport->dd_data; - if (!fc_fcp_lport_queue_ready(lp)) { + if (!fc_fcp_lport_queue_ready(lport)) { + if (lport->qfull) + fc_fcp_can_queue_ramp_down(lport); rc = SCSI_MLQUEUE_HOST_BUSY; goto out; } - fsp = fc_fcp_pkt_alloc(lp, GFP_ATOMIC); + fsp = fc_fcp_pkt_alloc(lport, GFP_ATOMIC); if (fsp == NULL) { rc = SCSI_MLQUEUE_HOST_BUSY; goto out; @@ -1706,8 +1789,9 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) * build the libfc request pkt */ fsp->cmd = sc_cmd; /* save the cmd */ - fsp->lp = lp; /* save the softc ptr */ + fsp->lp = lport; /* save the softc ptr */ fsp->rport = rport; /* set the remote port ptr */ + fsp->xfer_ddp = FC_XID_UNKNOWN; sc_cmd->scsi_done = done; /* @@ -1719,7 +1803,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) /* * setup the data direction */ - stats = fc_lport_get_stats(lp); + stats = fc_lport_get_stats(lport); if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) { fsp->req_flags = FC_SRB_READ; stats->InputRequests++; @@ -1733,7 +1817,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) stats->ControlRequests++; } - fsp->tgt_flags = rp->flags; + fsp->tgt_flags = rpriv->flags; init_timer(&fsp->timer); fsp->timer.data = (unsigned long)fsp; @@ -1743,7 +1827,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) * if we get -1 return then put the request in the pending * queue. */ - rval = fc_fcp_pkt_send(lp, fsp); + rval = fc_fcp_pkt_send(lport, fsp); if (rval != 0) { fsp->state = FC_SRB_FREE; fc_fcp_pkt_release(fsp); @@ -1755,18 +1839,17 @@ out: EXPORT_SYMBOL(fc_queuecommand); /** - * fc_io_compl() - Handle responses for completed commands - * @fsp: scsi packet - * - * Translates a error to a Linux SCSI error. + * fc_io_compl() - Handle responses for completed commands + * @fsp: The FCP packet that is complete * + * Translates fcp_pkt errors to a Linux SCSI errors. * The fcp packet lock must be held when calling. */ static void fc_io_compl(struct fc_fcp_pkt *fsp) { struct fc_fcp_internal *si; struct scsi_cmnd *sc_cmd; - struct fc_lport *lp; + struct fc_lport *lport; unsigned long flags; /* release outstanding ddp context */ @@ -1779,28 +1862,26 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp) spin_lock_bh(&fsp->scsi_pkt_lock); } - lp = fsp->lp; - si = fc_get_scsi_internal(lp); - spin_lock_irqsave(lp->host->host_lock, flags); + lport = fsp->lp; + si = fc_get_scsi_internal(lport); + spin_lock_irqsave(lport->host->host_lock, flags); if (!fsp->cmd) { - spin_unlock_irqrestore(lp->host->host_lock, flags); + spin_unlock_irqrestore(lport->host->host_lock, flags); return; } /* - * if a command timed out while we had to try and throttle IO - * and it is now getting cleaned up, then we are about to - * try again so clear the throttled flag incase we get more - * time outs. 
+ * if can_queue ramp down is done then try can_queue ramp up + * since commands are completing now. */ - if (si->throttled && fsp->state & FC_SRB_NOMEM) - si->throttled = 0; + if (si->last_can_queue_ramp_down_time) + fc_fcp_can_queue_ramp_up(lport); sc_cmd = fsp->cmd; fsp->cmd = NULL; if (!sc_cmd->SCp.ptr) { - spin_unlock_irqrestore(lp->host->host_lock, flags); + spin_unlock_irqrestore(lport->host->host_lock, flags); return; } @@ -1814,21 +1895,6 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp) sc_cmd->result = DID_OK << 16; if (fsp->scsi_resid) CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid; - } else if (fsp->cdb_status == QUEUE_FULL) { - struct scsi_device *tmp_sdev; - struct scsi_device *sdev = sc_cmd->device; - - shost_for_each_device(tmp_sdev, sdev->host) { - if (tmp_sdev->id != sdev->id) - continue; - - if (tmp_sdev->queue_depth > 1) { - scsi_track_queue_full(tmp_sdev, - tmp_sdev-> - queue_depth - 1); - } - } - sc_cmd->result = (DID_OK << 16) | fsp->cdb_status; } else { /* * transport level I/O was ok but scsi @@ -1846,7 +1912,8 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp) * scsi status is good but transport level * underrun. */ - sc_cmd->result = DID_OK << 16; + sc_cmd->result = (fsp->state & FC_SRB_RCV_STATUS ? + DID_OK : DID_ERROR) << 16; } else { /* * scsi got underrun, this is an error @@ -1881,60 +1948,42 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp) list_del(&fsp->list); sc_cmd->SCp.ptr = NULL; sc_cmd->scsi_done(sc_cmd); - spin_unlock_irqrestore(lp->host->host_lock, flags); + spin_unlock_irqrestore(lport->host->host_lock, flags); /* release ref from initial allocation in queue command */ fc_fcp_pkt_release(fsp); } /** - * fc_fcp_complete() - complete processing of a fcp packet - * @fsp: fcp packet - * - * This function may sleep if a fsp timer is pending. - * The host lock must not be held by caller. - */ -void fc_fcp_complete(struct fc_fcp_pkt *fsp) -{ - if (fc_fcp_lock_pkt(fsp)) - return; - - fc_fcp_complete_locked(fsp); - fc_fcp_unlock_pkt(fsp); -} -EXPORT_SYMBOL(fc_fcp_complete); - -/** * fc_eh_abort() - Abort a command - * @sc_cmd: scsi command to abort + * @sc_cmd: The SCSI command to abort * - * From scsi host template. - * send ABTS to the target device and wait for the response - * sc_cmd is the pointer to the command to be aborted. + * From SCSI host template. + * Send an ABTS to the target device and wait for the response. 
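
Two behavioural changes in fc_io_compl() above are easy to miss: a completion now tries to ramp can_queue back up once a ramp-down has happened, and a transport-level underrun with good SCSI status is only reported as DID_OK when the FCP status frame was actually received (FC_SRB_RCV_STATUS); otherwise it becomes DID_ERROR. A tiny sketch of that result mapping, with the boolean standing in for the FC_SRB_RCV_STATUS test:

/* Illustrative sketch, not part of the patch. */
#include <stdbool.h>

#define DID_OK    0x00
#define DID_ERROR 0x07

/* SCSI status was good but fewer bytes arrived than requested: trust the
 * transfer only if the FCP response (status) frame really was received. */
static int underrun_host_byte(bool status_frame_received)
{
	return (status_frame_received ? DID_OK : DID_ERROR) << 16;
}
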
*/ int fc_eh_abort(struct scsi_cmnd *sc_cmd) { struct fc_fcp_pkt *fsp; - struct fc_lport *lp; + struct fc_lport *lport; int rc = FAILED; unsigned long flags; - lp = shost_priv(sc_cmd->device->host); - if (lp->state != LPORT_ST_READY) + lport = shost_priv(sc_cmd->device->host); + if (lport->state != LPORT_ST_READY) return rc; - else if (!lp->link_up) + else if (!lport->link_up) return rc; - spin_lock_irqsave(lp->host->host_lock, flags); + spin_lock_irqsave(lport->host->host_lock, flags); fsp = CMD_SP(sc_cmd); if (!fsp) { /* command completed while scsi eh was setting up */ - spin_unlock_irqrestore(lp->host->host_lock, flags); + spin_unlock_irqrestore(lport->host->host_lock, flags); return SUCCESS; } /* grab a ref so the fsp and sc_cmd cannot be relased from under us */ fc_fcp_pkt_hold(fsp); - spin_unlock_irqrestore(lp->host->host_lock, flags); + spin_unlock_irqrestore(lport->host->host_lock, flags); if (fc_fcp_lock_pkt(fsp)) { /* completed while we were waiting for timer to be deleted */ @@ -1942,7 +1991,7 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd) goto release_pkt; } - rc = fc_fcp_pkt_abort(lp, fsp); + rc = fc_fcp_pkt_abort(fsp); fc_fcp_unlock_pkt(fsp); release_pkt: @@ -1952,37 +2001,34 @@ release_pkt: EXPORT_SYMBOL(fc_eh_abort); /** - * fc_eh_device_reset() Reset a single LUN - * @sc_cmd: scsi command + * fc_eh_device_reset() - Reset a single LUN + * @sc_cmd: The SCSI command which identifies the device whose + * LUN is to be reset * - * Set from scsi host template to send tm cmd to the target and wait for the - * response. + * Set from SCSI host template. */ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd) { - struct fc_lport *lp; + struct fc_lport *lport; struct fc_fcp_pkt *fsp; struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); int rc = FAILED; - struct fc_rport_libfc_priv *rp; int rval; rval = fc_remote_port_chkready(rport); if (rval) goto out; - rp = rport->dd_data; - lp = shost_priv(sc_cmd->device->host); + lport = shost_priv(sc_cmd->device->host); - if (lp->state != LPORT_ST_READY) + if (lport->state != LPORT_ST_READY) return rc; - FC_SCSI_DBG(lp, "Resetting rport (%6x)\n", rport->port_id); + FC_SCSI_DBG(lport, "Resetting rport (%6x)\n", rport->port_id); - fsp = fc_fcp_pkt_alloc(lp, GFP_NOIO); + fsp = fc_fcp_pkt_alloc(lport, GFP_NOIO); if (fsp == NULL) { printk(KERN_WARNING "libfc: could not allocate scsi_pkt\n"); - sc_cmd->result = DID_NO_CONNECT << 16; goto out; } @@ -1991,13 +2037,13 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd) * the sc passed in is not setup for execution like when sent * through the queuecommand callout. */ - fsp->lp = lp; /* save the softc ptr */ + fsp->lp = lport; /* save the softc ptr */ fsp->rport = rport; /* set the remote port ptr */ /* * flush outstanding commands */ - rc = fc_lun_reset(lp, fsp, scmd_id(sc_cmd), sc_cmd->device->lun); + rc = fc_lun_reset(lport, fsp, scmd_id(sc_cmd), sc_cmd->device->lun); fsp->state = FC_SRB_FREE; fc_fcp_pkt_release(fsp); @@ -2007,38 +2053,39 @@ out: EXPORT_SYMBOL(fc_eh_device_reset); /** - * fc_eh_host_reset() - The reset function will reset the ports on the host. - * @sc_cmd: scsi command + * fc_eh_host_reset() - Reset a Scsi_Host. 
+ * @sc_cmd: The SCSI command that identifies the SCSI host to be reset */ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd) { struct Scsi_Host *shost = sc_cmd->device->host; - struct fc_lport *lp = shost_priv(shost); + struct fc_lport *lport = shost_priv(shost); unsigned long wait_tmo; - FC_SCSI_DBG(lp, "Resetting host\n"); + FC_SCSI_DBG(lport, "Resetting host\n"); - lp->tt.lport_reset(lp); + lport->tt.lport_reset(lport); wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT; - while (!fc_fcp_lport_queue_ready(lp) && time_before(jiffies, wait_tmo)) + while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies, + wait_tmo)) msleep(1000); - if (fc_fcp_lport_queue_ready(lp)) { + if (fc_fcp_lport_queue_ready(lport)) { shost_printk(KERN_INFO, shost, "libfc: Host reset succeeded " - "on port (%6x)\n", fc_host_port_id(lp->host)); + "on port (%6x)\n", fc_host_port_id(lport->host)); return SUCCESS; } else { shost_printk(KERN_INFO, shost, "libfc: Host reset failed, " "port (%6x) is not ready.\n", - fc_host_port_id(lp->host)); + fc_host_port_id(lport->host)); return FAILED; } } EXPORT_SYMBOL(fc_eh_host_reset); /** - * fc_slave_alloc() - configure queue depth - * @sdev: scsi device + * fc_slave_alloc() - Configure the queue depth of a Scsi_Host + * @sdev: The SCSI device that identifies the SCSI host * * Configures queue depth based on host's cmd_per_len. If not set * then we use the libfc default. @@ -2046,29 +2093,50 @@ EXPORT_SYMBOL(fc_eh_host_reset); int fc_slave_alloc(struct scsi_device *sdev) { struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); - int queue_depth; if (!rport || fc_remote_port_chkready(rport)) return -ENXIO; - if (sdev->tagged_supported) { - if (sdev->host->hostt->cmd_per_lun) - queue_depth = sdev->host->hostt->cmd_per_lun; - else - queue_depth = FC_FCP_DFLT_QUEUE_DEPTH; - scsi_activate_tcq(sdev, queue_depth); - } + if (sdev->tagged_supported) + scsi_activate_tcq(sdev, FC_FCP_DFLT_QUEUE_DEPTH); + else + scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), + FC_FCP_DFLT_QUEUE_DEPTH); + return 0; } EXPORT_SYMBOL(fc_slave_alloc); -int fc_change_queue_depth(struct scsi_device *sdev, int qdepth) +/** + * fc_change_queue_depth() - Change a device's queue depth + * @sdev: The SCSI device whose queue depth is to change + * @qdepth: The new queue depth + * @reason: The resason for the change + */ +int fc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) { - scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); + switch (reason) { + case SCSI_QDEPTH_DEFAULT: + scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); + break; + case SCSI_QDEPTH_QFULL: + scsi_track_queue_full(sdev, qdepth); + break; + case SCSI_QDEPTH_RAMP_UP: + scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); + break; + default: + return -EOPNOTSUPP; + } return sdev->queue_depth; } EXPORT_SYMBOL(fc_change_queue_depth); +/** + * fc_change_queue_type() - Change a device's queue type + * @sdev: The SCSI device whose queue depth is to change + * @tag_type: Identifier for queue type + */ int fc_change_queue_type(struct scsi_device *sdev, int tag_type) { if (sdev->tagged_supported) { @@ -2084,38 +2152,69 @@ int fc_change_queue_type(struct scsi_device *sdev, int tag_type) } EXPORT_SYMBOL(fc_change_queue_type); -void fc_fcp_destroy(struct fc_lport *lp) +/** + * fc_fcp_destory() - Tear down the FCP layer for a given local port + * @lport: The local port that no longer needs the FCP layer + */ +void fc_fcp_destroy(struct fc_lport *lport) { - struct fc_fcp_internal *si = 
fc_get_scsi_internal(lp); + struct fc_fcp_internal *si = fc_get_scsi_internal(lport); if (!list_empty(&si->scsi_pkt_queue)) printk(KERN_ERR "libfc: Leaked SCSI packets when destroying " - "port (%6x)\n", fc_host_port_id(lp->host)); + "port (%6x)\n", fc_host_port_id(lport->host)); mempool_destroy(si->scsi_pkt_pool); kfree(si); - lp->scsi_priv = NULL; + lport->scsi_priv = NULL; } EXPORT_SYMBOL(fc_fcp_destroy); -int fc_fcp_init(struct fc_lport *lp) +int fc_setup_fcp() +{ + int rc = 0; + + scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt", + sizeof(struct fc_fcp_pkt), + 0, SLAB_HWCACHE_ALIGN, NULL); + if (!scsi_pkt_cachep) { + printk(KERN_ERR "libfc: Unable to allocate SRB cache, " + "module load failed!"); + rc = -ENOMEM; + } + + return rc; +} + +void fc_destroy_fcp() +{ + if (scsi_pkt_cachep) + kmem_cache_destroy(scsi_pkt_cachep); +} + +/** + * fc_fcp_init() - Initialize the FCP layer for a local port + * @lport: The local port to initialize the exchange layer for + */ +int fc_fcp_init(struct fc_lport *lport) { int rc; struct fc_fcp_internal *si; - if (!lp->tt.fcp_cmd_send) - lp->tt.fcp_cmd_send = fc_fcp_cmd_send; + if (!lport->tt.fcp_cmd_send) + lport->tt.fcp_cmd_send = fc_fcp_cmd_send; - if (!lp->tt.fcp_cleanup) - lp->tt.fcp_cleanup = fc_fcp_cleanup; + if (!lport->tt.fcp_cleanup) + lport->tt.fcp_cleanup = fc_fcp_cleanup; - if (!lp->tt.fcp_abort_io) - lp->tt.fcp_abort_io = fc_fcp_abort_io; + if (!lport->tt.fcp_abort_io) + lport->tt.fcp_abort_io = fc_fcp_abort_io; si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL); if (!si) return -ENOMEM; - lp->scsi_priv = si; + lport->scsi_priv = si; + si->max_can_queue = lport->host->can_queue; INIT_LIST_HEAD(&si->scsi_pkt_queue); si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep); @@ -2130,42 +2229,3 @@ free_internal: return rc; } EXPORT_SYMBOL(fc_fcp_init); - -static int __init libfc_init(void) -{ - int rc; - - scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt", - sizeof(struct fc_fcp_pkt), - 0, SLAB_HWCACHE_ALIGN, NULL); - if (scsi_pkt_cachep == NULL) { - printk(KERN_ERR "libfc: Unable to allocate SRB cache, " - "module load failed!"); - return -ENOMEM; - } - - rc = fc_setup_exch_mgr(); - if (rc) - goto destroy_pkt_cache; - - rc = fc_setup_rport(); - if (rc) - goto destroy_em; - - return rc; -destroy_em: - fc_destroy_exch_mgr(); -destroy_pkt_cache: - kmem_cache_destroy(scsi_pkt_cachep); - return rc; -} - -static void __exit libfc_exit(void) -{ - kmem_cache_destroy(scsi_pkt_cachep); - fc_destroy_exch_mgr(); - fc_destroy_rport(); -} - -module_init(libfc_init); -module_exit(libfc_exit); diff --git a/drivers/scsi/libfc/fc_frame.c b/drivers/scsi/libfc/fc_frame.c index 63fe00cfe66..6da01c61696 100644 --- a/drivers/scsi/libfc/fc_frame.c +++ b/drivers/scsi/libfc/fc_frame.c @@ -51,24 +51,24 @@ EXPORT_SYMBOL(fc_frame_crc_check); * Allocate a frame intended to be sent via fcoe_xmit. * Get an sk_buff for the frame and set the length. 
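
_fc_frame_alloc() now over-allocates and reserves NET_SKB_PAD plus the FC headroom up front, so lower layers (FCoE/Ethernet) can prepend their headers without reallocating or copying the frame. A plain-malloc sketch of that reserve-then-put layout, with placeholder sizes instead of the real headroom constants:

/* Illustrative sketch, not part of the patch. */
#include <stdlib.h>
#include <string.h>

#define HEADROOM 64            /* space lower layers may prepend into */
#define TAILROOM 16            /* space for a trailing CRC/EOF */

struct buf {
	unsigned char *head;   /* start of the allocation */
	unsigned char *data;   /* start of the FC frame header */
	size_t len;            /* bytes of frame currently present */
};

static struct buf *frame_alloc(size_t frame_len)
{
	struct buf *b = malloc(sizeof(*b));
	if (!b)
		return NULL;
	b->head = malloc(HEADROOM + frame_len + TAILROOM);
	if (!b->head) {
		free(b);
		return NULL;
	}
	b->data = b->head + HEADROOM;  /* skb_reserve() equivalent */
	b->len = frame_len;            /* skb_put() equivalent */
	memset(b->data, 0, frame_len);
	return b;
}
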
*/ -struct fc_frame *__fc_frame_alloc(size_t len) +struct fc_frame *_fc_frame_alloc(size_t len) { struct fc_frame *fp; struct sk_buff *skb; WARN_ON((len % sizeof(u32)) != 0); len += sizeof(struct fc_frame_header); - skb = dev_alloc_skb(len + FC_FRAME_HEADROOM + FC_FRAME_TAILROOM); + skb = alloc_skb_fclone(len + FC_FRAME_HEADROOM + FC_FRAME_TAILROOM + + NET_SKB_PAD, GFP_ATOMIC); if (!skb) return NULL; + skb_reserve(skb, NET_SKB_PAD + FC_FRAME_HEADROOM); fp = (struct fc_frame *) skb; fc_frame_init(fp); - skb_reserve(skb, FC_FRAME_HEADROOM); skb_put(skb, len); return fp; } -EXPORT_SYMBOL(__fc_frame_alloc); - +EXPORT_SYMBOL(_fc_frame_alloc); struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len) { @@ -78,7 +78,7 @@ struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len) fill = payload_len % 4; if (fill != 0) fill = 4 - fill; - fp = __fc_frame_alloc(payload_len + fill); + fp = _fc_frame_alloc(payload_len + fill); if (fp) { memset((char *) fr_hdr(fp) + payload_len, 0, fill); /* trim is OK, we just allocated it so there are no fragments */ @@ -87,3 +87,4 @@ struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len) } return fp; } +EXPORT_SYMBOL(fc_frame_alloc_fill); diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c new file mode 100644 index 00000000000..39f4b6ab04b --- /dev/null +++ b/drivers/scsi/libfc/fc_libfc.c @@ -0,0 +1,134 @@ +/* + * Copyright(c) 2009 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * Maintained at www.Open-FCoE.org + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/scatterlist.h> +#include <linux/crc32.h> + +#include <scsi/libfc.h> + +#include "fc_libfc.h" + +MODULE_AUTHOR("Open-FCoE.org"); +MODULE_DESCRIPTION("libfc"); +MODULE_LICENSE("GPL v2"); + +unsigned int fc_debug_logging; +module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); + +/** + * libfc_init() - Initialize libfc.ko + */ +static int __init libfc_init(void) +{ + int rc = 0; + + rc = fc_setup_fcp(); + if (rc) + return rc; + + rc = fc_setup_exch_mgr(); + if (rc) + goto destroy_pkt_cache; + + rc = fc_setup_rport(); + if (rc) + goto destroy_em; + + return rc; +destroy_em: + fc_destroy_exch_mgr(); +destroy_pkt_cache: + fc_destroy_fcp(); + return rc; +} +module_init(libfc_init); + +/** + * libfc_exit() - Tear down libfc.ko + */ +static void __exit libfc_exit(void) +{ + fc_destroy_fcp(); + fc_destroy_exch_mgr(); + fc_destroy_rport(); +} +module_exit(libfc_exit); + +/** + * fc_copy_buffer_to_sglist() - This routine copies the data of a buffer + * into a scatter-gather list (SG list). + * + * @buf: pointer to the data buffer. + * @len: the byte-length of the data buffer. + * @sg: pointer to the pointer of the SG list. 
+ * @nents: pointer to the remaining number of entries in the SG list. + * @offset: pointer to the current offset in the SG list. + * @km_type: dedicated page table slot type for kmap_atomic. + * @crc: pointer to the 32-bit crc value. + * If crc is NULL, CRC is not calculated. + */ +u32 fc_copy_buffer_to_sglist(void *buf, size_t len, + struct scatterlist *sg, + u32 *nents, size_t *offset, + enum km_type km_type, u32 *crc) +{ + size_t remaining = len; + u32 copy_len = 0; + + while (remaining > 0 && sg) { + size_t off, sg_bytes; + void *page_addr; + + if (*offset >= sg->length) { + /* + * Check for end and drop resources + * from the last iteration. + */ + if (!(*nents)) + break; + --(*nents); + *offset -= sg->length; + sg = sg_next(sg); + continue; + } + sg_bytes = min(remaining, sg->length - *offset); + + /* + * The scatterlist item may be bigger than PAGE_SIZE, + * but we are limited to mapping PAGE_SIZE at a time. + */ + off = *offset + sg->offset; + sg_bytes = min(sg_bytes, + (size_t)(PAGE_SIZE - (off & ~PAGE_MASK))); + page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT), + km_type); + if (crc) + *crc = crc32(*crc, buf, sg_bytes); + memcpy((char *)page_addr + (off & ~PAGE_MASK), buf, sg_bytes); + kunmap_atomic(page_addr, km_type); + buf += sg_bytes; + *offset += sg_bytes; + remaining -= sg_bytes; + copy_len += sg_bytes; + } + return copy_len; +} diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h new file mode 100644 index 00000000000..741fd5c72e1 --- /dev/null +++ b/drivers/scsi/libfc/fc_libfc.h @@ -0,0 +1,112 @@ +/* + * Copyright(c) 2009 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * Maintained at www.Open-FCoE.org + */ + +#ifndef _FC_LIBFC_H_ +#define _FC_LIBFC_H_ + +#define FC_LIBFC_LOGGING 0x01 /* General logging, not categorized */ +#define FC_LPORT_LOGGING 0x02 /* lport layer logging */ +#define FC_DISC_LOGGING 0x04 /* discovery layer logging */ +#define FC_RPORT_LOGGING 0x08 /* rport layer logging */ +#define FC_FCP_LOGGING 0x10 /* I/O path logging */ +#define FC_EM_LOGGING 0x20 /* Exchange Manager logging */ +#define FC_EXCH_LOGGING 0x40 /* Exchange/Sequence logging */ +#define FC_SCSI_LOGGING 0x80 /* SCSI logging (mostly error handling) */ + +extern unsigned int fc_debug_logging; + +#define FC_CHECK_LOGGING(LEVEL, CMD) \ + do { \ + if (unlikely(fc_debug_logging & LEVEL)) \ + do { \ + CMD; \ + } while (0); \ + } while (0) + +#define FC_LIBFC_DBG(fmt, args...) \ + FC_CHECK_LOGGING(FC_LIBFC_LOGGING, \ + printk(KERN_INFO "libfc: " fmt, ##args)) + +#define FC_LPORT_DBG(lport, fmt, args...) \ + FC_CHECK_LOGGING(FC_LPORT_LOGGING, \ + printk(KERN_INFO "host%u: lport %6x: " fmt, \ + (lport)->host->host_no, \ + fc_host_port_id((lport)->host), ##args)) + +#define FC_DISC_DBG(disc, fmt, args...) 
\ + FC_CHECK_LOGGING(FC_DISC_LOGGING, \ + printk(KERN_INFO "host%u: disc: " fmt, \ + (disc)->lport->host->host_no, \ + ##args)) + +#define FC_RPORT_ID_DBG(lport, port_id, fmt, args...) \ + FC_CHECK_LOGGING(FC_RPORT_LOGGING, \ + printk(KERN_INFO "host%u: rport %6x: " fmt, \ + (lport)->host->host_no, \ + (port_id), ##args)) + +#define FC_RPORT_DBG(rdata, fmt, args...) \ + FC_RPORT_ID_DBG((rdata)->local_port, (rdata)->ids.port_id, fmt, ##args) + +#define FC_FCP_DBG(pkt, fmt, args...) \ + FC_CHECK_LOGGING(FC_FCP_LOGGING, \ + printk(KERN_INFO "host%u: fcp: %6x: " fmt, \ + (pkt)->lp->host->host_no, \ + pkt->rport->port_id, ##args)) + +#define FC_EXCH_DBG(exch, fmt, args...) \ + FC_CHECK_LOGGING(FC_EXCH_LOGGING, \ + printk(KERN_INFO "host%u: xid %4x: " fmt, \ + (exch)->lp->host->host_no, \ + exch->xid, ##args)) + +#define FC_SCSI_DBG(lport, fmt, args...) \ + FC_CHECK_LOGGING(FC_SCSI_LOGGING, \ + printk(KERN_INFO "host%u: scsi: " fmt, \ + (lport)->host->host_no, ##args)) + +/* + * Set up direct-data placement for this I/O request + */ +void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid); + +/* + * Module setup functions + */ +int fc_setup_exch_mgr(void); +void fc_destroy_exch_mgr(void); +int fc_setup_rport(void); +void fc_destroy_rport(void); +int fc_setup_fcp(void); +void fc_destroy_fcp(void); + +/* + * Internal libfc functions + */ +const char *fc_els_resp_type(struct fc_frame *); + +/* + * Copies a buffer into an sg list + */ +u32 fc_copy_buffer_to_sglist(void *buf, size_t len, + struct scatterlist *sg, + u32 *nents, size_t *offset, + enum km_type km_type, u32 *crc); + +#endif /* _FC_LIBFC_H_ */ diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index bd2f7719744..74338c83ad0 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -56,7 +56,7 @@ * at the same time. * * When discovery succeeds or fails a callback is made to the lport as - * notification. Currently, succesful discovery causes the lport to take no + * notification. Currently, successful discovery causes the lport to take no * action. A failure will cause the lport to reset. There is likely a circular * locking problem with this implementation. */ @@ -94,6 +94,9 @@ #include <scsi/libfc.h> #include <scsi/fc_encode.h> +#include <linux/scatterlist.h> + +#include "fc_libfc.h" /* Fabric IDs to use for point-to-point mode, chosen on whims. 
*/ #define FC_LOCAL_PTP_FID_LO 0x010101 @@ -106,8 +109,7 @@ static void fc_lport_error(struct fc_lport *, struct fc_frame *); static void fc_lport_enter_reset(struct fc_lport *); static void fc_lport_enter_flogi(struct fc_lport *); static void fc_lport_enter_dns(struct fc_lport *); -static void fc_lport_enter_rpn_id(struct fc_lport *); -static void fc_lport_enter_rft_id(struct fc_lport *); +static void fc_lport_enter_ns(struct fc_lport *, enum fc_lport_state); static void fc_lport_enter_scr(struct fc_lport *); static void fc_lport_enter_ready(struct fc_lport *); static void fc_lport_enter_logo(struct fc_lport *); @@ -116,14 +118,40 @@ static const char *fc_lport_state_names[] = { [LPORT_ST_DISABLED] = "disabled", [LPORT_ST_FLOGI] = "FLOGI", [LPORT_ST_DNS] = "dNS", - [LPORT_ST_RPN_ID] = "RPN_ID", + [LPORT_ST_RNN_ID] = "RNN_ID", + [LPORT_ST_RSNN_NN] = "RSNN_NN", + [LPORT_ST_RSPN_ID] = "RSPN_ID", [LPORT_ST_RFT_ID] = "RFT_ID", + [LPORT_ST_RFF_ID] = "RFF_ID", [LPORT_ST_SCR] = "SCR", [LPORT_ST_READY] = "Ready", [LPORT_ST_LOGO] = "LOGO", [LPORT_ST_RESET] = "reset", }; +/** + * struct fc_bsg_info - FC Passthrough managemet structure + * @job: The passthrough job + * @lport: The local port to pass through a command + * @rsp_code: The expected response code + * @sg: job->reply_payload.sg_list + * @nents: job->reply_payload.sg_cnt + * @offset: The offset into the response data + */ +struct fc_bsg_info { + struct fc_bsg_job *job; + struct fc_lport *lport; + u16 rsp_code; + struct scatterlist *sg; + u32 nents; + size_t offset; +}; + +/** + * fc_frame_drop() - Dummy frame handler + * @lport: The local port the frame was received on + * @fp: The received frame + */ static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp) { fc_frame_free(fp); @@ -150,8 +178,8 @@ static void fc_lport_rport_callback(struct fc_lport *lport, switch (event) { case RPORT_EV_READY: if (lport->state == LPORT_ST_DNS) { - lport->dns_rp = rdata; - fc_lport_enter_rpn_id(lport); + lport->dns_rdata = rdata; + fc_lport_enter_ns(lport, LPORT_ST_RNN_ID); } else { FC_LPORT_DBG(lport, "Received an READY event " "on port (%6x) for the directory " @@ -165,7 +193,7 @@ static void fc_lport_rport_callback(struct fc_lport *lport, case RPORT_EV_LOGO: case RPORT_EV_FAILED: case RPORT_EV_STOP: - lport->dns_rp = NULL; + lport->dns_rdata = NULL; break; case RPORT_EV_NONE: break; @@ -189,8 +217,8 @@ static const char *fc_lport_state(struct fc_lport *lport) /** * fc_lport_ptp_setup() - Create an rport for point-to-point mode - * @lport: The lport to attach the ptp rport to - * @fid: The FID of the ptp rport + * @lport: The lport to attach the ptp rport to + * @remote_fid: The FID of the ptp rport * @remote_wwpn: The WWPN of the ptp rport * @remote_wwnn: The WWNN of the ptp rport */ @@ -199,18 +227,22 @@ static void fc_lport_ptp_setup(struct fc_lport *lport, u64 remote_wwnn) { mutex_lock(&lport->disc.disc_mutex); - if (lport->ptp_rp) - lport->tt.rport_logoff(lport->ptp_rp); - lport->ptp_rp = lport->tt.rport_create(lport, remote_fid); - lport->ptp_rp->ids.port_name = remote_wwpn; - lport->ptp_rp->ids.node_name = remote_wwnn; + if (lport->ptp_rdata) + lport->tt.rport_logoff(lport->ptp_rdata); + lport->ptp_rdata = lport->tt.rport_create(lport, remote_fid); + lport->ptp_rdata->ids.port_name = remote_wwpn; + lport->ptp_rdata->ids.node_name = remote_wwnn; mutex_unlock(&lport->disc.disc_mutex); - lport->tt.rport_login(lport->ptp_rp); + lport->tt.rport_login(lport->ptp_rdata); fc_lport_enter_ready(lport); } +/** + * fc_get_host_port_type() - Return 
the port type of the given Scsi_Host + * @shost: The SCSI host whose port type is to be determined + */ void fc_get_host_port_type(struct Scsi_Host *shost) { /* TODO - currently just NPORT */ @@ -218,17 +250,33 @@ void fc_get_host_port_type(struct Scsi_Host *shost) } EXPORT_SYMBOL(fc_get_host_port_type); +/** + * fc_get_host_port_state() - Return the port state of the given Scsi_Host + * @shost: The SCSI host whose port state is to be determined + */ void fc_get_host_port_state(struct Scsi_Host *shost) { - struct fc_lport *lp = shost_priv(shost); + struct fc_lport *lport = shost_priv(shost); - if (lp->link_up) - fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; + mutex_lock(&lport->lp_mutex); + if (!lport->link_up) + fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; else - fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; + switch (lport->state) { + case LPORT_ST_READY: + fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; + break; + default: + fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; + } + mutex_unlock(&lport->lp_mutex); } EXPORT_SYMBOL(fc_get_host_port_state); +/** + * fc_get_host_speed() - Return the speed of the given Scsi_Host + * @shost: The SCSI host whose port speed is to be determined + */ void fc_get_host_speed(struct Scsi_Host *shost) { struct fc_lport *lport = shost_priv(shost); @@ -237,24 +285,28 @@ void fc_get_host_speed(struct Scsi_Host *shost) } EXPORT_SYMBOL(fc_get_host_speed); +/** + * fc_get_host_stats() - Return the Scsi_Host's statistics + * @shost: The SCSI host whose statistics are to be returned + */ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost) { struct fc_host_statistics *fcoe_stats; - struct fc_lport *lp = shost_priv(shost); + struct fc_lport *lport = shost_priv(shost); struct timespec v0, v1; unsigned int cpu; - fcoe_stats = &lp->host_stats; + fcoe_stats = &lport->host_stats; memset(fcoe_stats, 0, sizeof(struct fc_host_statistics)); jiffies_to_timespec(jiffies, &v0); - jiffies_to_timespec(lp->boot_time, &v1); + jiffies_to_timespec(lport->boot_time, &v1); fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec); for_each_possible_cpu(cpu) { struct fcoe_dev_stats *stats; - stats = per_cpu_ptr(lp->dev_stats, cpu); + stats = per_cpu_ptr(lport->dev_stats, cpu); fcoe_stats->tx_frames += stats->TxFrames; fcoe_stats->tx_words += stats->TxWords; @@ -279,12 +331,15 @@ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost) } EXPORT_SYMBOL(fc_get_host_stats); -/* - * Fill in FLOGI command for request. +/** + * fc_lport_flogi_fill() - Fill in FLOGI command for request + * @lport: The local port the FLOGI is for + * @flogi: The FLOGI command + * @op: The opcode */ -static void -fc_lport_flogi_fill(struct fc_lport *lport, struct fc_els_flogi *flogi, - unsigned int op) +static void fc_lport_flogi_fill(struct fc_lport *lport, + struct fc_els_flogi *flogi, + unsigned int op) { struct fc_els_csp *sp; struct fc_els_cssp *cp; @@ -312,8 +367,10 @@ fc_lport_flogi_fill(struct fc_lport *lport, struct fc_els_flogi *flogi, } } -/* - * Add a supported FC-4 type. +/** + * fc_lport_add_fc4_type() - Add a supported FC-4 type to a local port + * @lport: The local port to add a new FC-4 type to + * @type: The new FC-4 type */ static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type) { @@ -325,11 +382,11 @@ static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type) /** * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report. 
+ * @sp: The sequence in the RLIR exchange + * @fp: The RLIR request frame * @lport: Fibre Channel local port recieving the RLIR - * @sp: current sequence in the RLIR exchange - * @fp: RLIR request frame * - * Locking Note: The lport lock is exected to be held before calling + * Locking Note: The lport lock is expected to be held before calling * this function. */ static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp, @@ -344,11 +401,11 @@ static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp, /** * fc_lport_recv_echo_req() - Handle received ECHO request - * @lport: Fibre Channel local port recieving the ECHO - * @sp: current sequence in the ECHO exchange - * @fp: ECHO request frame + * @sp: The sequence in the ECHO exchange + * @fp: ECHO request frame + * @lport: The local port recieving the ECHO * - * Locking Note: The lport lock is exected to be held before calling + * Locking Note: The lport lock is expected to be held before calling * this function. */ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp, @@ -361,7 +418,7 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp, void *dp; u32 f_ctl; - FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n", + FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n", fc_lport_state(lport)); len = fr_len(in_fp) - sizeof(struct fc_frame_header); @@ -374,7 +431,7 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp, if (fp) { dp = fc_frame_payload_get(fp, len); memcpy(dp, pp, len); - *((u32 *)dp) = htonl(ELS_LS_ACC << 24); + *((__be32 *)dp) = htonl(ELS_LS_ACC << 24); sp = lport->tt.seq_start_next(sp); f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ; fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid, @@ -385,12 +442,12 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp, } /** - * fc_lport_recv_echo_req() - Handle received Request Node ID data request - * @lport: Fibre Channel local port recieving the RNID - * @sp: current sequence in the RNID exchange - * @fp: RNID request frame + * fc_lport_recv_rnid_req() - Handle received Request Node ID data request + * @sp: The sequence in the RNID exchange + * @fp: The RNID request frame + * @lport: The local port recieving the RNID * - * Locking Note: The lport lock is exected to be held before calling + * Locking Note: The lport lock is expected to be held before calling * this function. */ static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp, @@ -453,9 +510,9 @@ static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp, /** * fc_lport_recv_logo_req() - Handle received fabric LOGO request - * @lport: Fibre Channel local port recieving the LOGO - * @sp: current sequence in the LOGO exchange - * @fp: LOGO request frame + * @sp: The sequence in the LOGO exchange + * @fp: The LOGO request frame + * @lport: The local port recieving the LOGO * * Locking Note: The lport lock is exected to be held before calling * this function. @@ -470,7 +527,7 @@ static void fc_lport_recv_logo_req(struct fc_seq *sp, struct fc_frame *fp, /** * fc_fabric_login() - Start the lport state machine - * @lport: The lport that should log into the fabric + * @lport: The local port that should log into the fabric * * Locking Note: This function should not be called * with the lport lock held. 
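fc_fabric_login() only arms the lport state machine; nothing goes on the wire until the transport reports link-up, and the next hunk splits fc_linkup()/fc_linkdown() so that callers that already hold lp_mutex (such as the NPIV code added later in this patch) can use the __fc_linkup()/__fc_linkdown() variants directly. A minimal sketch of the call order from a lower-level driver, assuming a hypothetical my_lld_* driver and an already-allocated lport:

#include <scsi/libfc.h>

/*
 * Illustrative sketch only: hypothetical my_lld_* glue around the
 * exported libfc entry points; a real driver also sets up the
 * Scsi_Host, WWNs and its libfc_function_template before this point.
 */
static int my_lld_bring_up(struct fc_lport *lport)
{
        int rc;

        fc_lport_config(lport);         /* initial state and retry work */

        rc = fc_lport_init(lport);      /* default lport handlers, fc_host attrs */
        if (rc)
                return rc;

        rc = fc_fabric_login(lport);    /* arm the lport state machine */
        if (rc)
                return rc;

        fc_linkup(lport);               /* takes lp_mutex, may start FLOGI */
        return 0;
}

static void my_lld_link_event(struct fc_lport *lport, int link_ok)
{
        if (link_ok)
                fc_linkup(lport);
        else
                fc_linkdown(lport);     /* takes lp_mutex, resets lport, cleans up FCP */
}

Only the mutex-taking variants are exported; the __fc_linkup()/__fc_linkdown() forms stay internal to libfc for callers such as fc_vport_setlink() further down in this patch.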
@@ -491,47 +548,69 @@ int fc_fabric_login(struct fc_lport *lport) EXPORT_SYMBOL(fc_fabric_login); /** - * fc_linkup() - Handler for transport linkup events + * __fc_linkup() - Handler for transport linkup events * @lport: The lport whose link is up + * + * Locking: must be called with the lp_mutex held */ -void fc_linkup(struct fc_lport *lport) +void __fc_linkup(struct fc_lport *lport) { - printk(KERN_INFO "libfc: Link up on port (%6x)\n", - fc_host_port_id(lport->host)); - - mutex_lock(&lport->lp_mutex); if (!lport->link_up) { lport->link_up = 1; if (lport->state == LPORT_ST_RESET) fc_lport_enter_flogi(lport); } +} + +/** + * fc_linkup() - Handler for transport linkup events + * @lport: The local port whose link is up + */ +void fc_linkup(struct fc_lport *lport) +{ + printk(KERN_INFO "host%d: libfc: Link up on port (%6x)\n", + lport->host->host_no, fc_host_port_id(lport->host)); + + mutex_lock(&lport->lp_mutex); + __fc_linkup(lport); mutex_unlock(&lport->lp_mutex); } EXPORT_SYMBOL(fc_linkup); /** - * fc_linkdown() - Handler for transport linkdown events + * __fc_linkdown() - Handler for transport linkdown events * @lport: The lport whose link is down + * + * Locking: must be called with the lp_mutex held */ -void fc_linkdown(struct fc_lport *lport) +void __fc_linkdown(struct fc_lport *lport) { - mutex_lock(&lport->lp_mutex); - printk(KERN_INFO "libfc: Link down on port (%6x)\n", - fc_host_port_id(lport->host)); - if (lport->link_up) { lport->link_up = 0; fc_lport_enter_reset(lport); lport->tt.fcp_cleanup(lport); } +} + +/** + * fc_linkdown() - Handler for transport linkdown events + * @lport: The local port whose link is down + */ +void fc_linkdown(struct fc_lport *lport) +{ + printk(KERN_INFO "host%d: libfc: Link down on port (%6x)\n", + lport->host->host_no, fc_host_port_id(lport->host)); + + mutex_lock(&lport->lp_mutex); + __fc_linkdown(lport); mutex_unlock(&lport->lp_mutex); } EXPORT_SYMBOL(fc_linkdown); /** * fc_fabric_logoff() - Logout of the fabric - * @lport: fc_lport pointer to logoff the fabric + * @lport: The local port to logoff the fabric * * Return value: * 0 for success, -1 for failure @@ -540,8 +619,8 @@ int fc_fabric_logoff(struct fc_lport *lport) { lport->tt.disc_stop_final(lport); mutex_lock(&lport->lp_mutex); - if (lport->dns_rp) - lport->tt.rport_logoff(lport->dns_rp); + if (lport->dns_rdata) + lport->tt.rport_logoff(lport->dns_rdata); mutex_unlock(&lport->lp_mutex); lport->tt.rport_flush_queue(); mutex_lock(&lport->lp_mutex); @@ -553,11 +632,9 @@ int fc_fabric_logoff(struct fc_lport *lport) EXPORT_SYMBOL(fc_fabric_logoff); /** - * fc_lport_destroy() - unregister a fc_lport - * @lport: fc_lport pointer to unregister + * fc_lport_destroy() - Unregister a fc_lport + * @lport: The local port to unregister * - * Return value: - * None * Note: * exit routine for fc_lport instance * clean-up all the allocated memory @@ -580,13 +657,9 @@ int fc_lport_destroy(struct fc_lport *lport) EXPORT_SYMBOL(fc_lport_destroy); /** - * fc_set_mfs() - sets up the mfs for the corresponding fc_lport - * @lport: fc_lport pointer to unregister - * @mfs: the new mfs for fc_lport - * - * Set mfs for the given fc_lport to the new mfs. 
- * - * Return: 0 for success + * fc_set_mfs() - Set the maximum frame size for a local port + * @lport: The local port to set the MFS for + * @mfs: The new MFS */ int fc_set_mfs(struct fc_lport *lport, u32 mfs) { @@ -617,7 +690,7 @@ EXPORT_SYMBOL(fc_set_mfs); /** * fc_lport_disc_callback() - Callback for discovery events - * @lport: FC local port + * @lport: The local port receiving the event * @event: The discovery event */ void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event) @@ -627,8 +700,9 @@ void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event) FC_LPORT_DBG(lport, "Discovery succeeded\n"); break; case DISC_EV_FAILED: - printk(KERN_ERR "libfc: Discovery failed for port (%6x)\n", - fc_host_port_id(lport->host)); + printk(KERN_ERR "host%d: libfc: " + "Discovery failed for port (%6x)\n", + lport->host->host_no, fc_host_port_id(lport->host)); mutex_lock(&lport->lp_mutex); fc_lport_enter_reset(lport); mutex_unlock(&lport->lp_mutex); @@ -641,7 +715,7 @@ void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event) /** * fc_rport_enter_ready() - Enter the ready state and start discovery - * @lport: Fibre Channel local port that is ready + * @lport: The local port that is ready * * Locking Note: The lport lock is expected to be held before calling * this routine. @@ -652,22 +726,46 @@ static void fc_lport_enter_ready(struct fc_lport *lport) fc_lport_state(lport)); fc_lport_state_enter(lport, LPORT_ST_READY); + if (lport->vport) + fc_vport_set_state(lport->vport, FC_VPORT_ACTIVE); + fc_vports_linkchange(lport); - if (!lport->ptp_rp) + if (!lport->ptp_rdata) lport->tt.disc_start(fc_lport_disc_callback, lport); } /** + * fc_lport_set_port_id() - set the local port Port ID + * @lport: The local port which will have its Port ID set. + * @port_id: The new port ID. + * @fp: The frame containing the incoming request, or NULL. + * + * Locking Note: The lport lock is expected to be held before calling + * this function. + */ +static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id, + struct fc_frame *fp) +{ + if (port_id) + printk(KERN_INFO "host%d: Assigned Port ID %6x\n", + lport->host->host_no, port_id); + + fc_host_port_id(lport->host) = port_id; + if (lport->tt.lport_set_port_id) + lport->tt.lport_set_port_id(lport, port_id, fp); +} + +/** * fc_lport_recv_flogi_req() - Receive a FLOGI request * @sp_in: The sequence the FLOGI is on - * @rx_fp: The frame the FLOGI is in - * @lport: The lport that recieved the request + * @rx_fp: The FLOGI frame + * @lport: The local port that recieved the request * * A received FLOGI request indicates a point-to-point connection. * Accept it with the common service parameters indicating our N port. * Set up to do a PLOGI if we have the higher-number WWPN. * - * Locking Note: The lport lock is exected to be held before calling + * Locking Note: The lport lock is expected to be held before calling * this function. 
*/ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in, @@ -695,8 +793,9 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in, goto out; remote_wwpn = get_unaligned_be64(&flp->fl_wwpn); if (remote_wwpn == lport->wwpn) { - printk(KERN_WARNING "libfc: Received FLOGI from port " - "with same WWPN %llx\n", remote_wwpn); + printk(KERN_WARNING "host%d: libfc: Received FLOGI from port " + "with same WWPN %llx\n", + lport->host->host_no, remote_wwpn); goto out; } FC_LPORT_DBG(lport, "FLOGI from port WWPN %llx\n", remote_wwpn); @@ -715,7 +814,7 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in, remote_fid = FC_LOCAL_PTP_FID_HI; } - fc_host_port_id(lport->host) = local_fid; + fc_lport_set_port_id(lport, local_fid, rx_fp); fp = fc_frame_alloc(lport, sizeof(*flp)); if (fp) { @@ -747,9 +846,9 @@ out: /** * fc_lport_recv_req() - The generic lport request handler - * @lport: The lport that received the request - * @sp: The sequence the request is on - * @fp: The frame the request is in + * @lport: The local port that received the request + * @sp: The sequence the request is on + * @fp: The request frame * * This function will see if the lport handles the request or * if an rport should handle the request. @@ -817,8 +916,8 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp, } /** - * fc_lport_reset() - Reset an lport - * @lport: The lport which should be reset + * fc_lport_reset() - Reset a local port + * @lport: The local port which should be reset * * Locking Note: This functions should not be called with the * lport lock held. @@ -834,29 +933,31 @@ int fc_lport_reset(struct fc_lport *lport) EXPORT_SYMBOL(fc_lport_reset); /** - * fc_lport_reset_locked() - Reset the local port - * @lport: Fibre Channel local port to be reset + * fc_lport_reset_locked() - Reset the local port w/ the lport lock held + * @lport: The local port to be reset * * Locking Note: The lport lock is expected to be held before calling * this routine. */ static void fc_lport_reset_locked(struct fc_lport *lport) { - if (lport->dns_rp) - lport->tt.rport_logoff(lport->dns_rp); + if (lport->dns_rdata) + lport->tt.rport_logoff(lport->dns_rdata); - lport->ptp_rp = NULL; + lport->ptp_rdata = NULL; lport->tt.disc_stop(lport); lport->tt.exch_mgr_reset(lport, 0, 0); fc_host_fabric_name(lport->host) = 0; - fc_host_port_id(lport->host) = 0; + + if (fc_host_port_id(lport->host)) + fc_lport_set_port_id(lport, 0, NULL); } /** * fc_lport_enter_reset() - Reset the local port - * @lport: Fibre Channel local port to be reset + * @lport: The local port to be reset * * Locking Note: The lport lock is expected to be held before calling * this routine. @@ -866,15 +967,22 @@ static void fc_lport_enter_reset(struct fc_lport *lport) FC_LPORT_DBG(lport, "Entered RESET state from %s state\n", fc_lport_state(lport)); + if (lport->vport) { + if (lport->link_up) + fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING); + else + fc_vport_set_state(lport->vport, FC_VPORT_LINKDOWN); + } fc_lport_state_enter(lport, LPORT_ST_RESET); + fc_vports_linkchange(lport); fc_lport_reset_locked(lport); if (lport->link_up) fc_lport_enter_flogi(lport); } /** - * fc_lport_enter_disabled() - disable the local port - * @lport: Fibre Channel local port to be reset + * fc_lport_enter_disabled() - Disable the local port + * @lport: The local port to be reset * * Locking Note: The lport lock is expected to be held before calling * this routine. 
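The new fc_lport_set_port_id() helper above does the bookkeeping for every Port ID change: it logs the assignment, updates fc_host_port_id(), and forwards the new ID, together with the frame that carried it (NULL when the ID is cleared on reset), to the optional tt.lport_set_port_id hook. A minimal sketch of an LLD-side hook, where the my_lld_* names are hypothetical:

#include <scsi/libfc.h>

/*
 * Illustrative sketch only: an optional lport_set_port_id hook as an
 * LLD might install it.  Called whenever the Port ID is assigned or
 * cleared; fp is the frame that carried the ID (the FLOGI LS_ACC, or
 * the received FLOGI in point-to-point mode) and may be NULL on reset.
 */
static void my_lld_set_port_id(struct fc_lport *lport, u32 port_id,
                               struct fc_frame *fp)
{
        if (!port_id)
                return;         /* ID cleared, e.g. lport reset */

        /* e.g. program port_id into hardware address filters here */
}

static void my_lld_setup_template(struct fc_lport *lport)
{
        lport->tt.lport_set_port_id = my_lld_set_port_id;
}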
@@ -885,13 +993,14 @@ static void fc_lport_enter_disabled(struct fc_lport *lport) fc_lport_state(lport)); fc_lport_state_enter(lport, LPORT_ST_DISABLED); + fc_vports_linkchange(lport); fc_lport_reset_locked(lport); } /** * fc_lport_error() - Handler for any errors - * @lport: The fc_lport object - * @fp: The frame pointer + * @lport: The local port that the error was on + * @fp: The error code encoded in a frame pointer * * If the error was caused by a resource allocation failure * then wait for half a second and retry, otherwise retry @@ -922,8 +1031,11 @@ static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp) case LPORT_ST_DISABLED: case LPORT_ST_READY: case LPORT_ST_RESET: - case LPORT_ST_RPN_ID: + case LPORT_ST_RNN_ID: + case LPORT_ST_RSNN_NN: + case LPORT_ST_RSPN_ID: case LPORT_ST_RFT_ID: + case LPORT_ST_RFF_ID: case LPORT_ST_SCR: case LPORT_ST_DNS: case LPORT_ST_FLOGI: @@ -936,33 +1048,33 @@ static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp) } /** - * fc_lport_rft_id_resp() - Handle response to Register Fibre - * Channel Types by ID (RPN_ID) request - * @sp: current sequence in RPN_ID exchange - * @fp: response frame + * fc_lport_ns_resp() - Handle response to a name server + * registration exchange + * @sp: current sequence in exchange + * @fp: response frame * @lp_arg: Fibre Channel host port instance * * Locking Note: This function will be called without the lport lock - * held, but it will lock, call an _enter_* function or fc_lport_error + * held, but it will lock, call an _enter_* function or fc_lport_error() * and then unlock the lport. */ -static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp, - void *lp_arg) +static void fc_lport_ns_resp(struct fc_seq *sp, struct fc_frame *fp, + void *lp_arg) { struct fc_lport *lport = lp_arg; struct fc_frame_header *fh; struct fc_ct_hdr *ct; - FC_LPORT_DBG(lport, "Received a RFT_ID %s\n", fc_els_resp_type(fp)); + FC_LPORT_DBG(lport, "Received a ns %s\n", fc_els_resp_type(fp)); if (fp == ERR_PTR(-FC_EX_CLOSED)) return; mutex_lock(&lport->lp_mutex); - if (lport->state != LPORT_ST_RFT_ID) { - FC_LPORT_DBG(lport, "Received a RFT_ID response, but in state " - "%s\n", fc_lport_state(lport)); + if (lport->state < LPORT_ST_RNN_ID || lport->state > LPORT_ST_RFF_ID) { + FC_LPORT_DBG(lport, "Received a name server response, " + "but in state %s\n", fc_lport_state(lport)); if (IS_ERR(fp)) goto err; goto out; @@ -980,63 +1092,28 @@ static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp, ct->ct_fs_type == FC_FST_DIR && ct->ct_fs_subtype == FC_NS_SUBTYPE && ntohs(ct->ct_cmd) == FC_FS_ACC) - fc_lport_enter_scr(lport); - else - fc_lport_error(lport, fp); -out: - fc_frame_free(fp); -err: - mutex_unlock(&lport->lp_mutex); -} - -/** - * fc_lport_rpn_id_resp() - Handle response to Register Port - * Name by ID (RPN_ID) request - * @sp: current sequence in RPN_ID exchange - * @fp: response frame - * @lp_arg: Fibre Channel host port instance - * - * Locking Note: This function will be called without the lport lock - * held, but it will lock, call an _enter_* function or fc_lport_error - * and then unlock the lport. 
- */ -static void fc_lport_rpn_id_resp(struct fc_seq *sp, struct fc_frame *fp, - void *lp_arg) -{ - struct fc_lport *lport = lp_arg; - struct fc_frame_header *fh; - struct fc_ct_hdr *ct; - - FC_LPORT_DBG(lport, "Received a RPN_ID %s\n", fc_els_resp_type(fp)); - - if (fp == ERR_PTR(-FC_EX_CLOSED)) - return; - - mutex_lock(&lport->lp_mutex); - - if (lport->state != LPORT_ST_RPN_ID) { - FC_LPORT_DBG(lport, "Received a RPN_ID response, but in state " - "%s\n", fc_lport_state(lport)); - if (IS_ERR(fp)) - goto err; - goto out; - } - - if (IS_ERR(fp)) { - fc_lport_error(lport, fp); - goto err; - } - - fh = fc_frame_header_get(fp); - ct = fc_frame_payload_get(fp, sizeof(*ct)); - if (fh && ct && fh->fh_type == FC_TYPE_CT && - ct->ct_fs_type == FC_FST_DIR && - ct->ct_fs_subtype == FC_NS_SUBTYPE && - ntohs(ct->ct_cmd) == FC_FS_ACC) - fc_lport_enter_rft_id(lport); + switch (lport->state) { + case LPORT_ST_RNN_ID: + fc_lport_enter_ns(lport, LPORT_ST_RSNN_NN); + break; + case LPORT_ST_RSNN_NN: + fc_lport_enter_ns(lport, LPORT_ST_RSPN_ID); + break; + case LPORT_ST_RSPN_ID: + fc_lport_enter_ns(lport, LPORT_ST_RFT_ID); + break; + case LPORT_ST_RFT_ID: + fc_lport_enter_ns(lport, LPORT_ST_RFF_ID); + break; + case LPORT_ST_RFF_ID: + fc_lport_enter_scr(lport); + break; + default: + /* should have already been caught by state checks */ + break; + } else fc_lport_error(lport, fp); - out: fc_frame_free(fp); err: @@ -1045,8 +1122,8 @@ err: /** * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request - * @sp: current sequence in SCR exchange - * @fp: response frame + * @sp: current sequence in SCR exchange + * @fp: response frame * @lp_arg: Fibre Channel lport port instance that sent the registration request * * Locking Note: This function will be called without the lport lock @@ -1092,8 +1169,8 @@ err: } /** - * fc_lport_enter_scr() - Send a State Change Register (SCR) request - * @lport: Fibre Channel local port to register for state changes + * fc_lport_enter_scr() - Send a SCR (State Change Register) request + * @lport: The local port to register for state changes * * Locking Note: The lport lock is expected to be held before calling * this routine. @@ -1114,78 +1191,74 @@ static void fc_lport_enter_scr(struct fc_lport *lport) } if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR, - fc_lport_scr_resp, lport, lport->e_d_tov)) - fc_lport_error(lport, fp); + fc_lport_scr_resp, lport, + 2 * lport->r_a_tov)) + fc_lport_error(lport, NULL); } /** - * fc_lport_enter_rft_id() - Register FC4-types with the name server + * fc_lport_enter_ns() - register some object with the name server * @lport: Fibre Channel local port to register * * Locking Note: The lport lock is expected to be held before calling * this routine. 
*/ -static void fc_lport_enter_rft_id(struct fc_lport *lport) +static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state) { struct fc_frame *fp; - struct fc_ns_fts *lps; - int i; + enum fc_ns_req cmd; + int size = sizeof(struct fc_ct_hdr); + size_t len; - FC_LPORT_DBG(lport, "Entered RFT_ID state from %s state\n", + FC_LPORT_DBG(lport, "Entered %s state from %s state\n", + fc_lport_state_names[state], fc_lport_state(lport)); - fc_lport_state_enter(lport, LPORT_ST_RFT_ID); - - lps = &lport->fcts; - i = sizeof(lps->ff_type_map) / sizeof(lps->ff_type_map[0]); - while (--i >= 0) - if (ntohl(lps->ff_type_map[i]) != 0) - break; - if (i < 0) { - /* nothing to register, move on to SCR */ - fc_lport_enter_scr(lport); - return; - } + fc_lport_state_enter(lport, state); - fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) + - sizeof(struct fc_ns_rft)); - if (!fp) { - fc_lport_error(lport, fp); + switch (state) { + case LPORT_ST_RNN_ID: + cmd = FC_NS_RNN_ID; + size += sizeof(struct fc_ns_rn_id); + break; + case LPORT_ST_RSNN_NN: + len = strnlen(fc_host_symbolic_name(lport->host), 255); + /* if there is no symbolic name, skip to RFT_ID */ + if (!len) + return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID); + cmd = FC_NS_RSNN_NN; + size += sizeof(struct fc_ns_rsnn) + len; + break; + case LPORT_ST_RSPN_ID: + len = strnlen(fc_host_symbolic_name(lport->host), 255); + /* if there is no symbolic name, skip to RFT_ID */ + if (!len) + return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID); + cmd = FC_NS_RSPN_ID; + size += sizeof(struct fc_ns_rspn) + len; + break; + case LPORT_ST_RFT_ID: + cmd = FC_NS_RFT_ID; + size += sizeof(struct fc_ns_rft); + break; + case LPORT_ST_RFF_ID: + cmd = FC_NS_RFF_ID; + size += sizeof(struct fc_ns_rff_id); + break; + default: + fc_lport_error(lport, NULL); return; } - if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RFT_ID, - fc_lport_rft_id_resp, - lport, lport->e_d_tov)) - fc_lport_error(lport, fp); -} - -/** - * fc_rport_enter_rft_id() - Register port name with the name server - * @lport: Fibre Channel local port to register - * - * Locking Note: The lport lock is expected to be held before calling - * this routine. - */ -static void fc_lport_enter_rpn_id(struct fc_lport *lport) -{ - struct fc_frame *fp; - - FC_LPORT_DBG(lport, "Entered RPN_ID state from %s state\n", - fc_lport_state(lport)); - - fc_lport_state_enter(lport, LPORT_ST_RPN_ID); - - fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) + - sizeof(struct fc_ns_rn_id)); + fp = fc_frame_alloc(lport, size); if (!fp) { fc_lport_error(lport, fp); return; } - if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RPN_ID, - fc_lport_rpn_id_resp, - lport, lport->e_d_tov)) + if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, cmd, + fc_lport_ns_resp, + lport, 3 * lport->r_a_tov)) fc_lport_error(lport, fp); } @@ -1194,8 +1267,8 @@ static struct fc_rport_operations fc_lport_rport_ops = { }; /** - * fc_rport_enter_dns() - Create a rport to the name server - * @lport: Fibre Channel local port requesting a rport for the name server + * fc_rport_enter_dns() - Create a fc_rport for the name server + * @lport: The local port requesting a remote port for the name server * * Locking Note: The lport lock is expected to be held before calling * this routine. @@ -1224,8 +1297,8 @@ err: } /** - * fc_lport_timeout() - Handler for the retry_work timer. 
- * @work: The work struct of the fc_lport + * fc_lport_timeout() - Handler for the retry_work timer + * @work: The work struct of the local port */ static void fc_lport_timeout(struct work_struct *work) { @@ -1237,21 +1310,25 @@ static void fc_lport_timeout(struct work_struct *work) switch (lport->state) { case LPORT_ST_DISABLED: + WARN_ON(1); + break; case LPORT_ST_READY: - case LPORT_ST_RESET: WARN_ON(1); break; + case LPORT_ST_RESET: + break; case LPORT_ST_FLOGI: fc_lport_enter_flogi(lport); break; case LPORT_ST_DNS: fc_lport_enter_dns(lport); break; - case LPORT_ST_RPN_ID: - fc_lport_enter_rpn_id(lport); - break; + case LPORT_ST_RNN_ID: + case LPORT_ST_RSNN_NN: + case LPORT_ST_RSPN_ID: case LPORT_ST_RFT_ID: - fc_lport_enter_rft_id(lport); + case LPORT_ST_RFF_ID: + fc_lport_enter_ns(lport, lport->state); break; case LPORT_ST_SCR: fc_lport_enter_scr(lport); @@ -1266,16 +1343,16 @@ static void fc_lport_timeout(struct work_struct *work) /** * fc_lport_logo_resp() - Handle response to LOGO request - * @sp: current sequence in LOGO exchange - * @fp: response frame - * @lp_arg: Fibre Channel lport port instance that sent the LOGO request + * @sp: The sequence that the LOGO was on + * @fp: The LOGO frame + * @lp_arg: The lport port that received the LOGO request * * Locking Note: This function will be called without the lport lock - * held, but it will lock, call an _enter_* function or fc_lport_error + * held, but it will lock, call an _enter_* function or fc_lport_error() * and then unlock the lport. */ -static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, - void *lp_arg) +void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, + void *lp_arg) { struct fc_lport *lport = lp_arg; u8 op; @@ -1311,10 +1388,11 @@ out: err: mutex_unlock(&lport->lp_mutex); } +EXPORT_SYMBOL(fc_lport_logo_resp); /** * fc_rport_enter_logo() - Logout of the fabric - * @lport: Fibre Channel local port to be logged out + * @lport: The local port to be logged out * * Locking Note: The lport lock is expected to be held before calling * this routine. @@ -1328,6 +1406,7 @@ static void fc_lport_enter_logo(struct fc_lport *lport) fc_lport_state(lport)); fc_lport_state_enter(lport, LPORT_ST_LOGO); + fc_vports_linkchange(lport); fp = fc_frame_alloc(lport, sizeof(*logo)); if (!fp) { @@ -1336,22 +1415,23 @@ static void fc_lport_enter_logo(struct fc_lport *lport) } if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO, - fc_lport_logo_resp, lport, lport->e_d_tov)) - fc_lport_error(lport, fp); + fc_lport_logo_resp, lport, + 2 * lport->r_a_tov)) + fc_lport_error(lport, NULL); } /** * fc_lport_flogi_resp() - Handle response to FLOGI request - * @sp: current sequence in FLOGI exchange - * @fp: response frame - * @lp_arg: Fibre Channel lport port instance that sent the FLOGI request + * @sp: The sequence that the FLOGI was on + * @fp: The FLOGI response frame + * @lp_arg: The lport port that received the FLOGI response * * Locking Note: This function will be called without the lport lock - * held, but it will lock, call an _enter_* function or fc_lport_error + * held, but it will lock, call an _enter_* function or fc_lport_error() * and then unlock the lport. 
*/ -static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, - void *lp_arg) +void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, + void *lp_arg) { struct fc_lport *lport = lp_arg; struct fc_frame_header *fh; @@ -1385,11 +1465,6 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, fh = fc_frame_header_get(fp); did = ntoh24(fh->fh_d_id); if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) { - - printk(KERN_INFO "libfc: Assigned FID (%6x) in FLOGI response\n", - did); - fc_host_port_id(lport->host) = did; - flp = fc_frame_payload_get(fp, sizeof(*flp)); if (flp) { mfs = ntohs(flp->fl_csp.sp_bb_data) & @@ -1402,12 +1477,18 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov); if (csp_flags & FC_SP_FT_EDTR) e_d_tov /= 1000000; + + lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC); + if ((csp_flags & FC_SP_FT_FPORT) == 0) { if (e_d_tov > lport->e_d_tov) lport->e_d_tov = e_d_tov; lport->r_a_tov = 2 * e_d_tov; - printk(KERN_INFO "libfc: Port (%6x) entered " - "point to point mode\n", did); + fc_lport_set_port_id(lport, did, fp); + printk(KERN_INFO "host%d: libfc: " + "Port (%6x) entered " + "point-to-point mode\n", + lport->host->host_no, did); fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id), get_unaligned_be64( &flp->fl_wwpn), @@ -1418,6 +1499,7 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, lport->r_a_tov = r_a_tov; fc_host_fabric_name(lport->host) = get_unaligned_be64(&flp->fl_wwnn); + fc_lport_set_port_id(lport, did, fp); fc_lport_enter_dns(lport); } } @@ -1430,6 +1512,7 @@ out: err: mutex_unlock(&lport->lp_mutex); } +EXPORT_SYMBOL(fc_lport_flogi_resp); /** * fc_rport_enter_flogi() - Send a FLOGI request to the fabric manager @@ -1451,12 +1534,18 @@ void fc_lport_enter_flogi(struct fc_lport *lport) if (!fp) return fc_lport_error(lport, fp); - if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_FLOGI, - fc_lport_flogi_resp, lport, lport->e_d_tov)) - fc_lport_error(lport, fp); + if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, + lport->vport ? ELS_FDISC : ELS_FLOGI, + fc_lport_flogi_resp, lport, + lport->vport ? 
2 * lport->r_a_tov : + lport->e_d_tov)) + fc_lport_error(lport, NULL); } -/* Configure a fc_lport */ +/** + * fc_lport_config() - Configure a fc_lport + * @lport: The local port to be configured + */ int fc_lport_config(struct fc_lport *lport) { INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout); @@ -1471,6 +1560,10 @@ int fc_lport_config(struct fc_lport *lport) } EXPORT_SYMBOL(fc_lport_config); +/** + * fc_lport_init() - Initialize the lport layer for a local port + * @lport: The local port to initialize the exchange layer for + */ int fc_lport_init(struct fc_lport *lport) { if (!lport->tt.lport_recv) @@ -1500,7 +1593,253 @@ int fc_lport_init(struct fc_lport *lport) if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT) fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT; - INIT_LIST_HEAD(&lport->ema_list); return 0; } EXPORT_SYMBOL(fc_lport_init); + +/** + * fc_lport_bsg_resp() - The common response handler for FC Passthrough requests + * @sp: The sequence for the FC Passthrough response + * @fp: The response frame + * @info_arg: The BSG info that the response is for + */ +static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp, + void *info_arg) +{ + struct fc_bsg_info *info = info_arg; + struct fc_bsg_job *job = info->job; + struct fc_lport *lport = info->lport; + struct fc_frame_header *fh; + size_t len; + void *buf; + + if (IS_ERR(fp)) { + job->reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ? + -ECONNABORTED : -ETIMEDOUT; + job->reply_len = sizeof(uint32_t); + job->state_flags |= FC_RQST_STATE_DONE; + job->job_done(job); + kfree(info); + return; + } + + mutex_lock(&lport->lp_mutex); + fh = fc_frame_header_get(fp); + len = fr_len(fp) - sizeof(*fh); + buf = fc_frame_payload_get(fp, 0); + + if (fr_sof(fp) == FC_SOF_I3 && !ntohs(fh->fh_seq_cnt)) { + /* Get the response code from the first frame payload */ + unsigned short cmd = (info->rsp_code == FC_FS_ACC) ? + ntohs(((struct fc_ct_hdr *)buf)->ct_cmd) : + (unsigned short)fc_frame_payload_op(fp); + + /* Save the reply status of the job */ + job->reply->reply_data.ctels_reply.status = + (cmd == info->rsp_code) ? + FC_CTELS_STATUS_OK : FC_CTELS_STATUS_REJECT; + } + + job->reply->reply_payload_rcv_len += + fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents, + &info->offset, KM_BIO_SRC_IRQ, NULL); + + if (fr_eof(fp) == FC_EOF_T && + (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) == + (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) { + if (job->reply->reply_payload_rcv_len > + job->reply_payload.payload_len) + job->reply->reply_payload_rcv_len = + job->reply_payload.payload_len; + job->reply->result = 0; + job->state_flags |= FC_RQST_STATE_DONE; + job->job_done(job); + kfree(info); + } + fc_frame_free(fp); + mutex_unlock(&lport->lp_mutex); +} + +/** + * fc_lport_els_request() - Send ELS passthrough request + * @job: The BSG Passthrough job + * @lport: The local port sending the request + * @did: The destination port id + * + * Locking Note: The lport lock is expected to be held before calling + * this routine. 
+ */ +static int fc_lport_els_request(struct fc_bsg_job *job, + struct fc_lport *lport, + u32 did, u32 tov) +{ + struct fc_bsg_info *info; + struct fc_frame *fp; + struct fc_frame_header *fh; + char *pp; + int len; + + fp = fc_frame_alloc(lport, job->request_payload.payload_len); + if (!fp) + return -ENOMEM; + + len = job->request_payload.payload_len; + pp = fc_frame_payload_get(fp, len); + + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + pp, len); + + fh = fc_frame_header_get(fp); + fh->fh_r_ctl = FC_RCTL_ELS_REQ; + hton24(fh->fh_d_id, did); + hton24(fh->fh_s_id, fc_host_port_id(lport->host)); + fh->fh_type = FC_TYPE_ELS; + hton24(fh->fh_f_ctl, FC_FC_FIRST_SEQ | + FC_FC_END_SEQ | FC_FC_SEQ_INIT); + fh->fh_cs_ctl = 0; + fh->fh_df_ctl = 0; + fh->fh_parm_offset = 0; + + info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL); + if (!info) { + fc_frame_free(fp); + return -ENOMEM; + } + + info->job = job; + info->lport = lport; + info->rsp_code = ELS_LS_ACC; + info->nents = job->reply_payload.sg_cnt; + info->sg = job->reply_payload.sg_list; + + if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp, + NULL, info, tov)) + return -ECOMM; + return 0; +} + +/** + * fc_lport_ct_request() - Send CT Passthrough request + * @job: The BSG Passthrough job + * @lport: The local port sending the request + * @did: The destination FC-ID + * @tov: The timeout period to wait for the response + * + * Locking Note: The lport lock is expected to be held before calling + * this routine. + */ +static int fc_lport_ct_request(struct fc_bsg_job *job, + struct fc_lport *lport, u32 did, u32 tov) +{ + struct fc_bsg_info *info; + struct fc_frame *fp; + struct fc_frame_header *fh; + struct fc_ct_req *ct; + size_t len; + + fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) + + job->request_payload.payload_len); + if (!fp) + return -ENOMEM; + + len = job->request_payload.payload_len; + ct = fc_frame_payload_get(fp, len); + + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + ct, len); + + fh = fc_frame_header_get(fp); + fh->fh_r_ctl = FC_RCTL_DD_UNSOL_CTL; + hton24(fh->fh_d_id, did); + hton24(fh->fh_s_id, fc_host_port_id(lport->host)); + fh->fh_type = FC_TYPE_CT; + hton24(fh->fh_f_ctl, FC_FC_FIRST_SEQ | + FC_FC_END_SEQ | FC_FC_SEQ_INIT); + fh->fh_cs_ctl = 0; + fh->fh_df_ctl = 0; + fh->fh_parm_offset = 0; + + info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL); + if (!info) { + fc_frame_free(fp); + return -ENOMEM; + } + + info->job = job; + info->lport = lport; + info->rsp_code = FC_FS_ACC; + info->nents = job->reply_payload.sg_cnt; + info->sg = job->reply_payload.sg_list; + + if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp, + NULL, info, tov)) + return -ECOMM; + return 0; +} + +/** + * fc_lport_bsg_request() - The common entry point for sending + * FC Passthrough requests + * @job: The BSG passthrough job + */ +int fc_lport_bsg_request(struct fc_bsg_job *job) +{ + struct request *rsp = job->req->next_rq; + struct Scsi_Host *shost = job->shost; + struct fc_lport *lport = shost_priv(shost); + struct fc_rport *rport; + struct fc_rport_priv *rdata; + int rc = -EINVAL; + u32 did; + + job->reply->reply_payload_rcv_len = 0; + rsp->resid_len = job->reply_payload.payload_len; + + mutex_lock(&lport->lp_mutex); + + switch (job->request->msgcode) { + case FC_BSG_RPT_ELS: + rport = job->rport; + if (!rport) + break; + + rdata = rport->dd_data; + rc = fc_lport_els_request(job, lport, rport->port_id, + rdata->e_d_tov); + break; + + case FC_BSG_RPT_CT: + rport 
= job->rport; + if (!rport) + break; + + rdata = rport->dd_data; + rc = fc_lport_ct_request(job, lport, rport->port_id, + rdata->e_d_tov); + break; + + case FC_BSG_HST_CT: + did = ntoh24(job->request->rqst_data.h_ct.port_id); + if (did == FC_FID_DIR_SERV) + rdata = lport->dns_rdata; + else + rdata = lport->tt.rport_lookup(lport, did); + + if (!rdata) + break; + + rc = fc_lport_ct_request(job, lport, did, rdata->e_d_tov); + break; + + case FC_BSG_HST_ELS_NOLOGIN: + did = ntoh24(job->request->rqst_data.h_els.port_id); + rc = fc_lport_els_request(job, lport, did, lport->e_d_tov); + break; + } + + mutex_unlock(&lport->lp_mutex); + return rc; +} +EXPORT_SYMBOL(fc_lport_bsg_request); diff --git a/drivers/scsi/libfc/fc_npiv.c b/drivers/scsi/libfc/fc_npiv.c new file mode 100644 index 00000000000..c68f6c7341c --- /dev/null +++ b/drivers/scsi/libfc/fc_npiv.c @@ -0,0 +1,161 @@ +/* + * Copyright(c) 2009 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * Maintained at www.Open-FCoE.org + */ + +/* + * NPIV VN_Port helper functions for libfc + */ + +#include <scsi/libfc.h> + +/** + * fc_vport_create() - Create a new NPIV vport instance + * @vport: fc_vport structure from scsi_transport_fc + * @privsize: driver private data size to allocate along with the Scsi_Host + */ + +struct fc_lport *libfc_vport_create(struct fc_vport *vport, int privsize) +{ + struct Scsi_Host *shost = vport_to_shost(vport); + struct fc_lport *n_port = shost_priv(shost); + struct fc_lport *vn_port; + + vn_port = libfc_host_alloc(shost->hostt, privsize); + if (!vn_port) + goto err_out; + if (fc_exch_mgr_list_clone(n_port, vn_port)) + goto err_put; + + vn_port->vport = vport; + vport->dd_data = vn_port; + + mutex_lock(&n_port->lp_mutex); + list_add_tail(&vn_port->list, &n_port->vports); + mutex_unlock(&n_port->lp_mutex); + + return vn_port; + +err_put: + scsi_host_put(vn_port->host); +err_out: + return NULL; +} +EXPORT_SYMBOL(libfc_vport_create); + +/** + * fc_vport_id_lookup() - find NPIV lport that matches a given fabric ID + * @n_port: Top level N_Port which may have multiple NPIV VN_Ports + * @port_id: Fabric ID to find a match for + * + * Returns: matching lport pointer or NULL if there is no match + */ +struct fc_lport *fc_vport_id_lookup(struct fc_lport *n_port, u32 port_id) +{ + struct fc_lport *lport = NULL; + struct fc_lport *vn_port; + + if (fc_host_port_id(n_port->host) == port_id) + return n_port; + + mutex_lock(&n_port->lp_mutex); + list_for_each_entry(vn_port, &n_port->vports, list) { + if (fc_host_port_id(vn_port->host) == port_id) { + lport = vn_port; + break; + } + } + mutex_unlock(&n_port->lp_mutex); + + return lport; +} + +/* + * When setting the link state of vports during an lport state change, it's + * necessary to hold the lp_mutex of both the N_Port and the VN_Port. 
+ * This tells the lockdep engine to treat the nested locking of the VN_Port + * as a different lock class. + */ +enum libfc_lport_mutex_class { + LPORT_MUTEX_NORMAL = 0, + LPORT_MUTEX_VN_PORT = 1, +}; + +/** + * __fc_vport_setlink() - update link and status on a VN_Port + * @n_port: parent N_Port + * @vn_port: VN_Port to update + * + * Locking: must be called with both the N_Port and VN_Port lp_mutex held + */ +static void __fc_vport_setlink(struct fc_lport *n_port, + struct fc_lport *vn_port) +{ + struct fc_vport *vport = vn_port->vport; + + if (vn_port->state == LPORT_ST_DISABLED) + return; + + if (n_port->state == LPORT_ST_READY) { + if (n_port->npiv_enabled) { + fc_vport_set_state(vport, FC_VPORT_INITIALIZING); + __fc_linkup(vn_port); + } else { + fc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP); + __fc_linkdown(vn_port); + } + } else { + fc_vport_set_state(vport, FC_VPORT_LINKDOWN); + __fc_linkdown(vn_port); + } +} + +/** + * fc_vport_setlink() - update link and status on a VN_Port + * @vn_port: virtual port to update + */ +void fc_vport_setlink(struct fc_lport *vn_port) +{ + struct fc_vport *vport = vn_port->vport; + struct Scsi_Host *shost = vport_to_shost(vport); + struct fc_lport *n_port = shost_priv(shost); + + mutex_lock(&n_port->lp_mutex); + mutex_lock_nested(&vn_port->lp_mutex, LPORT_MUTEX_VN_PORT); + __fc_vport_setlink(n_port, vn_port); + mutex_unlock(&vn_port->lp_mutex); + mutex_unlock(&n_port->lp_mutex); +} +EXPORT_SYMBOL(fc_vport_setlink); + +/** + * fc_vports_linkchange() - change the link state of all vports + * @n_port: Parent N_Port that has changed state + * + * Locking: called with the n_port lp_mutex held + */ +void fc_vports_linkchange(struct fc_lport *n_port) +{ + struct fc_lport *vn_port; + + list_for_each_entry(vn_port, &n_port->vports, list) { + mutex_lock_nested(&vn_port->lp_mutex, LPORT_MUTEX_VN_PORT); + __fc_vport_setlink(n_port, vn_port); + mutex_unlock(&vn_port->lp_mutex); + } +} + diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 03ea6748e7e..35ca0e72df4 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -55,6 +55,8 @@ #include <scsi/libfc.h> #include <scsi/fc_encode.h> +#include "fc_libfc.h" + struct workqueue_struct *rport_event_queue; static void fc_rport_enter_plogi(struct fc_rport_priv *); @@ -86,12 +88,13 @@ static const char *fc_rport_state_names[] = { [RPORT_ST_LOGO] = "LOGO", [RPORT_ST_ADISC] = "ADISC", [RPORT_ST_DELETE] = "Delete", + [RPORT_ST_RESTART] = "Restart", }; /** - * fc_rport_lookup() - lookup a remote port by port_id - * @lport: Fibre Channel host port instance - * @port_id: remote port port_id to match + * fc_rport_lookup() - Lookup a remote port by port_id + * @lport: The local port to lookup the remote port on + * @port_id: The remote port ID to look up */ static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport, u32 port_id) @@ -99,16 +102,17 @@ static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport, struct fc_rport_priv *rdata; list_for_each_entry(rdata, &lport->disc.rports, peers) - if (rdata->ids.port_id == port_id && - rdata->rp_state != RPORT_ST_DELETE) + if (rdata->ids.port_id == port_id) return rdata; return NULL; } /** * fc_rport_create() - Create a new remote port - * @lport: The local port that the new remote port is for - * @port_id: The port ID for the new remote port + * @lport: The local port this remote port will be associated with + * @ids: The identifiers for the new remote port + * + * The remote port will start in 
the INIT state. * * Locking note: must be called with the disc_mutex held. */ @@ -147,8 +151,8 @@ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, } /** - * fc_rport_destroy() - free a remote port after last reference is released. - * @kref: pointer to kref inside struct fc_rport_priv + * fc_rport_destroy() - Free a remote port after last reference is released + * @kref: The remote port's kref */ static void fc_rport_destroy(struct kref *kref) { @@ -159,8 +163,8 @@ static void fc_rport_destroy(struct kref *kref) } /** - * fc_rport_state() - return a string for the state the rport is in - * @rdata: remote port private data + * fc_rport_state() - Return a string identifying the remote port's state + * @rdata: The remote port */ static const char *fc_rport_state(struct fc_rport_priv *rdata) { @@ -173,9 +177,9 @@ static const char *fc_rport_state(struct fc_rport_priv *rdata) } /** - * fc_set_rport_loss_tmo() - Set the remote port loss timeout in seconds. - * @rport: Pointer to Fibre Channel remote port structure - * @timeout: timeout in seconds + * fc_set_rport_loss_tmo() - Set the remote port loss timeout + * @rport: The remote port that gets a new timeout value + * @timeout: The new timeout value (in seconds) */ void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout) { @@ -187,9 +191,11 @@ void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout) EXPORT_SYMBOL(fc_set_rport_loss_tmo); /** - * fc_plogi_get_maxframe() - Get max payload from the common service parameters - * @flp: FLOGI payload structure - * @maxval: upper limit, may be less than what is in the service parameters + * fc_plogi_get_maxframe() - Get the maximum payload from the common service + * parameters in a FLOGI frame + * @flp: The FLOGI payload + * @maxval: The maximum frame size upper limit; this may be less than what + * is in the service parameters */ static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp, unsigned int maxval) @@ -210,9 +216,9 @@ static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp, } /** - * fc_rport_state_enter() - Change the rport's state - * @rdata: The rport whose state should change - * @new: The new state of the rport + * fc_rport_state_enter() - Change the state of a remote port + * @rdata: The remote port whose state should change + * @new: The new state * * Locking Note: Called with the rport lock held */ @@ -224,17 +230,22 @@ static void fc_rport_state_enter(struct fc_rport_priv *rdata, rdata->rp_state = new; } +/** + * fc_rport_work() - Handler for remote port events in the rport_event_queue + * @work: Handle to the remote port being dequeued + */ static void fc_rport_work(struct work_struct *work) { u32 port_id; struct fc_rport_priv *rdata = container_of(work, struct fc_rport_priv, event_work); - struct fc_rport_libfc_priv *rp; + struct fc_rport_libfc_priv *rpriv; enum fc_rport_event event; struct fc_lport *lport = rdata->local_port; struct fc_rport_operations *rport_ops; struct fc_rport_identifiers ids; struct fc_rport *rport; + int restart = 0; mutex_lock(&rdata->rp_mutex); event = rdata->event; @@ -265,12 +276,12 @@ static void fc_rport_work(struct work_struct *work) rport->maxframe_size = rdata->maxframe_size; rport->supported_classes = rdata->supported_classes; - rp = rport->dd_data; - rp->local_port = lport; - rp->rp_state = rdata->rp_state; - rp->flags = rdata->flags; - rp->e_d_tov = rdata->e_d_tov; - rp->r_a_tov = rdata->r_a_tov; + rpriv = rport->dd_data; + rpriv->local_port = lport; + rpriv->rp_state = rdata->rp_state; + 
rpriv->flags = rdata->flags; + rpriv->e_d_tov = rdata->e_d_tov; + rpriv->r_a_tov = rdata->r_a_tov; mutex_unlock(&rdata->rp_mutex); if (rport_ops && rport_ops->event_callback) { @@ -287,8 +298,19 @@ static void fc_rport_work(struct work_struct *work) mutex_unlock(&rdata->rp_mutex); if (port_id != FC_FID_DIR_SERV) { + /* + * We must drop rp_mutex before taking disc_mutex. + * Re-evaluate state to allow for restart. + * A transition to RESTART state must only happen + * while disc_mutex is held and rdata is on the list. + */ mutex_lock(&lport->disc.disc_mutex); - list_del(&rdata->peers); + mutex_lock(&rdata->rp_mutex); + if (rdata->rp_state == RPORT_ST_RESTART) + restart = 1; + else + list_del(&rdata->peers); + mutex_unlock(&rdata->rp_mutex); mutex_unlock(&lport->disc.disc_mutex); } @@ -305,14 +327,20 @@ static void fc_rport_work(struct work_struct *work) lport->tt.exch_mgr_reset(lport, port_id, 0); if (rport) { - rp = rport->dd_data; - rp->rp_state = RPORT_ST_DELETE; + rpriv = rport->dd_data; + rpriv->rp_state = RPORT_ST_DELETE; mutex_lock(&rdata->rp_mutex); rdata->rport = NULL; mutex_unlock(&rdata->rp_mutex); fc_remote_port_delete(rport); } - kref_put(&rdata->kref, lport->tt.rport_destroy); + if (restart) { + mutex_lock(&rdata->rp_mutex); + FC_RPORT_DBG(rdata, "work restart\n"); + fc_rport_enter_plogi(rdata); + mutex_unlock(&rdata->rp_mutex); + } else + kref_put(&rdata->kref, lport->tt.rport_destroy); break; default: @@ -323,7 +351,7 @@ static void fc_rport_work(struct work_struct *work) /** * fc_rport_login() - Start the remote port login state machine - * @rdata: private remote port + * @rdata: The remote port to be logged in to * * Locking Note: Called without the rport lock held. This * function will hold the rport lock, call an _enter_* @@ -342,6 +370,12 @@ int fc_rport_login(struct fc_rport_priv *rdata) FC_RPORT_DBG(rdata, "ADISC port\n"); fc_rport_enter_adisc(rdata); break; + case RPORT_ST_RESTART: + break; + case RPORT_ST_DELETE: + FC_RPORT_DBG(rdata, "Restart deleted port\n"); + fc_rport_state_enter(rdata, RPORT_ST_RESTART); + break; default: FC_RPORT_DBG(rdata, "Login to port\n"); fc_rport_enter_plogi(rdata); @@ -353,9 +387,9 @@ int fc_rport_login(struct fc_rport_priv *rdata) } /** - * fc_rport_enter_delete() - schedule a remote port to be deleted. - * @rdata: private remote port - * @event: event to report as the reason for deletion + * fc_rport_enter_delete() - Schedule a remote port to be deleted + * @rdata: The remote port to be deleted + * @event: The event to report as the reason for deletion * * Locking Note: Called with the rport lock held. * @@ -382,8 +416,8 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata, } /** - * fc_rport_logoff() - Logoff and remove an rport - * @rdata: private remote port + * fc_rport_logoff() - Logoff and remove a remote port + * @rdata: The remote port to be logged off of * * Locking Note: Called without the rport lock held. This * function will hold the rport lock, call an _enter_* @@ -397,26 +431,27 @@ int fc_rport_logoff(struct fc_rport_priv *rdata) if (rdata->rp_state == RPORT_ST_DELETE) { FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n"); - mutex_unlock(&rdata->rp_mutex); goto out; } - fc_rport_enter_logo(rdata); + if (rdata->rp_state == RPORT_ST_RESTART) + FC_RPORT_DBG(rdata, "Port in Restart state, deleting\n"); + else + fc_rport_enter_logo(rdata); /* * Change the state to Delete so that we discard * the response. 
*/ fc_rport_enter_delete(rdata, RPORT_EV_STOP); - mutex_unlock(&rdata->rp_mutex); - out: + mutex_unlock(&rdata->rp_mutex); return 0; } /** - * fc_rport_enter_ready() - The rport is ready - * @rdata: private remote port + * fc_rport_enter_ready() - Transition to the RPORT_ST_READY state + * @rdata: The remote port that is ready * * Locking Note: The rport lock is expected to be held before calling * this routine. @@ -433,8 +468,8 @@ static void fc_rport_enter_ready(struct fc_rport_priv *rdata) } /** - * fc_rport_timeout() - Handler for the retry_work timer. - * @work: The work struct of the fc_rport_priv + * fc_rport_timeout() - Handler for the retry_work timer + * @work: Handle to the remote port that has timed out * * Locking Note: Called without the rport lock held. This * function will hold the rport lock, call an _enter_* @@ -466,6 +501,7 @@ static void fc_rport_timeout(struct work_struct *work) case RPORT_ST_READY: case RPORT_ST_INIT: case RPORT_ST_DELETE: + case RPORT_ST_RESTART: break; } @@ -474,8 +510,8 @@ static void fc_rport_timeout(struct work_struct *work) /** * fc_rport_error() - Error handler, called once retries have been exhausted - * @rdata: private remote port - * @fp: The frame pointer + * @rdata: The remote port the error is happened on + * @fp: The error code encapsulated in a frame pointer * * Locking Note: The rport lock is expected to be held before * calling this routine @@ -499,6 +535,7 @@ static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp) fc_rport_enter_logo(rdata); break; case RPORT_ST_DELETE: + case RPORT_ST_RESTART: case RPORT_ST_READY: case RPORT_ST_INIT: break; @@ -506,9 +543,9 @@ static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp) } /** - * fc_rport_error_retry() - Error handler when retries are desired - * @rdata: private remote port data - * @fp: The frame pointer + * fc_rport_error_retry() - Handler for remote port state retries + * @rdata: The remote port whose state is to be retried + * @fp: The error code encapsulated in a frame pointer * * If the error was an exchange timeout retry immediately, * otherwise wait for E_D_TOV. @@ -540,10 +577,10 @@ static void fc_rport_error_retry(struct fc_rport_priv *rdata, } /** - * fc_rport_plogi_recv_resp() - Handle incoming ELS PLOGI response - * @sp: current sequence in the PLOGI exchange - * @fp: response frame - * @rdata_arg: private remote port data + * fc_rport_plogi_recv_resp() - Handler for ELS PLOGI responses + * @sp: The sequence the PLOGI is on + * @fp: The PLOGI response frame + * @rdata_arg: The remote port that sent the PLOGI response * * Locking Note: This function will be called without the rport lock * held, but it will lock, call an _enter_* function or fc_rport_error @@ -606,8 +643,8 @@ err: } /** - * fc_rport_enter_plogi() - Send Port Login (PLOGI) request to peer - * @rdata: private remote port data + * fc_rport_enter_plogi() - Send Port Login (PLOGI) request + * @rdata: The remote port to send a PLOGI to * * Locking Note: The rport lock is expected to be held before calling * this routine. 
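
[Aside, not part of the patch] The hunks above add an RPORT_ST_RESTART state: a remote port that is asked to log in again while it is being deleted is parked in RESTART, and fc_rport_work() re-issues PLOGI once the teardown completes instead of dropping the final reference. The standalone C sketch below models that decision; the enum ordering and the decide_login() helper are illustrative only, the real definitions live in include/scsi/libfc.h and fc_rport.c.

#include <stdio.h>

/* Illustrative copy of the remote-port states; ordering is assumed. */
enum rport_state {
	RPORT_ST_INIT, RPORT_ST_PLOGI, RPORT_ST_PRLI, RPORT_ST_RTV,
	RPORT_ST_READY, RPORT_ST_LOGO, RPORT_ST_ADISC,
	RPORT_ST_DELETE, RPORT_ST_RESTART, RPORT_ST_MAX,
};

enum login_action { DO_PLOGI, DO_ADISC, MARK_RESTART, ALREADY_PENDING };

/* Mirrors the new switch in fc_rport_login(): READY ports are re-validated
 * with ADISC, a port mid-delete is flagged for restart, a port already
 * flagged stays as-is, and everything else starts a fresh PLOGI. */
static enum login_action decide_login(enum rport_state st)
{
	switch (st) {
	case RPORT_ST_READY:
		return DO_ADISC;
	case RPORT_ST_DELETE:
		return MARK_RESTART;
	case RPORT_ST_RESTART:
		return ALREADY_PENDING;
	default:
		return DO_PLOGI;
	}
}

int main(void)
{
	printf("%d %d %d %d\n",
	       decide_login(RPORT_ST_INIT),     /* DO_PLOGI */
	       decide_login(RPORT_ST_READY),    /* DO_ADISC */
	       decide_login(RPORT_ST_DELETE),   /* MARK_RESTART */
	       decide_login(RPORT_ST_RESTART)); /* ALREADY_PENDING */
	return 0;
}
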
@@ -631,17 +668,18 @@ static void fc_rport_enter_plogi(struct fc_rport_priv *rdata) rdata->e_d_tov = lport->e_d_tov; if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI, - fc_rport_plogi_resp, rdata, lport->e_d_tov)) - fc_rport_error_retry(rdata, fp); + fc_rport_plogi_resp, rdata, + 2 * lport->r_a_tov)) + fc_rport_error_retry(rdata, NULL); else kref_get(&rdata->kref); } /** * fc_rport_prli_resp() - Process Login (PRLI) response handler - * @sp: current sequence in the PRLI exchange - * @fp: response frame - * @rdata_arg: private remote port data + * @sp: The sequence the PRLI response was on + * @fp: The PRLI response frame + * @rdata_arg: The remote port that sent the PRLI response * * Locking Note: This function will be called without the rport lock * held, but it will lock, call an _enter_* function or fc_rport_error @@ -710,10 +748,10 @@ err: } /** - * fc_rport_logo_resp() - Logout (LOGO) response handler - * @sp: current sequence in the LOGO exchange - * @fp: response frame - * @rdata_arg: private remote port data + * fc_rport_logo_resp() - Handler for logout (LOGO) responses + * @sp: The sequence the LOGO was on + * @fp: The LOGO response frame + * @rdata_arg: The remote port that sent the LOGO response * * Locking Note: This function will be called without the rport lock * held, but it will lock, call an _enter_* function or fc_rport_error @@ -756,8 +794,8 @@ err: } /** - * fc_rport_enter_prli() - Send Process Login (PRLI) request to peer - * @rdata: private remote port data + * fc_rport_enter_prli() - Send Process Login (PRLI) request + * @rdata: The remote port to send the PRLI request to * * Locking Note: The rport lock is expected to be held before calling * this routine. @@ -792,17 +830,18 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata) } if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PRLI, - fc_rport_prli_resp, rdata, lport->e_d_tov)) - fc_rport_error_retry(rdata, fp); + fc_rport_prli_resp, rdata, + 2 * lport->r_a_tov)) + fc_rport_error_retry(rdata, NULL); else kref_get(&rdata->kref); } /** - * fc_rport_els_rtv_resp() - Request Timeout Value response handler - * @sp: current sequence in the RTV exchange - * @fp: response frame - * @rdata_arg: private remote port data + * fc_rport_els_rtv_resp() - Handler for Request Timeout Value (RTV) responses + * @sp: The sequence the RTV was on + * @fp: The RTV response frame + * @rdata_arg: The remote port that sent the RTV response * * Many targets don't seem to support this. * @@ -865,8 +904,8 @@ err: } /** - * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request to peer - * @rdata: private remote port data + * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request + * @rdata: The remote port to send the RTV request to * * Locking Note: The rport lock is expected to be held before calling * this routine. @@ -888,15 +927,16 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata) } if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV, - fc_rport_rtv_resp, rdata, lport->e_d_tov)) - fc_rport_error_retry(rdata, fp); + fc_rport_rtv_resp, rdata, + 2 * lport->r_a_tov)) + fc_rport_error_retry(rdata, NULL); else kref_get(&rdata->kref); } /** - * fc_rport_enter_logo() - Send Logout (LOGO) request to peer - * @rdata: private remote port data + * fc_rport_enter_logo() - Send a logout (LOGO) request + * @rdata: The remote port to send the LOGO request to * * Locking Note: The rport lock is expected to be held before calling * this routine. 
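
[Aside, not part of the patch] The PLOGI, PRLI and RTV senders above (and the LOGO and ADISC senders below) are all converted to the same shape: the response timer becomes 2 * R_A_TOV instead of E_D_TOV, a failed elsct_send() reports NULL to the retry path since the frame is assumed to have been consumed by the failed send, and a successful send takes a reference that the response handler later drops. A hedged sketch of that shape, written as if it were a helper inside fc_rport.c (no such helper exists in the patch, and fc_rport_error_retry() is one of this file's static functions):

#include <scsi/libfc.h>

/* Sketch only: the common send-or-retry pattern the patch converges on. */
static void fc_rport_send_els_sketch(struct fc_rport_priv *rdata,
				     struct fc_frame *fp, unsigned int op,
				     void (*resp)(struct fc_seq *,
						  struct fc_frame *, void *))
{
	struct fc_lport *lport = rdata->local_port;

	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, op,
				  resp, rdata, 2 * lport->r_a_tov))
		fc_rport_error_retry(rdata, NULL);	/* frame already gone */
	else
		kref_get(&rdata->kref);		/* dropped by resp handler */
}

With such a helper, the individual _enter_ routines would only differ in how they build the frame and which ELS_* opcode and response handler they pass.
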
@@ -918,24 +958,25 @@ static void fc_rport_enter_logo(struct fc_rport_priv *rdata) } if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO, - fc_rport_logo_resp, rdata, lport->e_d_tov)) - fc_rport_error_retry(rdata, fp); + fc_rport_logo_resp, rdata, + 2 * lport->r_a_tov)) + fc_rport_error_retry(rdata, NULL); else kref_get(&rdata->kref); } /** - * fc_rport_els_adisc_resp() - Address Discovery response handler - * @sp: current sequence in the ADISC exchange - * @fp: response frame - * @rdata_arg: remote port private. + * fc_rport_els_adisc_resp() - Handler for Address Discovery (ADISC) responses + * @sp: The sequence the ADISC response was on + * @fp: The ADISC response frame + * @rdata_arg: The remote port that sent the ADISC response * * Locking Note: This function will be called without the rport lock * held, but it will lock, call an _enter_* function or fc_rport_error * and then unlock the rport. */ static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp, - void *rdata_arg) + void *rdata_arg) { struct fc_rport_priv *rdata = rdata_arg; struct fc_els_adisc *adisc; @@ -983,8 +1024,8 @@ err: } /** - * fc_rport_enter_adisc() - Send Address Discover (ADISC) request to peer - * @rdata: remote port private data + * fc_rport_enter_adisc() - Send Address Discover (ADISC) request + * @rdata: The remote port to send the ADISC request to * * Locking Note: The rport lock is expected to be held before calling * this routine. @@ -1005,17 +1046,18 @@ static void fc_rport_enter_adisc(struct fc_rport_priv *rdata) return; } if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_ADISC, - fc_rport_adisc_resp, rdata, lport->e_d_tov)) - fc_rport_error_retry(rdata, fp); + fc_rport_adisc_resp, rdata, + 2 * lport->r_a_tov)) + fc_rport_error_retry(rdata, NULL); else kref_get(&rdata->kref); } /** - * fc_rport_recv_adisc_req() - Handle incoming Address Discovery (ADISC) Request - * @rdata: remote port private - * @sp: current sequence in the ADISC exchange - * @in_fp: ADISC request frame + * fc_rport_recv_adisc_req() - Handler for Address Discovery (ADISC) requests + * @rdata: The remote port that sent the ADISC request + * @sp: The sequence the ADISC request was on + * @in_fp: The ADISC request frame * * Locking Note: Called with the lport and rport locks held. */ @@ -1056,10 +1098,82 @@ drop: } /** - * fc_rport_recv_els_req() - handle a validated ELS request. - * @lport: Fibre Channel local port - * @sp: current sequence in the PLOGI exchange - * @fp: response frame + * fc_rport_recv_rls_req() - Handle received Read Link Status request + * @rdata: The remote port that sent the RLS request + * @sp: The sequence that the RLS was on + * @rx_fp: The PRLI request frame + * + * Locking Note: The rport lock is expected to be held before calling + * this function. 
+ */ +static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata, + struct fc_seq *sp, struct fc_frame *rx_fp) + +{ + struct fc_lport *lport = rdata->local_port; + struct fc_frame *fp; + struct fc_exch *ep = fc_seq_exch(sp); + struct fc_els_rls *rls; + struct fc_els_rls_resp *rsp; + struct fc_els_lesb *lesb; + struct fc_seq_els_data rjt_data; + struct fc_host_statistics *hst; + u32 f_ctl; + + FC_RPORT_DBG(rdata, "Received RLS request while in state %s\n", + fc_rport_state(rdata)); + + rls = fc_frame_payload_get(rx_fp, sizeof(*rls)); + if (!rls) { + rjt_data.reason = ELS_RJT_PROT; + rjt_data.explan = ELS_EXPL_INV_LEN; + goto out_rjt; + } + + fp = fc_frame_alloc(lport, sizeof(*rsp)); + if (!fp) { + rjt_data.reason = ELS_RJT_UNAB; + rjt_data.explan = ELS_EXPL_INSUF_RES; + goto out_rjt; + } + + rsp = fc_frame_payload_get(fp, sizeof(*rsp)); + memset(rsp, 0, sizeof(*rsp)); + rsp->rls_cmd = ELS_LS_ACC; + lesb = &rsp->rls_lesb; + if (lport->tt.get_lesb) { + /* get LESB from LLD if it supports it */ + lport->tt.get_lesb(lport, lesb); + } else { + fc_get_host_stats(lport->host); + hst = &lport->host_stats; + lesb->lesb_link_fail = htonl(hst->link_failure_count); + lesb->lesb_sync_loss = htonl(hst->loss_of_sync_count); + lesb->lesb_sig_loss = htonl(hst->loss_of_signal_count); + lesb->lesb_prim_err = htonl(hst->prim_seq_protocol_err_count); + lesb->lesb_inv_word = htonl(hst->invalid_tx_word_count); + lesb->lesb_inv_crc = htonl(hst->invalid_crc_count); + } + + sp = lport->tt.seq_start_next(sp); + f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ; + fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid, + FC_TYPE_ELS, f_ctl, 0); + lport->tt.seq_send(lport, sp, fp); + goto out; + +out_rjt: + rjt_data.fp = NULL; + lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data); +out: + fc_frame_free(rx_fp); +} + +/** + * fc_rport_recv_els_req() - Handler for validated ELS requests + * @lport: The local port that received the ELS request + * @sp: The sequence that the ELS request was on + * @fp: The ELS request frame * * Handle incoming ELS requests that require port login. * The ELS opcode has already been validated by the caller. @@ -1117,6 +1231,9 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, els_data.fp = fp; lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data); break; + case ELS_RLS: + fc_rport_recv_rls_req(rdata, sp, fp); + break; default: fc_frame_free(fp); /* can't happen */ break; @@ -1131,10 +1248,10 @@ reject: } /** - * fc_rport_recv_req() - Handle a received ELS request from a rport - * @sp: current sequence in the PLOGI exchange - * @fp: response frame - * @lport: Fibre Channel local port + * fc_rport_recv_req() - Handler for requests + * @sp: The sequence the request was on + * @fp: The request frame + * @lport: The local port that received the request * * Locking Note: Called with the lport lock held. 
*/ @@ -1161,6 +1278,7 @@ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, case ELS_ADISC: case ELS_RRQ: case ELS_REC: + case ELS_RLS: fc_rport_recv_els_req(lport, sp, fp); break; default: @@ -1174,10 +1292,10 @@ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, } /** - * fc_rport_recv_plogi_req() - Handle incoming Port Login (PLOGI) request - * @lport: local port - * @sp: current sequence in the PLOGI exchange - * @fp: PLOGI request frame + * fc_rport_recv_plogi_req() - Handler for Port Login (PLOGI) requests + * @lport: The local port that received the PLOGI request + * @sp: The sequence that the PLOGI request was on + * @rx_fp: The PLOGI request frame * * Locking Note: The rport lock is held before calling this function. */ @@ -1248,6 +1366,7 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport, } break; case RPORT_ST_PRLI: + case RPORT_ST_RTV: case RPORT_ST_READY: case RPORT_ST_ADISC: FC_RPORT_DBG(rdata, "Received PLOGI in logged-in state %d " @@ -1255,11 +1374,14 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport, /* XXX TBD - should reset */ break; case RPORT_ST_DELETE: - default: - FC_RPORT_DBG(rdata, "Received PLOGI in unexpected state %d\n", - rdata->rp_state); - fc_frame_free(rx_fp); - goto out; + case RPORT_ST_LOGO: + case RPORT_ST_RESTART: + FC_RPORT_DBG(rdata, "Received PLOGI in state %s - send busy\n", + fc_rport_state(rdata)); + mutex_unlock(&rdata->rp_mutex); + rjt_data.reason = ELS_RJT_BUSY; + rjt_data.explan = ELS_EXPL_NONE; + goto reject; } /* @@ -1295,10 +1417,10 @@ reject: } /** - * fc_rport_recv_prli_req() - Handle incoming Process Login (PRLI) request - * @rdata: private remote port data - * @sp: current sequence in the PRLI exchange - * @fp: PRLI request frame + * fc_rport_recv_prli_req() - Handler for process login (PRLI) requests + * @rdata: The remote port that sent the PRLI request + * @sp: The sequence that the PRLI was on + * @rx_fp: The PRLI request frame * * Locking Note: The rport lock is exected to be held before calling * this function. @@ -1402,7 +1524,7 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, break; case FC_TYPE_FCP: fcp_parm = ntohl(rspp->spp_params); - if (fcp_parm * FCP_SPPF_RETRY) + if (fcp_parm & FCP_SPPF_RETRY) rdata->flags |= FC_RP_FLAGS_RETRY; rdata->supported_classes = FC_COS_CLASS3; if (fcp_parm & FCP_SPPF_INIT_FCN) @@ -1452,10 +1574,10 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, } /** - * fc_rport_recv_prlo_req() - Handle incoming Process Logout (PRLO) request - * @rdata: private remote port data - * @sp: current sequence in the PRLO exchange - * @fp: PRLO request frame + * fc_rport_recv_prlo_req() - Handler for process logout (PRLO) requests + * @rdata: The remote port that sent the PRLO request + * @sp: The sequence that the PRLO was on + * @fp: The PRLO request frame * * Locking Note: The rport lock is exected to be held before calling * this function. @@ -1482,10 +1604,10 @@ static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata, } /** - * fc_rport_recv_logo_req() - Handle incoming Logout (LOGO) request - * @lport: local port. - * @sp: current sequence in the LOGO exchange - * @fp: LOGO request frame + * fc_rport_recv_logo_req() - Handler for logout (LOGO) requests + * @lport: The local port that received the LOGO request + * @sp: The sequence that the LOGO request was on + * @fp: The LOGO request frame * * Locking Note: The rport lock is exected to be held before calling * this function. 
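
[Aside, not part of the patch] One hunk above is a straight bug fix: the PRLI service-parameter test used multiplication ("fcp_parm * FCP_SPPF_RETRY") instead of a bitwise AND, so any non-zero parameter word set FC_RP_FLAGS_RETRY even when the peer never requested retry support. A standalone demonstration follows; the mask value is assumed here purely for illustration, the real definition comes from include/scsi/fc/fc_fcp.h.

#include <stdio.h>
#include <stdint.h>

#define FCP_SPPF_RETRY	0x0100	/* assumed value, for illustration only */

int main(void)
{
	uint32_t fcp_parm = 0x0021;	/* retry bit NOT set by the peer */

	/* Old test: any non-zero fcp_parm makes the product non-zero. */
	printf("broken (*): %d\n", (fcp_parm * FCP_SPPF_RETRY) != 0); /* 1 */
	/* Fixed test: only true when the retry bit itself is set. */
	printf("fixed  (&): %d\n", (fcp_parm & FCP_SPPF_RETRY) != 0); /* 0 */
	return 0;
}
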
@@ -1510,14 +1632,14 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n", fc_rport_state(rdata)); + fc_rport_enter_delete(rdata, RPORT_EV_LOGO); + /* - * If the remote port was created due to discovery, - * log back in. It may have seen a stale RSCN about us. + * If the remote port was created due to discovery, set state + * to log back in. It may have seen a stale RSCN about us. */ - if (rdata->rp_state != RPORT_ST_DELETE && rdata->disc_id) - fc_rport_enter_plogi(rdata); - else - fc_rport_enter_delete(rdata, RPORT_EV_LOGO); + if (rdata->disc_id) + fc_rport_state_enter(rdata, RPORT_ST_RESTART); mutex_unlock(&rdata->rp_mutex); } else FC_RPORT_ID_DBG(lport, sid, @@ -1526,11 +1648,18 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, fc_frame_free(fp); } +/** + * fc_rport_flush_queue() - Flush the rport_event_queue + */ static void fc_rport_flush_queue(void) { flush_workqueue(rport_event_queue); } +/** + * fc_rport_init() - Initialize the remote port layer for a local port + * @lport: The local port to initialize the remote port layer for + */ int fc_rport_init(struct fc_lport *lport) { if (!lport->tt.rport_lookup) @@ -1558,25 +1687,33 @@ int fc_rport_init(struct fc_lport *lport) } EXPORT_SYMBOL(fc_rport_init); -int fc_setup_rport(void) +/** + * fc_setup_rport() - Initialize the rport_event_queue + */ +int fc_setup_rport() { rport_event_queue = create_singlethread_workqueue("fc_rport_eq"); if (!rport_event_queue) return -ENOMEM; return 0; } -EXPORT_SYMBOL(fc_setup_rport); -void fc_destroy_rport(void) +/** + * fc_destroy_rport() - Destroy the rport_event_queue + */ +void fc_destroy_rport() { destroy_workqueue(rport_event_queue); } -EXPORT_SYMBOL(fc_destroy_rport); +/** + * fc_rport_terminate_io() - Stop all outstanding I/O on a remote port + * @rport: The remote port whose I/O should be terminated + */ void fc_rport_terminate_io(struct fc_rport *rport) { - struct fc_rport_libfc_priv *rp = rport->dd_data; - struct fc_lport *lport = rp->local_port; + struct fc_rport_libfc_priv *rpriv = rport->dd_data; + struct fc_lport *lport = rpriv->local_port; lport->tt.exch_mgr_reset(lport, 0, rport->port_id); lport->tt.exch_mgr_reset(lport, rport->port_id, 0); |
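
[Aside, not part of the patch] The tail of the file documents the event-queue helpers: fc_setup_rport() creates the single-threaded "fc_rport_eq" workqueue, fc_rport_flush_queue() lets teardown wait for queued rport events, and fc_destroy_rport() tears the queue down; the EXPORT_SYMBOLs are removed, presumably because these are now called only from within libfc itself. A minimal sketch of that workqueue lifecycle, wrapped in a throwaway module so it is self-contained (the module boilerplate and the demo_* names are not part of libfc):

#include <linux/module.h>
#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_event_queue;

static int __init demo_init(void)
{
	/* One single-threaded queue keeps event processing serialized. */
	demo_event_queue = create_singlethread_workqueue("demo_rport_eq");
	if (!demo_event_queue)
		return -ENOMEM;
	return 0;
}

static void __exit demo_exit(void)
{
	flush_workqueue(demo_event_queue);	/* let queued events finish */
	destroy_workqueue(demo_event_queue);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
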