Diffstat (limited to 'drivers/scsi')
186 files changed, 49601 insertions, 8522 deletions
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 4a1f029c4fe..8d9dae89f06 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -830,6 +830,19 @@ config SCSI_GDTH
 	  To compile this driver as a module, choose M here: the
 	  module will be called gdth.
 
+config SCSI_ISCI
+	tristate "Intel(R) C600 Series Chipset SAS Controller"
+	depends on PCI && SCSI
+	depends on X86
+	# (temporary): known alpha quality driver
+	depends on EXPERIMENTAL
+	select SCSI_SAS_LIBSAS
+	---help---
+	  This driver supports the 6Gb/s SAS capabilities of the storage
+	  control unit found in the Intel(R) C600 series chipset.
+
+	  The experimental tag will be removed after the driver exits alpha
+
 config SCSI_GENERIC_NCR5380
 	tristate "Generic NCR5380/53c400 SCSI PIO support"
 	depends on ISA && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 7ad0b8a79ae..3c08f5352b2 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -73,6 +73,7 @@ obj-$(CONFIG_SCSI_AACRAID)	+= aacraid/
 obj-$(CONFIG_SCSI_AIC7XXX_OLD)	+= aic7xxx_old.o
 obj-$(CONFIG_SCSI_AIC94XX)	+= aic94xx/
 obj-$(CONFIG_SCSI_PM8001)	+= pm8001/
+obj-$(CONFIG_SCSI_ISCI)		+= isci/
 obj-$(CONFIG_SCSI_IPS)		+= ips.o
 obj-$(CONFIG_SCSI_FD_MCS)	+= fd_mcs.o
 obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index c5169f01c1c..f17c92cf808 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -422,10 +422,19 @@ MODULE_PARM_DESC(aha152x1, "parameters for second controller");
 
 #ifdef __ISAPNP__
 static struct isapnp_device_id id_table[] __devinitdata = {
-	{ ISAPNP_ANY_ID, ISAPNP_ANY_ID,
-		ISAPNP_VENDOR('A','D','P'), ISAPNP_FUNCTION(0x1505), 0 },
-	{ ISAPNP_ANY_ID, ISAPNP_ANY_ID,
-		ISAPNP_VENDOR('A','D','P'), ISAPNP_FUNCTION(0x1530), 0 },
+	{ ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1502), 0 },
+	{ ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1505), 0 },
+	{ ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1510), 0 },
+	{ ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1515), 0 },
+	{ ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1520), 0 },
+	{ ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x2015), 0 },
+	{ ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1522), 0 },
+	{ ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x2215), 0 },
+	{ ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1530), 0 },
+	{ ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x3015), 0 },
+	{ ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1532), 0 },
+	{ ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x3215), 0 },
+	{ ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x6360), 0 },
 	{ ISAPNP_DEVICE_SINGLE_END, }
 };
 MODULE_DEVICE_TABLE(isapnp, id_table);
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
index ea439f93ed8..2db79b469d9 100644
--- a/drivers/scsi/atari_NCR5380.c
+++ b/drivers/scsi/atari_NCR5380.c
@@ -892,6 +892,11 @@ static int __init NCR5380_init(struct Scsi_Host *instance, int flags)
 	return 0;
 }
 
+static void NCR5380_exit(struct Scsi_Host *instance)
+{
+	/* Empty, as we didn't schedule any delayed work */
+}
+
 /*
  * Function : int NCR5380_queue_command (Scsi_Cmnd *cmd,
  *	void (*done)(Scsi_Cmnd *))
@@ -914,7 +919,6 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
 {
 	SETUP_HOSTDATA(cmd->device->host);
 	Scsi_Cmnd *tmp;
-	int oldto;
 	unsigned long flags;
 
 #if (NDEBUG & NDEBUG_NO_WRITE)
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index 3e8658e2f15..04a154f87e3 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -730,6 +730,7 @@ int atari_scsi_release(struct Scsi_Host *sh)
 		free_irq(IRQ_TT_MFP_SCSI, sh);
 	if (atari_dma_buffer)
 		atari_stram_free(atari_dma_buffer);
+	NCR5380_exit(sh);
 	return 1;
 }
 
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 94b9a07845d..0a9bdfa3d93 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -215,73 +215,62 @@ unlock:
 static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
 {
 	struct beiscsi_hba *phba = data;
+	struct mgmt_session_info *boot_sess = &phba->boot_sess;
+	struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
 	char *str = buf;
 	int rc;
 
 	switch (type) {
 	case ISCSI_BOOT_TGT_NAME:
 		rc = sprintf(buf, "%.*s\n",
-			     (int)strlen(phba->boot_sess.target_name),
-			     (char *)&phba->boot_sess.target_name);
+			    (int)strlen(boot_sess->target_name),
+			    (char *)&boot_sess->target_name);
 		break;
 	case ISCSI_BOOT_TGT_IP_ADDR:
-		if (phba->boot_sess.conn_list[0].dest_ipaddr.ip_type == 0x1)
+		if (boot_conn->dest_ipaddr.ip_type == 0x1)
 			rc = sprintf(buf, "%pI4\n",
-				(char *)&phba->boot_sess.conn_list[0].
-				dest_ipaddr.ip_address);
+				(char *)&boot_conn->dest_ipaddr.ip_address);
 		else
 			rc = sprintf(str, "%pI6\n",
-				(char *)&phba->boot_sess.conn_list[0].
-				dest_ipaddr.ip_address);
+				(char *)&boot_conn->dest_ipaddr.ip_address);
 		break;
 	case ISCSI_BOOT_TGT_PORT:
-		rc = sprintf(str, "%d\n", phba->boot_sess.conn_list[0].
-				dest_port);
+		rc = sprintf(str, "%d\n", boot_conn->dest_port);
 		break;
 	case ISCSI_BOOT_TGT_CHAP_NAME:
 		rc = sprintf(str, "%.*s\n",
-			     phba->boot_sess.conn_list[0].
-			     negotiated_login_options.auth_data.chap.
-			     target_chap_name_length,
-			     (char *)&phba->boot_sess.conn_list[0].
-			     negotiated_login_options.auth_data.chap.
-			     target_chap_name);
+			    boot_conn->negotiated_login_options.auth_data.chap.
+			    target_chap_name_length,
+			    (char *)&boot_conn->negotiated_login_options.
+			    auth_data.chap.target_chap_name);
 		break;
 	case ISCSI_BOOT_TGT_CHAP_SECRET:
 		rc = sprintf(str, "%.*s\n",
-			     phba->boot_sess.conn_list[0].
-			     negotiated_login_options.auth_data.chap.
-			     target_secret_length,
-			     (char *)&phba->boot_sess.conn_list[0].
-			     negotiated_login_options.auth_data.chap.
-			     target_secret);
-
+			    boot_conn->negotiated_login_options.auth_data.chap.
+			    target_secret_length,
+			    (char *)&boot_conn->negotiated_login_options.
+			    auth_data.chap.target_secret);
 		break;
 	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
 		rc = sprintf(str, "%.*s\n",
-			     phba->boot_sess.conn_list[0].
-			     negotiated_login_options.auth_data.chap.
-			     intr_chap_name_length,
-			     (char *)&phba->boot_sess.conn_list[0].
-			     negotiated_login_options.auth_data.chap.
-			     intr_chap_name);
-
+			    boot_conn->negotiated_login_options.auth_data.chap.
+			    intr_chap_name_length,
+			    (char *)&boot_conn->negotiated_login_options.
+			    auth_data.chap.intr_chap_name);
 		break;
 	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
-		rc = sprintf(str, "%.*s\n",
-			     phba->boot_sess.conn_list[0].
-			     negotiated_login_options.auth_data.chap.
-			     intr_secret_length,
-			     (char *)&phba->boot_sess.conn_list[0].
-			     negotiated_login_options.auth_data.chap.
-			     intr_secret);
+		rc = sprintf(str, "%.*s\n",
+			boot_conn->negotiated_login_options.auth_data.chap.
+ intr_secret_length, + (char *)&boot_conn->negotiated_login_options. + auth_data.chap.intr_secret); break; case ISCSI_BOOT_TGT_FLAGS: - rc = sprintf(str, "2\n"); + rc = sprintf(str, "2\n"); break; case ISCSI_BOOT_TGT_NIC_ASSOC: - rc = sprintf(str, "0\n"); + rc = sprintf(str, "0\n"); break; default: rc = -ENOSYS; @@ -315,10 +304,10 @@ static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf) switch (type) { case ISCSI_BOOT_ETH_FLAGS: - rc = sprintf(str, "2\n"); + rc = sprintf(str, "2\n"); break; case ISCSI_BOOT_ETH_INDEX: - rc = sprintf(str, "0\n"); + rc = sprintf(str, "0\n"); break; case ISCSI_BOOT_ETH_MAC: rc = beiscsi_get_macaddr(buf, phba); @@ -391,40 +380,6 @@ static mode_t beiscsi_eth_get_attr_visibility(void *data, int type) return rc; } -static int beiscsi_setup_boot_info(struct beiscsi_hba *phba) -{ - struct iscsi_boot_kobj *boot_kobj; - - phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no); - if (!phba->boot_kset) - return -ENOMEM; - - /* get boot info using mgmt cmd */ - boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba, - beiscsi_show_boot_tgt_info, - beiscsi_tgt_get_attr_visibility); - if (!boot_kobj) - goto free_kset; - - boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba, - beiscsi_show_boot_ini_info, - beiscsi_ini_get_attr_visibility); - if (!boot_kobj) - goto free_kset; - - boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba, - beiscsi_show_boot_eth_info, - beiscsi_eth_get_attr_visibility); - if (!boot_kobj) - goto free_kset; - return 0; - -free_kset: - if (phba->boot_kset) - iscsi_boot_destroy_kset(phba->boot_kset); - return -ENOMEM; -} - /*------------------- PCI Driver operations and data ----------------- */ static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = { { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, @@ -483,14 +438,6 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev) if (iscsi_host_add(shost, &phba->pcidev->dev)) goto free_devices; - if (beiscsi_setup_boot_info(phba)) - /* - * log error but continue, because we may not be using - * iscsi boot. 
- */ - shost_printk(KERN_ERR, phba->shost, "Could not set up " - "iSCSI boot info."); - return phba; free_devices: @@ -3511,6 +3458,7 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba) unsigned int tag, wrb_num; unsigned short status, extd_status; struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; + int ret = -ENOMEM; tag = beiscsi_get_boot_target(phba); if (!tag) { @@ -3535,8 +3483,7 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba) boot_resp = embedded_payload(wrb); if (boot_resp->boot_session_handle < 0) { - printk(KERN_ERR "No Boot Session for this pci_func," - "session Hndl = %d\n", boot_resp->boot_session_handle); + shost_printk(KERN_INFO, phba->shost, "No Boot Session.\n"); return -ENXIO; } @@ -3574,14 +3521,70 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba) wrb = queue_get_wrb(mccq, wrb_num); free_mcc_tag(&phba->ctrl, tag); session_resp = nonemb_cmd.va ; + memcpy(&phba->boot_sess, &session_resp->session_info, sizeof(struct mgmt_session_info)); - pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, - nonemb_cmd.va, nonemb_cmd.dma); - return 0; + ret = 0; + boot_freemem: pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, nonemb_cmd.va, nonemb_cmd.dma); + return ret; +} + +static void beiscsi_boot_release(void *data) +{ + struct beiscsi_hba *phba = data; + + scsi_host_put(phba->shost); +} + +static int beiscsi_setup_boot_info(struct beiscsi_hba *phba) +{ + struct iscsi_boot_kobj *boot_kobj; + + /* get boot info using mgmt cmd */ + if (beiscsi_get_boot_info(phba)) + /* Try to see if we can carry on without this */ + return 0; + + phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no); + if (!phba->boot_kset) + return -ENOMEM; + + /* get a ref because the show function will ref the phba */ + if (!scsi_host_get(phba->shost)) + goto free_kset; + boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba, + beiscsi_show_boot_tgt_info, + beiscsi_tgt_get_attr_visibility, + beiscsi_boot_release); + if (!boot_kobj) + goto put_shost; + + if (!scsi_host_get(phba->shost)) + goto free_kset; + boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba, + beiscsi_show_boot_ini_info, + beiscsi_ini_get_attr_visibility, + beiscsi_boot_release); + if (!boot_kobj) + goto put_shost; + + if (!scsi_host_get(phba->shost)) + goto free_kset; + boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba, + beiscsi_show_boot_eth_info, + beiscsi_eth_get_attr_visibility, + beiscsi_boot_release); + if (!boot_kobj) + goto put_shost; + return 0; + +put_shost: + scsi_host_put(phba->shost); +free_kset: + iscsi_boot_destroy_kset(phba->boot_kset); return -ENOMEM; } @@ -3963,11 +3966,10 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg, } memcpy(&io_task->cmd_bhs->iscsi_data_pdu. dw[offsetof(struct amap_pdu_data_out, lun) / 32], - io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun)); + &io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun)); AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb, - cpu_to_be16((unsigned short)io_task->cmd_bhs->iscsi_hdr. 
- lun[0])); + cpu_to_be16(*(unsigned short *)&io_task->cmd_bhs->iscsi_hdr.lun)); AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen); AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, io_task->pwrb_handle->wrb_index); @@ -4150,8 +4152,7 @@ static void beiscsi_remove(struct pci_dev *pcidev) phba->ctrl.mbox_mem_alloced.size, phba->ctrl.mbox_mem_alloced.va, phba->ctrl.mbox_mem_alloced.dma); - if (phba->boot_kset) - iscsi_boot_destroy_kset(phba->boot_kset); + iscsi_boot_destroy_kset(phba->boot_kset); iscsi_host_remove(phba->shost); pci_dev_put(phba->pcidev); iscsi_host_free(phba->shost); @@ -4310,11 +4311,15 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev, goto free_blkenbld; } hwi_enable_intr(phba); - ret = beiscsi_get_boot_info(phba); - if (ret < 0) { - shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-" - "No Boot Devices !!!!!\n"); - } + + if (beiscsi_setup_boot_info(phba)) + /* + * log error but continue, because we may not be using + * iscsi boot. + */ + shost_printk(KERN_ERR, phba->shost, "Could not set up " + "iSCSI boot info."); + SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED\n\n\n"); return 0; diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h index 081c171a1ed..5ce5170254c 100644 --- a/drivers/scsi/be2iscsi/be_main.h +++ b/drivers/scsi/be2iscsi/be_main.h @@ -397,7 +397,7 @@ struct amap_pdu_data_out { }; struct be_cmd_bhs { - struct iscsi_cmd iscsi_hdr; + struct iscsi_scsi_req iscsi_hdr; unsigned char pad1[16]; struct pdu_data_out iscsi_data_pdu; unsigned char pad2[BE_SENSE_INFO_SIZE - @@ -428,7 +428,7 @@ struct be_nonio_bhs { }; struct be_status_bhs { - struct iscsi_cmd iscsi_hdr; + struct iscsi_scsi_req iscsi_hdr; unsigned char pad1[16]; /** * The plus 2 below is to hold the sense info length that gets diff --git a/drivers/scsi/bfa/Makefile b/drivers/scsi/bfa/Makefile index 4ce6f494232..475cf925d5e 100644 --- a/drivers/scsi/bfa/Makefile +++ b/drivers/scsi/bfa/Makefile @@ -1,6 +1,6 @@ obj-$(CONFIG_SCSI_BFA_FC) := bfa.o -bfa-y := bfad.o bfad_im.o bfad_attr.o bfad_debugfs.o +bfa-y := bfad.o bfad_im.o bfad_attr.o bfad_debugfs.o bfad_bsg.o bfa-y += bfa_ioc.o bfa_ioc_cb.o bfa_ioc_ct.o bfa_hw_cb.o bfa_hw_ct.o bfa-y += bfa_fcs.o bfa_fcs_lport.o bfa_fcs_rport.o bfa_fcs_fcpim.o bfa_fcbuild.o bfa-y += bfa_port.o bfa_fcpim.o bfa_core.o bfa_svc.o diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h index 7be6b5a8114..a796de93505 100644 --- a/drivers/scsi/bfa/bfa.h +++ b/drivers/scsi/bfa/bfa.h @@ -27,7 +27,7 @@ struct bfa_s; typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m); -typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete); +typedef void (*bfa_cb_cbfn_status_t) (void *cbarg, bfa_status_t status); /* * Interrupt message handlers @@ -54,7 +54,8 @@ void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m); ((void *)((struct bfi_msg_s *)((__bfa)->iocfc.req_cq_ba[__reqq].kva) \ + bfa_reqq_pi((__bfa), (__reqq))))) -#define bfa_reqq_produce(__bfa, __reqq) do { \ +#define bfa_reqq_produce(__bfa, __reqq, __mh) do { \ + (__mh).mtag.h2i.qid = (__bfa)->iocfc.hw_qid[__reqq];\ (__bfa)->iocfc.req_cq_pi[__reqq]++; \ (__bfa)->iocfc.req_cq_pi[__reqq] &= \ ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \ @@ -76,16 +77,6 @@ void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m); } while (0) /* - * Queue element to wait for room in request queue. FIFO order is - * maintained when fullfilling requests. 
- */ -struct bfa_reqq_wait_s { - struct list_head qe; - void (*qresume) (void *cbarg); - void *cbarg; -}; - -/* * Circular queue usage assignments */ enum { @@ -128,21 +119,10 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg), #define bfa_reqq_wcancel(__wqe) list_del(&(__wqe)->qe) - -/* - * Generic BFA callback element. - */ -struct bfa_cb_qe_s { - struct list_head qe; - bfa_cb_cbfn_t cbfn; - bfa_boolean_t once; - u32 rsvd; - void *cbarg; -}; - #define bfa_cb_queue(__bfa, __hcb_qe, __cbfn, __cbarg) do { \ (__hcb_qe)->cbfn = (__cbfn); \ (__hcb_qe)->cbarg = (__cbarg); \ + (__hcb_qe)->pre_rmv = BFA_FALSE; \ list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \ } while (0) @@ -157,6 +137,11 @@ struct bfa_cb_qe_s { } \ } while (0) +#define bfa_cb_queue_status(__bfa, __hcb_qe, __status) do { \ + (__hcb_qe)->fw_status = (__status); \ + list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \ +} while (0) + #define bfa_cb_queue_done(__hcb_qe) do { \ (__hcb_qe)->once = BFA_FALSE; \ } while (0) @@ -172,44 +157,14 @@ struct bfa_pciid_s { extern char bfa_version[]; -/* - * BFA memory resources - */ -enum bfa_mem_type { - BFA_MEM_TYPE_KVA = 1, /* Kernel Virtual Memory *(non-dma-able) */ - BFA_MEM_TYPE_DMA = 2, /* DMA-able memory */ - BFA_MEM_TYPE_MAX = BFA_MEM_TYPE_DMA, -}; - -struct bfa_mem_elem_s { - enum bfa_mem_type mem_type; /* see enum bfa_mem_type */ - u32 mem_len; /* Total Length in Bytes */ - u8 *kva; /* kernel virtual address */ - u64 dma; /* dma address if DMA memory */ - u8 *kva_curp; /* kva allocation cursor */ - u64 dma_curp; /* dma allocation cursor */ -}; - -struct bfa_meminfo_s { - struct bfa_mem_elem_s meminfo[BFA_MEM_TYPE_MAX]; -}; -#define bfa_meminfo_kva(_m) \ - ((_m)->meminfo[BFA_MEM_TYPE_KVA - 1].kva_curp) -#define bfa_meminfo_dma_virt(_m) \ - ((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].kva_curp) -#define bfa_meminfo_dma_phys(_m) \ - ((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].dma_curp) - struct bfa_iocfc_regs_s { void __iomem *intr_status; void __iomem *intr_mask; void __iomem *cpe_q_pi[BFI_IOC_MAX_CQS]; void __iomem *cpe_q_ci[BFI_IOC_MAX_CQS]; - void __iomem *cpe_q_depth[BFI_IOC_MAX_CQS]; void __iomem *cpe_q_ctrl[BFI_IOC_MAX_CQS]; void __iomem *rme_q_ci[BFI_IOC_MAX_CQS]; void __iomem *rme_q_pi[BFI_IOC_MAX_CQS]; - void __iomem *rme_q_depth[BFI_IOC_MAX_CQS]; void __iomem *rme_q_ctrl[BFI_IOC_MAX_CQS]; }; @@ -229,27 +184,57 @@ struct bfa_msix_s { struct bfa_hwif_s { void (*hw_reginit)(struct bfa_s *bfa); void (*hw_reqq_ack)(struct bfa_s *bfa, int reqq); - void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq); + void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq, u32 ci); void (*hw_msix_init)(struct bfa_s *bfa, int nvecs); - void (*hw_msix_install)(struct bfa_s *bfa); + void (*hw_msix_ctrl_install)(struct bfa_s *bfa); + void (*hw_msix_queue_install)(struct bfa_s *bfa); void (*hw_msix_uninstall)(struct bfa_s *bfa); void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix); void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs, u32 *maxvec); void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start, u32 *end); + int cpe_vec_q0; + int rme_vec_q0; }; typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status); +struct bfa_faa_cbfn_s { + bfa_cb_iocfc_t faa_cbfn; + void *faa_cbarg; +}; + +#define BFA_FAA_ENABLED 1 +#define BFA_FAA_DISABLED 2 + +/* + * FAA attributes + */ +struct bfa_faa_attr_s { + wwn_t faa; + u8 faa_state; + u8 pwwn_source; + u8 rsvd[6]; +}; + +struct bfa_faa_args_s { + struct bfa_faa_attr_s *faa_attr; + struct bfa_faa_cbfn_s faa_cb; + u8 
faa_state; + bfa_boolean_t busy; +}; + struct bfa_iocfc_s { struct bfa_s *bfa; struct bfa_iocfc_cfg_s cfg; int action; u32 req_cq_pi[BFI_IOC_MAX_CQS]; u32 rsp_cq_ci[BFI_IOC_MAX_CQS]; + u8 hw_qid[BFI_IOC_MAX_CQS]; struct bfa_cb_qe_s init_hcb_qe; struct bfa_cb_qe_s stop_hcb_qe; struct bfa_cb_qe_s dis_hcb_qe; + struct bfa_cb_qe_s en_hcb_qe; struct bfa_cb_qe_s stats_hcb_qe; bfa_boolean_t cfgdone; @@ -257,7 +242,6 @@ struct bfa_iocfc_s { struct bfi_iocfc_cfg_s *cfginfo; struct bfa_dma_s cfgrsp_dma; struct bfi_iocfc_cfgrsp_s *cfgrsp; - struct bfi_iocfc_cfg_reply_s *cfg_reply; struct bfa_dma_s req_cq_ba[BFI_IOC_MAX_CQS]; struct bfa_dma_s req_cq_shadow_ci[BFI_IOC_MAX_CQS]; struct bfa_dma_s rsp_cq_ba[BFI_IOC_MAX_CQS]; @@ -267,18 +251,40 @@ struct bfa_iocfc_s { bfa_cb_iocfc_t updateq_cbfn; /* bios callback function */ void *updateq_cbarg; /* bios callback arg */ u32 intr_mask; + struct bfa_faa_args_s faa_args; + struct bfa_mem_dma_s ioc_dma; + struct bfa_mem_dma_s iocfc_dma; + struct bfa_mem_dma_s reqq_dma[BFI_IOC_MAX_CQS]; + struct bfa_mem_dma_s rspq_dma[BFI_IOC_MAX_CQS]; + struct bfa_mem_kva_s kva_seg; }; -#define bfa_lpuid(__bfa) \ - bfa_ioc_portid(&(__bfa)->ioc) +#define BFA_MEM_IOC_DMA(_bfa) (&((_bfa)->iocfc.ioc_dma)) +#define BFA_MEM_IOCFC_DMA(_bfa) (&((_bfa)->iocfc.iocfc_dma)) +#define BFA_MEM_REQQ_DMA(_bfa, _qno) (&((_bfa)->iocfc.reqq_dma[(_qno)])) +#define BFA_MEM_RSPQ_DMA(_bfa, _qno) (&((_bfa)->iocfc.rspq_dma[(_qno)])) +#define BFA_MEM_IOCFC_KVA(_bfa) (&((_bfa)->iocfc.kva_seg)) + +#define bfa_fn_lpu(__bfa) \ + bfi_fn_lpu(bfa_ioc_pcifn(&(__bfa)->ioc), bfa_ioc_portid(&(__bfa)->ioc)) #define bfa_msix_init(__bfa, __nvecs) \ ((__bfa)->iocfc.hwif.hw_msix_init(__bfa, __nvecs)) -#define bfa_msix_install(__bfa) \ - ((__bfa)->iocfc.hwif.hw_msix_install(__bfa)) +#define bfa_msix_ctrl_install(__bfa) \ + ((__bfa)->iocfc.hwif.hw_msix_ctrl_install(__bfa)) +#define bfa_msix_queue_install(__bfa) \ + ((__bfa)->iocfc.hwif.hw_msix_queue_install(__bfa)) #define bfa_msix_uninstall(__bfa) \ ((__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa)) -#define bfa_isr_mode_set(__bfa, __msix) \ - ((__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix)) +#define bfa_isr_rspq_ack(__bfa, __queue, __ci) \ + ((__bfa)->iocfc.hwif.hw_rspq_ack(__bfa, __queue, __ci)) +#define bfa_isr_reqq_ack(__bfa, __queue) do { \ + if ((__bfa)->iocfc.hwif.hw_reqq_ack) \ + (__bfa)->iocfc.hwif.hw_reqq_ack(__bfa, __queue); \ +} while (0) +#define bfa_isr_mode_set(__bfa, __msix) do { \ + if ((__bfa)->iocfc.hwif.hw_isr_mode_set) \ + (__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix); \ +} while (0) #define bfa_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec) \ ((__bfa)->iocfc.hwif.hw_msix_getvecs(__bfa, __vecmap, \ __nvecs, __maxvec)) @@ -290,17 +296,17 @@ struct bfa_iocfc_s { /* * FC specific IOC functions. 
*/ -void bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, - u32 *dm_len); +void bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, + struct bfa_meminfo_s *meminfo, + struct bfa_s *bfa); void bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, - struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev); void bfa_iocfc_init(struct bfa_s *bfa); void bfa_iocfc_start(struct bfa_s *bfa); void bfa_iocfc_stop(struct bfa_s *bfa); void bfa_iocfc_isr(void *bfa, struct bfi_mbmsg_s *msg); -void bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa); +void bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa); bfa_boolean_t bfa_iocfc_is_operational(struct bfa_s *bfa); void bfa_iocfc_reset_queues(struct bfa_s *bfa); @@ -310,10 +316,10 @@ void bfa_msix_rspq(struct bfa_s *bfa, int vec); void bfa_msix_lpu_err(struct bfa_s *bfa, int vec); void bfa_hwcb_reginit(struct bfa_s *bfa); -void bfa_hwcb_reqq_ack(struct bfa_s *bfa, int rspq); -void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq); +void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci); void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs); -void bfa_hwcb_msix_install(struct bfa_s *bfa); +void bfa_hwcb_msix_ctrl_install(struct bfa_s *bfa); +void bfa_hwcb_msix_queue_install(struct bfa_s *bfa); void bfa_hwcb_msix_uninstall(struct bfa_s *bfa); void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix); void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs, @@ -321,10 +327,13 @@ void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs, void bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end); void bfa_hwct_reginit(struct bfa_s *bfa); +void bfa_hwct2_reginit(struct bfa_s *bfa); void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq); -void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq); +void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci); +void bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci); void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs); -void bfa_hwct_msix_install(struct bfa_s *bfa); +void bfa_hwct_msix_ctrl_install(struct bfa_s *bfa); +void bfa_hwct_msix_queue_install(struct bfa_s *bfa); void bfa_hwct_msix_uninstall(struct bfa_s *bfa); void bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix); void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs, @@ -373,11 +382,28 @@ int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, #define bfa_get_fw_clock_res(__bfa) \ ((__bfa)->iocfc.cfgrsp->fwcfg.fw_tick_res) +/* + * lun mask macros return NULL when min cfg is enabled and there is + * no memory allocated for lunmask. + */ +#define bfa_get_lun_mask(__bfa) \ + ((&(__bfa)->modules.dconf_mod)->min_cfg) ? NULL : \ + (&(BFA_DCONF_MOD(__bfa)->dconf->lun_mask)) + +#define bfa_get_lun_mask_list(_bfa) \ + ((&(_bfa)->modules.dconf_mod)->min_cfg) ? NULL : \ + (bfa_get_lun_mask(_bfa)->lun_list) + +#define bfa_get_lun_mask_status(_bfa) \ + (((&(_bfa)->modules.dconf_mod)->min_cfg) \ + ? 
BFA_LUNMASK_MINCFG : ((bfa_get_lun_mask(_bfa))->status)) + void bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids); void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg); void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg); void bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, - struct bfa_meminfo_s *meminfo); + struct bfa_meminfo_s *meminfo, + struct bfa_s *bfa); void bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev); @@ -402,7 +428,22 @@ bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa, void bfa_iocfc_enable(struct bfa_s *bfa); void bfa_iocfc_disable(struct bfa_s *bfa); +void bfa_iocfc_cb_dconf_modinit(struct bfa_s *bfa, bfa_status_t status); #define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \ bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout) +struct bfa_cb_pending_q_s { + struct bfa_cb_qe_s hcb_qe; + void *data; /* Driver buffer */ +}; + +/* Common macros to operate on pending stats/attr apis */ +#define bfa_pending_q_init(__qe, __cbfn, __cbarg, __data) do { \ + bfa_q_qe_init(&((__qe)->hcb_qe.qe)); \ + (__qe)->hcb_qe.cbfn = (__cbfn); \ + (__qe)->hcb_qe.cbarg = (__cbarg); \ + (__qe)->hcb_qe.pre_rmv = BFA_TRUE; \ + (__qe)->data = (__data); \ +} while (0) + #endif /* __BFA_H__ */ diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c index 91838c51fb7..4bd546bcc24 100644 --- a/drivers/scsi/bfa/bfa_core.c +++ b/drivers/scsi/bfa/bfa_core.c @@ -17,7 +17,7 @@ #include "bfad_drv.h" #include "bfa_modules.h" -#include "bfi_ctreg.h" +#include "bfi_reg.h" BFA_TRC_FILE(HAL, CORE); @@ -25,13 +25,15 @@ BFA_TRC_FILE(HAL, CORE); * BFA module list terminated by NULL */ static struct bfa_module_s *hal_mods[] = { + &hal_mod_fcdiag, &hal_mod_sgpg, &hal_mod_fcport, &hal_mod_fcxp, &hal_mod_lps, &hal_mod_uf, &hal_mod_rport, - &hal_mod_fcpim, + &hal_mod_fcp, + &hal_mod_dconf, NULL }; @@ -41,7 +43,7 @@ static struct bfa_module_s *hal_mods[] = { static bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = { bfa_isr_unhandled, /* NONE */ bfa_isr_unhandled, /* BFI_MC_IOC */ - bfa_isr_unhandled, /* BFI_MC_DIAG */ + bfa_fcdiag_intr, /* BFI_MC_DIAG */ bfa_isr_unhandled, /* BFI_MC_FLASH */ bfa_isr_unhandled, /* BFI_MC_CEE */ bfa_fcport_isr, /* BFI_MC_FCPORT */ @@ -51,7 +53,7 @@ static bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = { bfa_fcxp_isr, /* BFI_MC_FCXP */ bfa_lps_isr, /* BFI_MC_LPS */ bfa_rport_isr, /* BFI_MC_RPORT */ - bfa_itnim_isr, /* BFI_MC_ITNIM */ + bfa_itn_isr, /* BFI_MC_ITN */ bfa_isr_unhandled, /* BFI_MC_IOIM_READ */ bfa_isr_unhandled, /* BFI_MC_IOIM_WRITE */ bfa_isr_unhandled, /* BFI_MC_IOIM_IO */ @@ -89,23 +91,78 @@ static bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = { static void -bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi) +bfa_com_port_attach(struct bfa_s *bfa) { struct bfa_port_s *port = &bfa->modules.port; - u32 dm_len; - u8 *dm_kva; - u64 dm_pa; + struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa); - dm_len = bfa_port_meminfo(); - dm_kva = bfa_meminfo_dma_virt(mi); - dm_pa = bfa_meminfo_dma_phys(mi); - - memset(port, 0, sizeof(struct bfa_port_s)); bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod); - bfa_port_mem_claim(port, dm_kva, dm_pa); + bfa_port_mem_claim(port, port_dma->kva_curp, port_dma->dma_curp); +} + +/* + * ablk module attach + */ +static void +bfa_com_ablk_attach(struct bfa_s *bfa) +{ + struct bfa_ablk_s *ablk = &bfa->modules.ablk; + struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa); + + bfa_ablk_attach(ablk, &bfa->ioc); + 
bfa_ablk_memclaim(ablk, ablk_dma->kva_curp, ablk_dma->dma_curp); +} + +static void +bfa_com_cee_attach(struct bfa_s *bfa) +{ + struct bfa_cee_s *cee = &bfa->modules.cee; + struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa); + + cee->trcmod = bfa->trcmod; + bfa_cee_attach(cee, &bfa->ioc, bfa); + bfa_cee_mem_claim(cee, cee_dma->kva_curp, cee_dma->dma_curp); +} + +static void +bfa_com_sfp_attach(struct bfa_s *bfa) +{ + struct bfa_sfp_s *sfp = BFA_SFP_MOD(bfa); + struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa); + + bfa_sfp_attach(sfp, &bfa->ioc, bfa, bfa->trcmod); + bfa_sfp_memclaim(sfp, sfp_dma->kva_curp, sfp_dma->dma_curp); +} + +static void +bfa_com_flash_attach(struct bfa_s *bfa, bfa_boolean_t mincfg) +{ + struct bfa_flash_s *flash = BFA_FLASH(bfa); + struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa); + + bfa_flash_attach(flash, &bfa->ioc, bfa, bfa->trcmod, mincfg); + bfa_flash_memclaim(flash, flash_dma->kva_curp, + flash_dma->dma_curp, mincfg); +} + +static void +bfa_com_diag_attach(struct bfa_s *bfa) +{ + struct bfa_diag_s *diag = BFA_DIAG_MOD(bfa); + struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa); + + bfa_diag_attach(diag, &bfa->ioc, bfa, bfa_fcport_beacon, bfa->trcmod); + bfa_diag_memclaim(diag, diag_dma->kva_curp, diag_dma->dma_curp); +} + +static void +bfa_com_phy_attach(struct bfa_s *bfa, bfa_boolean_t mincfg) +{ + struct bfa_phy_s *phy = BFA_PHY(bfa); + struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa); - bfa_meminfo_dma_virt(mi) = dm_kva + dm_len; - bfa_meminfo_dma_phys(mi) = dm_pa + dm_len; + bfa_phy_attach(phy, &bfa->ioc, bfa, bfa->trcmod, mincfg); + bfa_phy_memclaim(phy, phy_dma->kva_curp, phy_dma->dma_curp, mincfg); } /* @@ -122,6 +179,7 @@ enum { BFA_IOCFC_ACT_INIT = 1, BFA_IOCFC_ACT_STOP = 2, BFA_IOCFC_ACT_DISABLE = 3, + BFA_IOCFC_ACT_ENABLE = 4, }; #define DEF_CFG_NUM_FABRICS 1 @@ -173,10 +231,88 @@ bfa_reqq_resume(struct bfa_s *bfa, int qid) } } +static inline void +bfa_isr_rspq(struct bfa_s *bfa, int qid) +{ + struct bfi_msg_s *m; + u32 pi, ci; + struct list_head *waitq; + + ci = bfa_rspq_ci(bfa, qid); + pi = bfa_rspq_pi(bfa, qid); + + while (ci != pi) { + m = bfa_rspq_elem(bfa, qid, ci); + WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX); + + bfa_isrs[m->mhdr.msg_class] (bfa, m); + CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems); + } + + /* + * acknowledge RME completions and update CI + */ + bfa_isr_rspq_ack(bfa, qid, ci); + + /* + * Resume any pending requests in the corresponding reqq. + */ + waitq = bfa_reqq(bfa, qid); + if (!list_empty(waitq)) + bfa_reqq_resume(bfa, qid); +} + +static inline void +bfa_isr_reqq(struct bfa_s *bfa, int qid) +{ + struct list_head *waitq; + + bfa_isr_reqq_ack(bfa, qid); + + /* + * Resume any pending requests in the corresponding reqq. 
+ */ + waitq = bfa_reqq(bfa, qid); + if (!list_empty(waitq)) + bfa_reqq_resume(bfa, qid); +} + void bfa_msix_all(struct bfa_s *bfa, int vec) { - bfa_intx(bfa); + u32 intr, qintr; + int queue; + + intr = readl(bfa->iocfc.bfa_regs.intr_status); + if (!intr) + return; + + /* + * RME completion queue interrupt + */ + qintr = intr & __HFN_INT_RME_MASK; + if (qintr && bfa->queue_process) { + for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++) + bfa_isr_rspq(bfa, queue); + } + + intr &= ~qintr; + if (!intr) + return; + + /* + * CPE completion queue interrupt + */ + qintr = intr & __HFN_INT_CPE_MASK; + if (qintr && bfa->queue_process) { + for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++) + bfa_isr_reqq(bfa, queue); + } + intr &= ~qintr; + if (!intr) + return; + + bfa_msix_lpu_err(bfa, intr); } bfa_boolean_t @@ -186,20 +322,19 @@ bfa_intx(struct bfa_s *bfa) int queue; intr = readl(bfa->iocfc.bfa_regs.intr_status); - if (!intr) - return BFA_FALSE; + + qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK); + if (qintr) + writel(qintr, bfa->iocfc.bfa_regs.intr_status); /* - * RME completion queue interrupt + * Unconditional RME completion queue interrupt */ - qintr = intr & __HFN_INT_RME_MASK; - writel(qintr, bfa->iocfc.bfa_regs.intr_status); - - for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) { - if (intr & (__HFN_INT_RME_Q0 << queue)) - bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1)); + if (bfa->queue_process) { + for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++) + bfa_isr_rspq(bfa, queue); } - intr &= ~qintr; + if (!intr) return BFA_TRUE; @@ -207,11 +342,9 @@ bfa_intx(struct bfa_s *bfa) * CPE completion queue interrupt */ qintr = intr & __HFN_INT_CPE_MASK; - writel(qintr, bfa->iocfc.bfa_regs.intr_status); - - for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) { - if (intr & (__HFN_INT_CPE_Q0 << queue)) - bfa_msix_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1)); + if (qintr && bfa->queue_process) { + for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++) + bfa_isr_reqq(bfa, queue); } intr &= ~qintr; if (!intr) @@ -225,32 +358,25 @@ bfa_intx(struct bfa_s *bfa) void bfa_isr_enable(struct bfa_s *bfa) { - u32 intr_unmask; + u32 umsk; int pci_func = bfa_ioc_pcifn(&bfa->ioc); bfa_trc(bfa, pci_func); - bfa_msix_install(bfa); - intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | - __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | - __HFN_INT_LL_HALT); - - if (pci_func == 0) - intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | - __HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 | - __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | - __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | - __HFN_INT_MBOX_LPU0); - else - intr_unmask |= (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | - __HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 | - __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | - __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | - __HFN_INT_MBOX_LPU1); - - writel(intr_unmask, bfa->iocfc.bfa_regs.intr_status); - writel(~intr_unmask, bfa->iocfc.bfa_regs.intr_mask); - bfa->iocfc.intr_mask = ~intr_unmask; + bfa_msix_ctrl_install(bfa); + + if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) { + umsk = __HFN_INT_ERR_MASK_CT2; + umsk |= pci_func == 0 ? + __HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2; + } else { + umsk = __HFN_INT_ERR_MASK; + umsk |= pci_func == 0 ? 
__HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK; + } + + writel(umsk, bfa->iocfc.bfa_regs.intr_status); + writel(~umsk, bfa->iocfc.bfa_regs.intr_mask); + bfa->iocfc.intr_mask = ~umsk; bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0); } @@ -263,20 +389,9 @@ bfa_isr_disable(struct bfa_s *bfa) } void -bfa_msix_reqq(struct bfa_s *bfa, int qid) +bfa_msix_reqq(struct bfa_s *bfa, int vec) { - struct list_head *waitq; - - qid &= (BFI_IOC_MAX_CQS - 1); - - bfa->iocfc.hwif.hw_reqq_ack(bfa, qid); - - /* - * Resume any pending requests in the corresponding reqq. - */ - waitq = bfa_reqq(bfa, qid); - if (!list_empty(waitq)) - bfa_reqq_resume(bfa, qid); + bfa_isr_reqq(bfa, vec - bfa->iocfc.hwif.cpe_vec_q0); } void @@ -290,57 +405,38 @@ bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m) } void -bfa_msix_rspq(struct bfa_s *bfa, int qid) +bfa_msix_rspq(struct bfa_s *bfa, int vec) { - struct bfi_msg_s *m; - u32 pi, ci; - struct list_head *waitq; - - qid &= (BFI_IOC_MAX_CQS - 1); - - bfa->iocfc.hwif.hw_rspq_ack(bfa, qid); - - ci = bfa_rspq_ci(bfa, qid); - pi = bfa_rspq_pi(bfa, qid); - - if (bfa->rme_process) { - while (ci != pi) { - m = bfa_rspq_elem(bfa, qid, ci); - bfa_isrs[m->mhdr.msg_class] (bfa, m); - CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems); - } - } - - /* - * update CI - */ - bfa_rspq_ci(bfa, qid) = pi; - writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]); - mmiowb(); - - /* - * Resume any pending requests in the corresponding reqq. - */ - waitq = bfa_reqq(bfa, qid); - if (!list_empty(waitq)) - bfa_reqq_resume(bfa, qid); + bfa_isr_rspq(bfa, vec - bfa->iocfc.hwif.rme_vec_q0); } void bfa_msix_lpu_err(struct bfa_s *bfa, int vec) { u32 intr, curr_value; + bfa_boolean_t lpu_isr, halt_isr, pss_isr; intr = readl(bfa->iocfc.bfa_regs.intr_status); - if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1)) - bfa_ioc_mbox_isr(&bfa->ioc); + if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) { + halt_isr = intr & __HFN_INT_CPQ_HALT_CT2; + pss_isr = intr & __HFN_INT_ERR_PSS_CT2; + lpu_isr = intr & (__HFN_INT_MBOX_LPU0_CT2 | + __HFN_INT_MBOX_LPU1_CT2); + intr &= __HFN_INT_ERR_MASK_CT2; + } else { + halt_isr = bfa_asic_id_ct(bfa->ioc.pcidev.device_id) ? 
+ (intr & __HFN_INT_LL_HALT) : 0; + pss_isr = intr & __HFN_INT_ERR_PSS; + lpu_isr = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1); + intr &= __HFN_INT_ERR_MASK; + } - intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | - __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT); + if (lpu_isr) + bfa_ioc_mbox_isr(&bfa->ioc); if (intr) { - if (intr & __HFN_INT_LL_HALT) { + if (halt_isr) { /* * If LL_HALT bit is set then FW Init Halt LL Port * Register needs to be cleared as well so Interrupt @@ -351,7 +447,7 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec) writel(curr_value, bfa->ioc.ioc_regs.ll_halt); } - if (intr & __HFN_INT_ERR_PSS) { + if (pss_isr) { /* * ERR_PSS bit needs to be cleared as well in case * interrups are shared so driver's interrupt handler is @@ -359,7 +455,6 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec) */ curr_value = readl( bfa->ioc.ioc_regs.pss_err_status_reg); - curr_value &= __PSS_ERR_STATUS_SET; writel(curr_value, bfa->ioc.ioc_regs.pss_err_status_reg); } @@ -377,41 +472,6 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec) * BFA IOC private functions */ -static void -bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len) -{ - int i, per_reqq_sz, per_rspq_sz; - - per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ), - BFA_DMA_ALIGN_SZ); - per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ), - BFA_DMA_ALIGN_SZ); - - /* - * Calculate CQ size - */ - for (i = 0; i < cfg->fwcfg.num_cqs; i++) { - *dm_len = *dm_len + per_reqq_sz; - *dm_len = *dm_len + per_rspq_sz; - } - - /* - * Calculate Shadow CI/PI size - */ - for (i = 0; i < cfg->fwcfg.num_cqs; i++) - *dm_len += (2 * BFA_CACHELINE_SZ); -} - -static void -bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len) -{ - *dm_len += - BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ); - *dm_len += - BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s), - BFA_CACHELINE_SZ); -} - /* * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ */ @@ -433,8 +493,13 @@ bfa_iocfc_send_cfg(void *bfa_arg) /* * initialize IOC configuration info */ + cfg_info->single_msix_vec = 0; + if (bfa->msix.nvecs == 1) + cfg_info->single_msix_vec = 1; cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG; cfg_info->num_cqs = cfg->fwcfg.num_cqs; + cfg_info->num_ioim_reqs = cpu_to_be16(cfg->fwcfg.num_ioim_reqs); + cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs); bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa); /* @@ -469,7 +534,7 @@ bfa_iocfc_send_cfg(void *bfa_arg) * dma map IOC configuration itself */ bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ, - bfa_lpuid(bfa)); + bfa_fn_lpu(bfa)); bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa); bfa_ioc_mbox_send(&bfa->ioc, &cfg_req, @@ -491,26 +556,40 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, /* * Initialize chip specific handlers. 
*/ - if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) { + if (bfa_asic_id_ctc(bfa_ioc_devid(&bfa->ioc))) { iocfc->hwif.hw_reginit = bfa_hwct_reginit; iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack; iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack; iocfc->hwif.hw_msix_init = bfa_hwct_msix_init; - iocfc->hwif.hw_msix_install = bfa_hwct_msix_install; + iocfc->hwif.hw_msix_ctrl_install = bfa_hwct_msix_ctrl_install; + iocfc->hwif.hw_msix_queue_install = bfa_hwct_msix_queue_install; iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall; iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set; iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs; iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range; + iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CT; + iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CT; } else { iocfc->hwif.hw_reginit = bfa_hwcb_reginit; - iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack; + iocfc->hwif.hw_reqq_ack = NULL; iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack; iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init; - iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install; + iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install; + iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install; iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall; iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set; iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs; iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range; + iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CB + + bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS; + iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CB + + bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS; + } + + if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) { + iocfc->hwif.hw_reginit = bfa_hwct2_reginit; + iocfc->hwif.hw_isr_mode_set = NULL; + iocfc->hwif.hw_rspq_ack = bfa_hwct2_rspq_ack; } iocfc->hwif.hw_reginit(bfa); @@ -518,48 +597,42 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, } static void -bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg, - struct bfa_meminfo_s *meminfo) +bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg) { - u8 *dm_kva; - u64 dm_pa; - int i, per_reqq_sz, per_rspq_sz; + u8 *dm_kva = NULL; + u64 dm_pa = 0; + int i, per_reqq_sz, per_rspq_sz, dbgsz; struct bfa_iocfc_s *iocfc = &bfa->iocfc; - int dbgsz; + struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa); + struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa); + struct bfa_mem_dma_s *reqq_dma, *rspq_dma; - dm_kva = bfa_meminfo_dma_virt(meminfo); - dm_pa = bfa_meminfo_dma_phys(meminfo); + /* First allocate dma memory for IOC */ + bfa_ioc_mem_claim(&bfa->ioc, bfa_mem_dma_virt(ioc_dma), + bfa_mem_dma_phys(ioc_dma)); - /* - * First allocate dma memory for IOC. 
- */ - bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa); - dm_kva += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ); - dm_pa += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ); - - /* - * Claim DMA-able memory for the request/response queues and for shadow - * ci/pi registers - */ + /* Claim DMA-able memory for the request/response queues */ per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ), - BFA_DMA_ALIGN_SZ); + BFA_DMA_ALIGN_SZ); per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ), - BFA_DMA_ALIGN_SZ); + BFA_DMA_ALIGN_SZ); for (i = 0; i < cfg->fwcfg.num_cqs; i++) { - iocfc->req_cq_ba[i].kva = dm_kva; - iocfc->req_cq_ba[i].pa = dm_pa; - memset(dm_kva, 0, per_reqq_sz); - dm_kva += per_reqq_sz; - dm_pa += per_reqq_sz; - - iocfc->rsp_cq_ba[i].kva = dm_kva; - iocfc->rsp_cq_ba[i].pa = dm_pa; - memset(dm_kva, 0, per_rspq_sz); - dm_kva += per_rspq_sz; - dm_pa += per_rspq_sz; + reqq_dma = BFA_MEM_REQQ_DMA(bfa, i); + iocfc->req_cq_ba[i].kva = bfa_mem_dma_virt(reqq_dma); + iocfc->req_cq_ba[i].pa = bfa_mem_dma_phys(reqq_dma); + memset(iocfc->req_cq_ba[i].kva, 0, per_reqq_sz); + + rspq_dma = BFA_MEM_RSPQ_DMA(bfa, i); + iocfc->rsp_cq_ba[i].kva = bfa_mem_dma_virt(rspq_dma); + iocfc->rsp_cq_ba[i].pa = bfa_mem_dma_phys(rspq_dma); + memset(iocfc->rsp_cq_ba[i].kva, 0, per_rspq_sz); } + /* Claim IOCFC dma memory - for shadow CI/PI */ + dm_kva = bfa_mem_dma_virt(iocfc_dma); + dm_pa = bfa_mem_dma_phys(iocfc_dma); + for (i = 0; i < cfg->fwcfg.num_cqs; i++) { iocfc->req_cq_shadow_ci[i].kva = dm_kva; iocfc->req_cq_shadow_ci[i].pa = dm_pa; @@ -572,36 +645,27 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg, dm_pa += BFA_CACHELINE_SZ; } - /* - * Claim DMA-able memory for the config info page - */ + /* Claim IOCFC dma memory - for the config info page */ bfa->iocfc.cfg_info.kva = dm_kva; bfa->iocfc.cfg_info.pa = dm_pa; bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva; dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ); dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ); - /* - * Claim DMA-able memory for the config response - */ + /* Claim IOCFC dma memory - for the config response */ bfa->iocfc.cfgrsp_dma.kva = dm_kva; bfa->iocfc.cfgrsp_dma.pa = dm_pa; bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva; - - dm_kva += - BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s), - BFA_CACHELINE_SZ); + dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s), + BFA_CACHELINE_SZ); dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s), - BFA_CACHELINE_SZ); - - - bfa_meminfo_dma_virt(meminfo) = dm_kva; - bfa_meminfo_dma_phys(meminfo) = dm_pa; + BFA_CACHELINE_SZ); + /* Claim IOCFC kva memory */ dbgsz = (bfa_auto_recover) ? 
BFA_DBG_FWTRC_LEN : 0; if (dbgsz > 0) { - bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo)); - bfa_meminfo_kva(meminfo) += dbgsz; + bfa_ioc_debug_memclaim(&bfa->ioc, bfa_mem_kva_curp(iocfc)); + bfa_mem_kva_curp(iocfc) += dbgsz; } } @@ -613,7 +677,9 @@ bfa_iocfc_start_submod(struct bfa_s *bfa) { int i; - bfa->rme_process = BFA_TRUE; + bfa->queue_process = BFA_TRUE; + for (i = 0; i < BFI_IOC_MAX_CQS; i++) + bfa_isr_rspq_ack(bfa, i, bfa_rspq_ci(bfa, i)); for (i = 0; hal_mods[i]; i++) hal_mods[i]->start(bfa); @@ -637,7 +703,7 @@ bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete) struct bfa_s *bfa = bfa_arg; if (complete) { - if (bfa->iocfc.cfgdone) + if (bfa->iocfc.cfgdone && BFA_DCONF_MOD(bfa)->flashdone) bfa_cb_init(bfa->bfad, BFA_STATUS_OK); else bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED); @@ -660,6 +726,16 @@ bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl) } static void +bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl) +{ + struct bfa_s *bfa = bfa_arg; + struct bfad_s *bfad = bfa->bfad; + + if (compl) + complete(&bfad->enable_comp); +} + +static void bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl) { struct bfa_s *bfa = bfa_arg; @@ -669,6 +745,37 @@ bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl) complete(&bfad->disable_comp); } +/** + * configure queue registers from firmware response + */ +static void +bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg) +{ + int i; + struct bfa_iocfc_regs_s *r = &bfa->iocfc.bfa_regs; + void __iomem *kva = bfa_ioc_bar0(&bfa->ioc); + + for (i = 0; i < BFI_IOC_MAX_CQS; i++) { + bfa->iocfc.hw_qid[i] = qreg->hw_qid[i]; + r->cpe_q_ci[i] = kva + be32_to_cpu(qreg->cpe_q_ci_off[i]); + r->cpe_q_pi[i] = kva + be32_to_cpu(qreg->cpe_q_pi_off[i]); + r->cpe_q_ctrl[i] = kva + be32_to_cpu(qreg->cpe_qctl_off[i]); + r->rme_q_ci[i] = kva + be32_to_cpu(qreg->rme_q_ci_off[i]); + r->rme_q_pi[i] = kva + be32_to_cpu(qreg->rme_q_pi_off[i]); + r->rme_q_ctrl[i] = kva + be32_to_cpu(qreg->rme_qctl_off[i]); + } +} + +static void +bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg) +{ + bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs); + bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs); + bfa_rport_res_recfg(bfa, fwcfg->num_rports); + bfa_fcp_res_recfg(bfa, fwcfg->num_ioim_reqs); + bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs); +} + /* * Update BFA configuration from firmware configuration. 
*/ @@ -681,6 +788,7 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa) fwcfg->num_cqs = fwcfg->num_cqs; fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs); + fwcfg->num_fwtio_reqs = be16_to_cpu(fwcfg->num_fwtio_reqs); fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs); fwcfg->num_fcxp_reqs = be16_to_cpu(fwcfg->num_fcxp_reqs); fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs); @@ -689,14 +797,35 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa) iocfc->cfgdone = BFA_TRUE; /* + * configure queue register offsets as learnt from firmware + */ + bfa_iocfc_qreg(bfa, &cfgrsp->qreg); + + /* + * Re-configure resources as learnt from Firmware + */ + bfa_iocfc_res_recfg(bfa, fwcfg); + + /* + * Install MSIX queue handlers + */ + bfa_msix_queue_install(bfa); + + /* * Configuration is complete - initialize/start submodules */ bfa_fcport_init(bfa); - if (iocfc->action == BFA_IOCFC_ACT_INIT) - bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa); - else + if (iocfc->action == BFA_IOCFC_ACT_INIT) { + if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE) + bfa_cb_queue(bfa, &iocfc->init_hcb_qe, + bfa_iocfc_init_cb, bfa); + } else { + if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE) + bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe, + bfa_iocfc_enable_cb, bfa); bfa_iocfc_start_submod(bfa); + } } void bfa_iocfc_reset_queues(struct bfa_s *bfa) @@ -711,6 +840,181 @@ bfa_iocfc_reset_queues(struct bfa_s *bfa) } } +/* Fabric Assigned Address specific functions */ + +/* + * Check whether IOC is ready before sending command down + */ +static bfa_status_t +bfa_faa_validate_request(struct bfa_s *bfa) +{ + enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa); + u32 card_type = bfa->ioc.attr->card_type; + + if (bfa_ioc_is_operational(&bfa->ioc)) { + if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type)) + return BFA_STATUS_FEATURE_NOT_SUPPORTED; + } else { + if (!bfa_ioc_is_acq_addr(&bfa->ioc)) + return BFA_STATUS_IOC_NON_OP; + } + + return BFA_STATUS_OK; +} + +bfa_status_t +bfa_faa_enable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn, void *cbarg) +{ + struct bfi_faa_en_dis_s faa_enable_req; + struct bfa_iocfc_s *iocfc = &bfa->iocfc; + bfa_status_t status; + + iocfc->faa_args.faa_cb.faa_cbfn = cbfn; + iocfc->faa_args.faa_cb.faa_cbarg = cbarg; + + status = bfa_faa_validate_request(bfa); + if (status != BFA_STATUS_OK) + return status; + + if (iocfc->faa_args.busy == BFA_TRUE) + return BFA_STATUS_DEVBUSY; + + if (iocfc->faa_args.faa_state == BFA_FAA_ENABLED) + return BFA_STATUS_FAA_ENABLED; + + if (bfa_fcport_is_trunk_enabled(bfa)) + return BFA_STATUS_ERROR_TRUNK_ENABLED; + + bfa_fcport_cfg_faa(bfa, BFA_FAA_ENABLED); + iocfc->faa_args.busy = BFA_TRUE; + + memset(&faa_enable_req, 0, sizeof(struct bfi_faa_en_dis_s)); + bfi_h2i_set(faa_enable_req.mh, BFI_MC_IOCFC, + BFI_IOCFC_H2I_FAA_ENABLE_REQ, bfa_fn_lpu(bfa)); + + bfa_ioc_mbox_send(&bfa->ioc, &faa_enable_req, + sizeof(struct bfi_faa_en_dis_s)); + + return BFA_STATUS_OK; +} + +bfa_status_t +bfa_faa_disable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn, + void *cbarg) +{ + struct bfi_faa_en_dis_s faa_disable_req; + struct bfa_iocfc_s *iocfc = &bfa->iocfc; + bfa_status_t status; + + iocfc->faa_args.faa_cb.faa_cbfn = cbfn; + iocfc->faa_args.faa_cb.faa_cbarg = cbarg; + + status = bfa_faa_validate_request(bfa); + if (status != BFA_STATUS_OK) + return status; + + if (iocfc->faa_args.busy == BFA_TRUE) + return BFA_STATUS_DEVBUSY; + + if (iocfc->faa_args.faa_state == BFA_FAA_DISABLED) + return BFA_STATUS_FAA_DISABLED; + + bfa_fcport_cfg_faa(bfa, BFA_FAA_DISABLED); + iocfc->faa_args.busy = BFA_TRUE; + + 
memset(&faa_disable_req, 0, sizeof(struct bfi_faa_en_dis_s)); + bfi_h2i_set(faa_disable_req.mh, BFI_MC_IOCFC, + BFI_IOCFC_H2I_FAA_DISABLE_REQ, bfa_fn_lpu(bfa)); + + bfa_ioc_mbox_send(&bfa->ioc, &faa_disable_req, + sizeof(struct bfi_faa_en_dis_s)); + + return BFA_STATUS_OK; +} + +bfa_status_t +bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr, + bfa_cb_iocfc_t cbfn, void *cbarg) +{ + struct bfi_faa_query_s faa_attr_req; + struct bfa_iocfc_s *iocfc = &bfa->iocfc; + bfa_status_t status; + + iocfc->faa_args.faa_attr = attr; + iocfc->faa_args.faa_cb.faa_cbfn = cbfn; + iocfc->faa_args.faa_cb.faa_cbarg = cbarg; + + status = bfa_faa_validate_request(bfa); + if (status != BFA_STATUS_OK) + return status; + + if (iocfc->faa_args.busy == BFA_TRUE) + return BFA_STATUS_DEVBUSY; + + iocfc->faa_args.busy = BFA_TRUE; + memset(&faa_attr_req, 0, sizeof(struct bfi_faa_query_s)); + bfi_h2i_set(faa_attr_req.mh, BFI_MC_IOCFC, + BFI_IOCFC_H2I_FAA_QUERY_REQ, bfa_fn_lpu(bfa)); + + bfa_ioc_mbox_send(&bfa->ioc, &faa_attr_req, + sizeof(struct bfi_faa_query_s)); + + return BFA_STATUS_OK; +} + +/* + * FAA enable response + */ +static void +bfa_faa_enable_reply(struct bfa_iocfc_s *iocfc, + struct bfi_faa_en_dis_rsp_s *rsp) +{ + void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg; + bfa_status_t status = rsp->status; + + WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn); + + iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status); + iocfc->faa_args.busy = BFA_FALSE; +} + +/* + * FAA disable response + */ +static void +bfa_faa_disable_reply(struct bfa_iocfc_s *iocfc, + struct bfi_faa_en_dis_rsp_s *rsp) +{ + void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg; + bfa_status_t status = rsp->status; + + WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn); + + iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status); + iocfc->faa_args.busy = BFA_FALSE; +} + +/* + * FAA query response + */ +static void +bfa_faa_query_reply(struct bfa_iocfc_s *iocfc, + bfi_faa_query_rsp_t *rsp) +{ + void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg; + + if (iocfc->faa_args.faa_attr) { + iocfc->faa_args.faa_attr->faa = rsp->faa; + iocfc->faa_args.faa_attr->faa_state = rsp->faa_status; + iocfc->faa_args.faa_attr->pwwn_source = rsp->addr_source; + } + + WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn); + + iocfc->faa_args.faa_cb.faa_cbfn(cbarg, BFA_STATUS_OK); + iocfc->faa_args.busy = BFA_FALSE; +} + /* * IOC enable request is complete */ @@ -719,15 +1023,25 @@ bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status) { struct bfa_s *bfa = bfa_arg; + if (status == BFA_STATUS_FAA_ACQ_ADDR) { + bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, + bfa_iocfc_init_cb, bfa); + return; + } + if (status != BFA_STATUS_OK) { bfa_isr_disable(bfa); if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb, bfa); + else if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE) + bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe, + bfa_iocfc_enable_cb, bfa); return; } bfa_iocfc_send_cfg(bfa); + bfa_dconf_modinit(bfa); } /* @@ -759,7 +1073,7 @@ bfa_iocfc_hbfail_cbfn(void *bfa_arg) { struct bfa_s *bfa = bfa_arg; - bfa->rme_process = BFA_FALSE; + bfa->queue_process = BFA_FALSE; bfa_isr_disable(bfa); bfa_iocfc_disable_submod(bfa); @@ -786,15 +1100,47 @@ bfa_iocfc_reset_cbfn(void *bfa_arg) * Query IOC memory requirement information. 
*/ void -bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, - u32 *dm_len) +bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo, + struct bfa_s *bfa) { - /* dma memory for IOC */ - *dm_len += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ); + int q, per_reqq_sz, per_rspq_sz; + struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa); + struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa); + struct bfa_mem_kva_s *iocfc_kva = BFA_MEM_IOCFC_KVA(bfa); + u32 dm_len = 0; + + /* dma memory setup for IOC */ + bfa_mem_dma_setup(meminfo, ioc_dma, + BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ)); + + /* dma memory setup for REQ/RSP queues */ + per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ), + BFA_DMA_ALIGN_SZ); + per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ), + BFA_DMA_ALIGN_SZ); - bfa_iocfc_fw_cfg_sz(cfg, dm_len); - bfa_iocfc_cqs_sz(cfg, dm_len); - *km_len += (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0; + for (q = 0; q < cfg->fwcfg.num_cqs; q++) { + bfa_mem_dma_setup(meminfo, BFA_MEM_REQQ_DMA(bfa, q), + per_reqq_sz); + bfa_mem_dma_setup(meminfo, BFA_MEM_RSPQ_DMA(bfa, q), + per_rspq_sz); + } + + /* IOCFC dma memory - calculate Shadow CI/PI size */ + for (q = 0; q < cfg->fwcfg.num_cqs; q++) + dm_len += (2 * BFA_CACHELINE_SZ); + + /* IOCFC dma memory - calculate config info / rsp size */ + dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ); + dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s), + BFA_CACHELINE_SZ); + + /* dma memory setup for IOCFC */ + bfa_mem_dma_setup(meminfo, iocfc_dma, dm_len); + + /* kva memory setup for IOCFC */ + bfa_mem_kva_setup(meminfo, iocfc_kva, + ((bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0)); } /* @@ -802,7 +1148,7 @@ bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, */ void bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, - struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) + struct bfa_pcidev_s *pcidev) { int i; struct bfa_ioc_s *ioc = &bfa->ioc; @@ -815,17 +1161,11 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, ioc->trcmod = bfa->trcmod; bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod); - /* - * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC. 
- */ - if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC) - bfa_ioc_set_fcmode(&bfa->ioc); - - bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC); + bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_PCIFN_CLASS_FC); bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs); bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev); - bfa_iocfc_mem_claim(bfa, cfg, meminfo); + bfa_iocfc_mem_claim(bfa, cfg); INIT_LIST_HEAD(&bfa->timer_mod.timer_q); INIT_LIST_HEAD(&bfa->comp_q); @@ -863,8 +1203,10 @@ bfa_iocfc_stop(struct bfa_s *bfa) { bfa->iocfc.action = BFA_IOCFC_ACT_STOP; - bfa->rme_process = BFA_FALSE; - bfa_ioc_disable(&bfa->ioc); + bfa->queue_process = BFA_FALSE; + bfa_dconf_modexit(bfa); + if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE) + bfa_ioc_disable(&bfa->ioc); } void @@ -879,12 +1221,22 @@ bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m) switch (msg->mh.msg_id) { case BFI_IOCFC_I2H_CFG_REPLY: - iocfc->cfg_reply = &msg->cfg_reply; bfa_iocfc_cfgrsp(bfa); break; case BFI_IOCFC_I2H_UPDATEQ_RSP: iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK); break; + case BFI_IOCFC_I2H_FAA_ENABLE_RSP: + bfa_faa_enable_reply(iocfc, + (struct bfi_faa_en_dis_rsp_s *)msg); + break; + case BFI_IOCFC_I2H_FAA_DISABLE_RSP: + bfa_faa_disable_reply(iocfc, + (struct bfi_faa_en_dis_rsp_s *)msg); + break; + case BFI_IOCFC_I2H_FAA_QUERY_RSP: + bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg); + break; default: WARN_ON(1); } @@ -926,7 +1278,7 @@ bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr) return BFA_STATUS_DEVBUSY; bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ, - bfa_lpuid(bfa)); + bfa_fn_lpu(bfa)); m->coalesce = iocfc->cfginfo->intr_attr.coalesce; m->delay = iocfc->cfginfo->intr_attr.delay; m->latency = iocfc->cfginfo->intr_attr.latency; @@ -934,17 +1286,17 @@ bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr) bfa_trc(bfa, attr->delay); bfa_trc(bfa, attr->latency); - bfa_reqq_produce(bfa, BFA_REQQ_IOC); + bfa_reqq_produce(bfa, BFA_REQQ_IOC, m->mh); return BFA_STATUS_OK; } void -bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa) +bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa) { struct bfa_iocfc_s *iocfc = &bfa->iocfc; iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1); - bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa); + bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase[seg_no], snsbase_pa); } /* * Enable IOC after it is disabled. @@ -954,6 +1306,7 @@ bfa_iocfc_enable(struct bfa_s *bfa) { bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0, "IOC Enable"); + bfa->iocfc.action = BFA_IOCFC_ACT_ENABLE; bfa_ioc_enable(&bfa->ioc); } @@ -964,7 +1317,7 @@ bfa_iocfc_disable(struct bfa_s *bfa) "IOC Disable"); bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE; - bfa->rme_process = BFA_FALSE; + bfa->queue_process = BFA_FALSE; bfa_ioc_disable(&bfa->ioc); } @@ -1033,33 +1386,49 @@ bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport) * starting address for each block and provide the same * structure as input parameter to bfa_attach() call. * + * @param[in] bfa - pointer to the bfa structure, used while fetching the + * dma, kva memory information of the bfa sub-modules. 
+ * * @return void * * Special Considerations: @note */ void -bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo) +bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo, + struct bfa_s *bfa) { int i; - u32 km_len = 0, dm_len = 0; + struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa); + struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa); + struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa); + struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa); + struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa); + struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa); + struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa); WARN_ON((cfg == NULL) || (meminfo == NULL)); memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s)); - meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type = - BFA_MEM_TYPE_KVA; - meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_type = - BFA_MEM_TYPE_DMA; - bfa_iocfc_meminfo(cfg, &km_len, &dm_len); - - for (i = 0; hal_mods[i]; i++) - hal_mods[i]->meminfo(cfg, &km_len, &dm_len); + /* Initialize the DMA & KVA meminfo queues */ + INIT_LIST_HEAD(&meminfo->dma_info.qe); + INIT_LIST_HEAD(&meminfo->kva_info.qe); - dm_len += bfa_port_meminfo(); + bfa_iocfc_meminfo(cfg, meminfo, bfa); - meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len; - meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len; + for (i = 0; hal_mods[i]; i++) + hal_mods[i]->meminfo(cfg, meminfo, bfa); + + /* dma info setup */ + bfa_mem_dma_setup(meminfo, port_dma, bfa_port_meminfo()); + bfa_mem_dma_setup(meminfo, ablk_dma, bfa_ablk_meminfo()); + bfa_mem_dma_setup(meminfo, cee_dma, bfa_cee_meminfo()); + bfa_mem_dma_setup(meminfo, sfp_dma, bfa_sfp_meminfo()); + bfa_mem_dma_setup(meminfo, flash_dma, + bfa_flash_meminfo(cfg->drvcfg.min_cfg)); + bfa_mem_dma_setup(meminfo, diag_dma, bfa_diag_meminfo()); + bfa_mem_dma_setup(meminfo, phy_dma, + bfa_phy_meminfo(cfg->drvcfg.min_cfg)); } /* @@ -1092,28 +1461,46 @@ void bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) { - int i; - struct bfa_mem_elem_s *melem; + int i; + struct bfa_mem_dma_s *dma_info, *dma_elem; + struct bfa_mem_kva_s *kva_info, *kva_elem; + struct list_head *dm_qe, *km_qe; bfa->fcs = BFA_FALSE; WARN_ON((cfg == NULL) || (meminfo == NULL)); - /* - * initialize all memory pointers for iterative allocation - */ - for (i = 0; i < BFA_MEM_TYPE_MAX; i++) { - melem = meminfo->meminfo + i; - melem->kva_curp = melem->kva; - melem->dma_curp = melem->dma; + /* Initialize memory pointers for iterative allocation */ + dma_info = &meminfo->dma_info; + dma_info->kva_curp = dma_info->kva; + dma_info->dma_curp = dma_info->dma; + + kva_info = &meminfo->kva_info; + kva_info->kva_curp = kva_info->kva; + + list_for_each(dm_qe, &dma_info->qe) { + dma_elem = (struct bfa_mem_dma_s *) dm_qe; + dma_elem->kva_curp = dma_elem->kva; + dma_elem->dma_curp = dma_elem->dma; } - bfa_iocfc_attach(bfa, bfad, cfg, meminfo, pcidev); + list_for_each(km_qe, &kva_info->qe) { + kva_elem = (struct bfa_mem_kva_s *) km_qe; + kva_elem->kva_curp = kva_elem->kva; + } - for (i = 0; hal_mods[i]; i++) - hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev); + bfa_iocfc_attach(bfa, bfad, cfg, pcidev); - bfa_com_port_attach(bfa, meminfo); + for (i = 0; hal_mods[i]; i++) + hal_mods[i]->attach(bfa, bfad, cfg, pcidev); + + bfa_com_port_attach(bfa); + bfa_com_ablk_attach(bfa); + bfa_com_cee_attach(bfa); + bfa_com_sfp_attach(bfa); + bfa_com_flash_attach(bfa, cfg->drvcfg.min_cfg); + 
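/*
 * Illustrative caller-side sketch (not part of this patch): with the
 * reworked meminfo handling shown above, a driver first builds the
 * per-module DMA/KVA element lists via bfa_cfg_get_meminfo(), backs each
 * element, and only then calls bfa_attach().  The allocation calls, the
 * use of a mem_len field and the local names below are assumptions.
 */
static int example_hal_mem_alloc(struct bfa_s *bfa, void *bfad,
				 struct bfa_iocfc_cfg_s *cfg,
				 struct bfa_meminfo_s *meminfo,
				 struct bfa_pcidev_s *pcidev,
				 struct device *dev)
{
	struct bfa_mem_dma_s *dma_elem;
	struct bfa_mem_kva_s *kva_elem;
	struct list_head *qe;
	dma_addr_t pa;

	/* let each sub-module queue its DMA/KVA requirements */
	bfa_cfg_get_meminfo(cfg, meminfo, bfa);

	/* back every KVA element */
	list_for_each(qe, &meminfo->kva_info.qe) {
		kva_elem = (struct bfa_mem_kva_s *) qe;
		kva_elem->kva = vzalloc(kva_elem->mem_len);
		if (!kva_elem->kva)
			return -ENOMEM;
	}

	/* back every DMA element */
	list_for_each(qe, &meminfo->dma_info.qe) {
		dma_elem = (struct bfa_mem_dma_s *) qe;
		dma_elem->kva = dma_alloc_coherent(dev, dma_elem->mem_len,
						   &pa, GFP_KERNEL);
		if (!dma_elem->kva)
			return -ENOMEM;
		dma_elem->dma = pa;
	}

	/* attach; bfa_attach() claims the pre-allocated elements internally */
	bfa_attach(bfa, bfad, cfg, meminfo, pcidev);
	return 0;
}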
bfa_com_diag_attach(bfa); + bfa_com_phy_attach(bfa, cfg->drvcfg.min_cfg); } /* @@ -1152,10 +1539,17 @@ bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q) struct list_head *qe; struct list_head *qen; struct bfa_cb_qe_s *hcb_qe; + bfa_cb_cbfn_status_t cbfn; list_for_each_safe(qe, qen, comp_q) { hcb_qe = (struct bfa_cb_qe_s *) qe; - hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE); + if (hcb_qe->pre_rmv) { + /* qe is invalid after return, dequeue before cbfn() */ + list_del(qe); + cbfn = (bfa_cb_cbfn_status_t)(hcb_qe->cbfn); + cbfn(hcb_qe->cbarg, hcb_qe->fw_status); + } else + hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE); } } @@ -1168,10 +1562,20 @@ bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q) while (!list_empty(comp_q)) { bfa_q_deq(comp_q, &qe); hcb_qe = (struct bfa_cb_qe_s *) qe; + WARN_ON(hcb_qe->pre_rmv); hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE); } } +void +bfa_iocfc_cb_dconf_modinit(struct bfa_s *bfa, bfa_status_t status) +{ + if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) { + if (bfa->iocfc.cfgdone == BFA_TRUE) + bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, + bfa_iocfc_init_cb, bfa); + } +} /* * Return the list of PCI vendor/device id lists supported by this @@ -1215,6 +1619,7 @@ bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg) cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS; cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS; cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS; + cfg->fwcfg.num_fwtio_reqs = 0; cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS; cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS; @@ -1236,6 +1641,7 @@ bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg) cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN; cfg->fwcfg.num_uf_bufs = BFA_UF_MIN; cfg->fwcfg.num_rports = BFA_RPORT_MIN; + cfg->fwcfg.num_fwtio_reqs = 0; cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN; cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN; diff --git a/drivers/scsi/bfa/bfa_defs.h b/drivers/scsi/bfa/bfa_defs.h index d85f93aea46..7b3d235d20b 100644 --- a/drivers/scsi/bfa/bfa_defs.h +++ b/drivers/scsi/bfa/bfa_defs.h @@ -40,7 +40,12 @@ enum { BFA_MFG_TYPE_ASTRA = 807, /* Astra mezz card */ BFA_MFG_TYPE_LIGHTNING_P0 = 902, /* Lightning mezz card - old */ BFA_MFG_TYPE_LIGHTNING = 1741, /* Lightning mezz card */ - BFA_MFG_TYPE_INVALID = 0, /* Invalid card type */ + BFA_MFG_TYPE_PROWLER_F = 1560, /* Prowler FC only cards */ + BFA_MFG_TYPE_PROWLER_N = 1410, /* Prowler NIC only cards */ + BFA_MFG_TYPE_PROWLER_C = 1710, /* Prowler CNA only cards */ + BFA_MFG_TYPE_PROWLER_D = 1860, /* Prowler Dual cards */ + BFA_MFG_TYPE_CHINOOK = 1867, /* Chinook cards */ + BFA_MFG_TYPE_INVALID = 0, /* Invalid card type */ }; #pragma pack(1) @@ -53,7 +58,8 @@ enum { (type) == BFA_MFG_TYPE_WANCHESE || \ (type) == BFA_MFG_TYPE_ASTRA || \ (type) == BFA_MFG_TYPE_LIGHTNING_P0 || \ - (type) == BFA_MFG_TYPE_LIGHTNING)) + (type) == BFA_MFG_TYPE_LIGHTNING || \ + (type) == BFA_MFG_TYPE_CHINOOK)) /* * Check if the card having old wwn/mac handling @@ -124,30 +130,60 @@ enum bfa_status { BFA_STATUS_ETIMER = 5, /* Timer expired - Retry, if persists, * contact support */ BFA_STATUS_EPROTOCOL = 6, /* Protocol error */ + BFA_STATUS_SFP_UNSUPP = 10, /* Unsupported SFP - Replace SFP */ + BFA_STATUS_UNKNOWN_VFID = 11, /* VF_ID not found */ + BFA_STATUS_DATACORRUPTED = 12, /* Diag returned data corrupted */ BFA_STATUS_DEVBUSY = 13, /* Device busy - Retry operation */ + BFA_STATUS_HDMA_FAILED = 16, /* Host dma failed contact support */ + BFA_STATUS_FLASH_BAD_LEN = 17, /* Flash bad length */ BFA_STATUS_UNKNOWN_LWWN = 18, /* LPORT PWWN not found */ BFA_STATUS_UNKNOWN_RWWN = 19, /* 
RPORT PWWN not found */ BFA_STATUS_VPORT_EXISTS = 21, /* VPORT already exists */ BFA_STATUS_VPORT_MAX = 22, /* Reached max VPORT supported limit */ BFA_STATUS_UNSUPP_SPEED = 23, /* Invalid Speed Check speed setting */ BFA_STATUS_INVLD_DFSZ = 24, /* Invalid Max data field size */ + BFA_STATUS_CMD_NOTSUPP = 26, /* Command/API not supported */ BFA_STATUS_FABRIC_RJT = 29, /* Reject from attached fabric */ + BFA_STATUS_UNKNOWN_VWWN = 30, /* VPORT PWWN not found */ + BFA_STATUS_PORT_OFFLINE = 34, /* Port is not online */ BFA_STATUS_VPORT_WWN_BP = 46, /* WWN is same as base port's WWN */ + BFA_STATUS_PORT_NOT_DISABLED = 47, /* Port not disabled disable port */ BFA_STATUS_NO_FCPIM_NEXUS = 52, /* No FCP Nexus exists with the rport */ BFA_STATUS_IOC_FAILURE = 56, /* IOC failure - Retry, if persists * contact support */ BFA_STATUS_INVALID_WWN = 57, /* Invalid WWN */ + BFA_STATUS_ADAPTER_ENABLED = 60, /* Adapter is not disabled */ + BFA_STATUS_IOC_NON_OP = 61, /* IOC is not operational */ + BFA_STATUS_VERSION_FAIL = 70, /* Application/Driver version mismatch */ BFA_STATUS_DIAG_BUSY = 71, /* diag busy */ + BFA_STATUS_BEACON_ON = 72, /* Port Beacon already on */ BFA_STATUS_ENOFSAVE = 78, /* No saved firmware trace */ BFA_STATUS_IOC_DISABLED = 82, /* IOC is already disabled */ + BFA_STATUS_NO_SFP_DEV = 89, /* No SFP device check or replace SFP */ + BFA_STATUS_MEMTEST_FAILED = 90, /* Memory test failed contact support */ + BFA_STATUS_LEDTEST_OP = 109, /* LED test is operating */ BFA_STATUS_INVALID_MAC = 134, /* Invalid MAC address */ BFA_STATUS_PBC = 154, /* Operation not allowed for pre-boot * configuration */ + BFA_STATUS_BAD_FWCFG = 156, /* Bad firmware configuration */ + BFA_STATUS_INVALID_VENDOR = 158, /* Invalid switch vendor */ + BFA_STATUS_SFP_NOT_READY = 159, /* SFP info is not ready. 
Retry */ BFA_STATUS_TRUNK_ENABLED = 164, /* Trunk is already enabled on * this adapter */ BFA_STATUS_TRUNK_DISABLED = 165, /* Trunking is disabled on * the adapter */ BFA_STATUS_IOPROFILE_OFF = 175, /* IO profile OFF */ + BFA_STATUS_PHY_NOT_PRESENT = 183, /* PHY module not present */ + BFA_STATUS_FEATURE_NOT_SUPPORTED = 192, /* Feature not supported */ + BFA_STATUS_ENTRY_EXISTS = 193, /* Entry already exists */ + BFA_STATUS_ENTRY_NOT_EXISTS = 194, /* Entry does not exist */ + BFA_STATUS_NO_CHANGE = 195, /* Feature already in that state */ + BFA_STATUS_FAA_ENABLED = 197, /* FAA is already enabled */ + BFA_STATUS_FAA_DISABLED = 198, /* FAA is already disabled */ + BFA_STATUS_FAA_ACQUIRED = 199, /* FAA is already acquired */ + BFA_STATUS_FAA_ACQ_ADDR = 200, /* Acquiring addr */ + BFA_STATUS_ERROR_TRUNK_ENABLED = 203, /* Trunk enabled on adapter */ + BFA_STATUS_MAX_ENTRY_REACHED = 212, /* MAX entry reached */ BFA_STATUS_MAX_VAL /* Unknown error code */ }; #define bfa_status_t enum bfa_status @@ -265,6 +301,8 @@ enum bfa_ioc_state { BFA_IOC_DISABLED = 10, /* IOC is disabled */ BFA_IOC_FWMISMATCH = 11, /* IOC f/w different from drivers */ BFA_IOC_ENABLING = 12, /* IOC is being enabled */ + BFA_IOC_HWFAIL = 13, /* PCI mapping doesn't exist */ + BFA_IOC_ACQ_ADDR = 14, /* Acquiring addr from fabric */ }; /* @@ -294,6 +332,7 @@ struct bfa_ioc_drv_stats_s { u32 enable_reqs; u32 disable_replies; u32 enable_replies; + u32 rsvd; }; /* @@ -320,7 +359,143 @@ struct bfa_ioc_attr_s { struct bfa_ioc_driver_attr_s driver_attr; /* driver attr */ struct bfa_ioc_pci_attr_s pci_attr; u8 port_id; /* port number */ - u8 rsvd[7]; /* 64bit align */ + u8 port_mode; /* bfa_mode_s */ + u8 cap_bm; /* capability */ + u8 port_mode_cfg; /* bfa_mode_s */ + u8 rsvd[4]; /* 64bit align */ +}; + +/* + * AEN related definitions + */ +enum bfa_aen_category { + BFA_AEN_CAT_ADAPTER = 1, + BFA_AEN_CAT_PORT = 2, + BFA_AEN_CAT_LPORT = 3, + BFA_AEN_CAT_RPORT = 4, + BFA_AEN_CAT_ITNIM = 5, + BFA_AEN_CAT_AUDIT = 8, + BFA_AEN_CAT_IOC = 9, +}; + +/* BFA adapter level events */ +enum bfa_adapter_aen_event { + BFA_ADAPTER_AEN_ADD = 1, /* New Adapter found event */ + BFA_ADAPTER_AEN_REMOVE = 2, /* Adapter removed event */ +}; + +struct bfa_adapter_aen_data_s { + char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN]; + u32 nports; /* Number of NPorts */ + wwn_t pwwn; /* WWN of one of its physical port */ +}; + +/* BFA physical port Level events */ +enum bfa_port_aen_event { + BFA_PORT_AEN_ONLINE = 1, /* Physical Port online event */ + BFA_PORT_AEN_OFFLINE = 2, /* Physical Port offline event */ + BFA_PORT_AEN_RLIR = 3, /* RLIR event, not supported */ + BFA_PORT_AEN_SFP_INSERT = 4, /* SFP inserted event */ + BFA_PORT_AEN_SFP_REMOVE = 5, /* SFP removed event */ + BFA_PORT_AEN_SFP_POM = 6, /* SFP POM event */ + BFA_PORT_AEN_ENABLE = 7, /* Physical Port enable event */ + BFA_PORT_AEN_DISABLE = 8, /* Physical Port disable event */ + BFA_PORT_AEN_AUTH_ON = 9, /* Physical Port auth success event */ + BFA_PORT_AEN_AUTH_OFF = 10, /* Physical Port auth fail event */ + BFA_PORT_AEN_DISCONNECT = 11, /* Physical Port disconnect event */ + BFA_PORT_AEN_QOS_NEG = 12, /* Base Port QOS negotiation event */ + BFA_PORT_AEN_FABRIC_NAME_CHANGE = 13, /* Fabric Name/WWN change */ + BFA_PORT_AEN_SFP_ACCESS_ERROR = 14, /* SFP read error event */ + BFA_PORT_AEN_SFP_UNSUPPORT = 15, /* Unsupported SFP event */ +}; + +enum bfa_port_aen_sfp_pom { + BFA_PORT_AEN_SFP_POM_GREEN = 1, /* Normal */ + BFA_PORT_AEN_SFP_POM_AMBER = 2, /* Warning */ + BFA_PORT_AEN_SFP_POM_RED = 3, /* Critical */ 
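/*
 * Illustrative sketch (not part of this patch): how a driver might fill
 * one of the new AEN entries for an SFP optical-monitoring transition,
 * using bfa_port_aen_data_s and bfa_aen_entry_s as defined elsewhere in
 * this change.  The helper name and the way the entry is queued are
 * assumptions.
 */
static void example_post_sfp_pom_aen(struct bfa_aen_entry_s *entry,
				     struct list_head *aen_q, u32 seq_num,
				     wwn_t pwwn, u32 phy_port)
{
	memset(entry, 0, sizeof(*entry));
	entry->aen_category = BFA_AEN_CAT_PORT;
	entry->aen_type = BFA_PORT_AEN_SFP_POM;
	entry->aen_data.port.pwwn = pwwn;
	entry->aen_data.port.phy_port_num = phy_port;
	entry->aen_data.port.level = BFA_PORT_AEN_SFP_POM_AMBER;
	entry->seq_num = seq_num;
	do_gettimeofday(&entry->aen_tv);

	/* hand the entry to whatever consumes the AEN queue */
	list_add_tail(&entry->qe, aen_q);
}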
+ BFA_PORT_AEN_SFP_POM_MAX = BFA_PORT_AEN_SFP_POM_RED +}; + +struct bfa_port_aen_data_s { + wwn_t pwwn; /* WWN of the physical port */ + wwn_t fwwn; /* WWN of the fabric port */ + u32 phy_port_num; /* For SFP related events */ + u16 ioc_type; + u16 level; /* Only transitions will be informed */ + mac_t mac; /* MAC address of the ethernet port */ + u16 rsvd; +}; + +/* BFA AEN logical port events */ +enum bfa_lport_aen_event { + BFA_LPORT_AEN_NEW = 1, /* LPort created event */ + BFA_LPORT_AEN_DELETE = 2, /* LPort deleted event */ + BFA_LPORT_AEN_ONLINE = 3, /* LPort online event */ + BFA_LPORT_AEN_OFFLINE = 4, /* LPort offline event */ + BFA_LPORT_AEN_DISCONNECT = 5, /* LPort disconnect event */ + BFA_LPORT_AEN_NEW_PROP = 6, /* VPort created event */ + BFA_LPORT_AEN_DELETE_PROP = 7, /* VPort deleted event */ + BFA_LPORT_AEN_NEW_STANDARD = 8, /* VPort created event */ + BFA_LPORT_AEN_DELETE_STANDARD = 9, /* VPort deleted event */ + BFA_LPORT_AEN_NPIV_DUP_WWN = 10, /* VPort with duplicate WWN */ + BFA_LPORT_AEN_NPIV_FABRIC_MAX = 11, /* Max NPIV in fabric/fport */ + BFA_LPORT_AEN_NPIV_UNKNOWN = 12, /* Unknown NPIV Error code */ +}; + +struct bfa_lport_aen_data_s { + u16 vf_id; /* vf_id of this logical port */ + u16 roles; /* Logical port mode,IM/TM/IP etc */ + u32 rsvd; + wwn_t ppwwn; /* WWN of its physical port */ + wwn_t lpwwn; /* WWN of this logical port */ +}; + +/* BFA ITNIM events */ +enum bfa_itnim_aen_event { + BFA_ITNIM_AEN_ONLINE = 1, /* Target online */ + BFA_ITNIM_AEN_OFFLINE = 2, /* Target offline */ + BFA_ITNIM_AEN_DISCONNECT = 3, /* Target disconnected */ +}; + +struct bfa_itnim_aen_data_s { + u16 vf_id; /* vf_id of the IT nexus */ + u16 rsvd[3]; + wwn_t ppwwn; /* WWN of its physical port */ + wwn_t lpwwn; /* WWN of logical port */ + wwn_t rpwwn; /* WWN of remote(target) port */ +}; + +/* BFA audit events */ +enum bfa_audit_aen_event { + BFA_AUDIT_AEN_AUTH_ENABLE = 1, + BFA_AUDIT_AEN_AUTH_DISABLE = 2, + BFA_AUDIT_AEN_FLASH_ERASE = 3, + BFA_AUDIT_AEN_FLASH_UPDATE = 4, +}; + +struct bfa_audit_aen_data_s { + wwn_t pwwn; + int partition_inst; + int partition_type; +}; + +/* BFA IOC level events */ +enum bfa_ioc_aen_event { + BFA_IOC_AEN_HBGOOD = 1, /* Heart Beat restore event */ + BFA_IOC_AEN_HBFAIL = 2, /* Heart Beat failure event */ + BFA_IOC_AEN_ENABLE = 3, /* IOC enabled event */ + BFA_IOC_AEN_DISABLE = 4, /* IOC disabled event */ + BFA_IOC_AEN_FWMISMATCH = 5, /* IOC firmware mismatch */ + BFA_IOC_AEN_FWCFG_ERROR = 6, /* IOC firmware config error */ + BFA_IOC_AEN_INVALID_VENDOR = 7, + BFA_IOC_AEN_INVALID_NWWN = 8, /* Zero NWWN */ + BFA_IOC_AEN_INVALID_PWWN = 9 /* Zero PWWN */ +}; + +struct bfa_ioc_aen_data_s { + wwn_t pwwn; + u16 ioc_type; + mac_t mac; }; /* @@ -337,6 +512,21 @@ struct bfa_ioc_attr_s { #define BFA_MFG_SUPPLIER_PARTNUM_SIZE 20 #define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20 #define BFA_MFG_SUPPLIER_REVISION_SIZE 4 +/* + * Initial capability definition + */ +#define BFA_MFG_IC_FC 0x01 +#define BFA_MFG_IC_ETH 0x02 + +/* + * Adapter capability mask definition + */ +#define BFA_CM_HBA 0x01 +#define BFA_CM_CNA 0x02 +#define BFA_CM_NIC 0x04 +#define BFA_CM_FC16G 0x08 +#define BFA_CM_SRIOV 0x10 +#define BFA_CM_MEZZ 0x20 #pragma pack(1) @@ -344,31 +534,39 @@ struct bfa_ioc_attr_s { * All numerical fields are in big-endian format. 
*/ struct bfa_mfg_block_s { - u8 version; /* manufacturing block version */ - u8 mfg_sig[3]; /* characters 'M', 'F', 'G' */ - u16 mfgsize; /* mfg block size */ - u16 u16_chksum; /* old u16 checksum */ - char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)]; - char brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)]; - u8 mfg_day; /* manufacturing day */ - u8 mfg_month; /* manufacturing month */ - u16 mfg_year; /* manufacturing year */ - wwn_t mfg_wwn; /* wwn base for this adapter */ - u8 num_wwn; /* number of wwns assigned */ - u8 mfg_speeds; /* speeds allowed for this adapter */ - u8 rsv[2]; - char supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)]; - char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)]; - char - supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)]; - char - supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)]; - mac_t mfg_mac; /* mac address */ - u8 num_mac; /* number of mac addresses */ - u8 rsv2; - u32 mfg_type; /* card type */ - u8 rsv3[108]; - u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /* md5 checksum */ + u8 version; /*!< manufacturing block version */ + u8 mfg_sig[3]; /*!< characters 'M', 'F', 'G' */ + u16 mfgsize; /*!< mfg block size */ + u16 u16_chksum; /*!< old u16 checksum */ + char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)]; + char brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)]; + u8 mfg_day; /*!< manufacturing day */ + u8 mfg_month; /*!< manufacturing month */ + u16 mfg_year; /*!< manufacturing year */ + wwn_t mfg_wwn; /*!< wwn base for this adapter */ + u8 num_wwn; /*!< number of wwns assigned */ + u8 mfg_speeds; /*!< speeds allowed for this adapter */ + u8 rsv[2]; + char supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)]; + char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)]; + char supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)]; + char supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)]; + mac_t mfg_mac; /*!< base mac address */ + u8 num_mac; /*!< number of mac addresses */ + u8 rsv2; + u32 card_type; /*!< card type */ + char cap_nic; /*!< capability nic */ + char cap_cna; /*!< capability cna */ + char cap_hba; /*!< capability hba */ + char cap_fc16g; /*!< capability fc 16g */ + char cap_sriov; /*!< capability sriov */ + char cap_mezz; /*!< capability mezz */ + u8 rsv3; + u8 mfg_nports; /*!< number of ports */ + char media[8]; /*!< xfi/xaui */ + char initial_mode[8]; /*!< initial mode: hba/cna/nic */ + u8 rsv4[84]; + u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /*!< md5 checksum */ }; #pragma pack() @@ -386,17 +584,27 @@ enum { BFA_PCI_DEVICE_ID_FC_8G1P = 0x17, BFA_PCI_DEVICE_ID_CT = 0x14, BFA_PCI_DEVICE_ID_CT_FC = 0x21, + BFA_PCI_DEVICE_ID_CT2 = 0x22, }; -#define bfa_asic_id_ct(devid) \ - ((devid) == BFA_PCI_DEVICE_ID_CT || \ - (devid) == BFA_PCI_DEVICE_ID_CT_FC) +#define bfa_asic_id_cb(__d) \ + ((__d) == BFA_PCI_DEVICE_ID_FC_8G2P || \ + (__d) == BFA_PCI_DEVICE_ID_FC_8G1P) +#define bfa_asic_id_ct(__d) \ + ((__d) == BFA_PCI_DEVICE_ID_CT || \ + (__d) == BFA_PCI_DEVICE_ID_CT_FC) +#define bfa_asic_id_ct2(__d) ((__d) == BFA_PCI_DEVICE_ID_CT2) +#define bfa_asic_id_ctc(__d) \ + (bfa_asic_id_ct(__d) || bfa_asic_id_ct2(__d)) /* * PCI sub-system device and vendor ID information */ enum { BFA_PCI_FCOE_SSDEVICE_ID = 0x14, + BFA_PCI_CT2_SSID_FCoE = 0x22, + BFA_PCI_CT2_SSID_ETH = 0x23, + BFA_PCI_CT2_SSID_FC = 0x24, }; /* @@ -416,9 +624,7 @@ enum bfa_port_speed { BFA_PORT_SPEED_8GBPS = 8, BFA_PORT_SPEED_10GBPS = 10, BFA_PORT_SPEED_16GBPS = 16, - BFA_PORT_SPEED_AUTO = - (BFA_PORT_SPEED_1GBPS | BFA_PORT_SPEED_2GBPS | - BFA_PORT_SPEED_4GBPS | BFA_PORT_SPEED_8GBPS), + 
BFA_PORT_SPEED_AUTO = 0xf, }; #define bfa_port_speed_t enum bfa_port_speed @@ -454,6 +660,20 @@ struct bfa_boot_bootlun_s { /* * BOOT boot configuraton */ +struct bfa_boot_cfg_s { + u8 version; + u8 rsvd1; + u16 chksum; + u8 enable; /* enable/disable SAN boot */ + u8 speed; /* boot speed settings */ + u8 topology; /* boot topology setting */ + u8 bootopt; /* bfa_boot_bootopt_t */ + u32 nbluns; /* number of boot luns */ + u32 rsvd2; + struct bfa_boot_bootlun_s blun[BFA_BOOT_BOOTLUN_MAX]; + struct bfa_boot_bootlun_s blun_disc[BFA_BOOT_BOOTLUN_MAX]; +}; + struct bfa_boot_pbc_s { u8 enable; /* enable/disable SAN boot */ u8 speed; /* boot speed settings */ @@ -463,4 +683,470 @@ struct bfa_boot_pbc_s { struct bfa_boot_bootlun_s pblun[BFA_PREBOOT_BOOTLUN_MAX]; }; +struct bfa_ethboot_cfg_s { + u8 version; + u8 rsvd1; + u16 chksum; + u8 enable; /* enable/disable Eth/PXE boot */ + u8 rsvd2; + u16 vlan; +}; + +/* + * ASIC block configuration related structures + */ +#define BFA_ABLK_MAX_PORTS 2 +#define BFA_ABLK_MAX_PFS 16 +#define BFA_ABLK_MAX 2 + +#pragma pack(1) +enum bfa_mode_s { + BFA_MODE_HBA = 1, + BFA_MODE_CNA = 2, + BFA_MODE_NIC = 3 +}; + +struct bfa_adapter_cfg_mode_s { + u16 max_pf; + u16 max_vf; + enum bfa_mode_s mode; +}; + +struct bfa_ablk_cfg_pf_s { + u16 pers; + u8 port_id; + u8 optrom; + u8 valid; + u8 sriov; + u8 max_vfs; + u8 rsvd[1]; + u16 num_qpairs; + u16 num_vectors; + u32 bw; +}; + +struct bfa_ablk_cfg_port_s { + u8 mode; + u8 type; + u8 max_pfs; + u8 rsvd[5]; +}; + +struct bfa_ablk_cfg_inst_s { + u8 nports; + u8 max_pfs; + u8 rsvd[6]; + struct bfa_ablk_cfg_pf_s pf_cfg[BFA_ABLK_MAX_PFS]; + struct bfa_ablk_cfg_port_s port_cfg[BFA_ABLK_MAX_PORTS]; +}; + +struct bfa_ablk_cfg_s { + struct bfa_ablk_cfg_inst_s inst[BFA_ABLK_MAX]; +}; + + +/* + * SFP module specific + */ +#define SFP_DIAGMON_SIZE 10 /* num bytes of diag monitor data */ + +/* SFP state change notification event */ +#define BFA_SFP_SCN_REMOVED 0 +#define BFA_SFP_SCN_INSERTED 1 +#define BFA_SFP_SCN_POM 2 +#define BFA_SFP_SCN_FAILED 3 +#define BFA_SFP_SCN_UNSUPPORT 4 +#define BFA_SFP_SCN_VALID 5 + +enum bfa_defs_sfp_media_e { + BFA_SFP_MEDIA_UNKNOWN = 0x00, + BFA_SFP_MEDIA_CU = 0x01, + BFA_SFP_MEDIA_LW = 0x02, + BFA_SFP_MEDIA_SW = 0x03, + BFA_SFP_MEDIA_EL = 0x04, + BFA_SFP_MEDIA_UNSUPPORT = 0x05, +}; + +/* + * values for xmtr_tech above + */ +enum { + SFP_XMTR_TECH_CU = (1 << 0), /* copper FC-BaseT */ + SFP_XMTR_TECH_CP = (1 << 1), /* copper passive */ + SFP_XMTR_TECH_CA = (1 << 2), /* copper active */ + SFP_XMTR_TECH_LL = (1 << 3), /* longwave laser */ + SFP_XMTR_TECH_SL = (1 << 4), /* shortwave laser w/ OFC */ + SFP_XMTR_TECH_SN = (1 << 5), /* shortwave laser w/o OFC */ + SFP_XMTR_TECH_EL_INTRA = (1 << 6), /* elec intra-enclosure */ + SFP_XMTR_TECH_EL_INTER = (1 << 7), /* elec inter-enclosure */ + SFP_XMTR_TECH_LC = (1 << 8), /* longwave laser */ + SFP_XMTR_TECH_SA = (1 << 9) +}; + +/* + * Serial ID: Data Fields -- Address A0h + * Basic ID field total 64 bytes + */ +struct sfp_srlid_base_s { + u8 id; /* 00: Identifier */ + u8 extid; /* 01: Extended Identifier */ + u8 connector; /* 02: Connector */ + u8 xcvr[8]; /* 03-10: Transceiver */ + u8 encoding; /* 11: Encoding */ + u8 br_norm; /* 12: BR, Nominal */ + u8 rate_id; /* 13: Rate Identifier */ + u8 len_km; /* 14: Length single mode km */ + u8 len_100m; /* 15: Length single mode 100m */ + u8 len_om2; /* 16: Length om2 fiber 10m */ + u8 len_om1; /* 17: Length om1 fiber 10m */ + u8 len_cu; /* 18: Length copper 1m */ + u8 len_om3; /* 19: Length om3 fiber 10m */ + u8 
vendor_name[16];/* 20-35 */ + u8 unalloc1; + u8 vendor_oui[3]; /* 37-39 */ + u8 vendor_pn[16]; /* 40-55 */ + u8 vendor_rev[4]; /* 56-59 */ + u8 wavelen[2]; /* 60-61 */ + u8 unalloc2; + u8 cc_base; /* 63: check code for base id field */ +}; + +/* + * Serial ID: Data Fields -- Address A0h + * Extended id field total 32 bytes + */ +struct sfp_srlid_ext_s { + u8 options[2]; + u8 br_max; + u8 br_min; + u8 vendor_sn[16]; + u8 date_code[8]; + u8 diag_mon_type; /* 92: Diagnostic Monitoring type */ + u8 en_options; + u8 sff_8472; + u8 cc_ext; +}; + +/* + * Diagnostic: Data Fields -- Address A2h + * Diagnostic and control/status base field total 96 bytes + */ +struct sfp_diag_base_s { + /* + * Alarm and warning Thresholds 40 bytes + */ + u8 temp_high_alarm[2]; /* 00-01 */ + u8 temp_low_alarm[2]; /* 02-03 */ + u8 temp_high_warning[2]; /* 04-05 */ + u8 temp_low_warning[2]; /* 06-07 */ + + u8 volt_high_alarm[2]; /* 08-09 */ + u8 volt_low_alarm[2]; /* 10-11 */ + u8 volt_high_warning[2]; /* 12-13 */ + u8 volt_low_warning[2]; /* 14-15 */ + + u8 bias_high_alarm[2]; /* 16-17 */ + u8 bias_low_alarm[2]; /* 18-19 */ + u8 bias_high_warning[2]; /* 20-21 */ + u8 bias_low_warning[2]; /* 22-23 */ + + u8 tx_pwr_high_alarm[2]; /* 24-25 */ + u8 tx_pwr_low_alarm[2]; /* 26-27 */ + u8 tx_pwr_high_warning[2]; /* 28-29 */ + u8 tx_pwr_low_warning[2]; /* 30-31 */ + + u8 rx_pwr_high_alarm[2]; /* 32-33 */ + u8 rx_pwr_low_alarm[2]; /* 34-35 */ + u8 rx_pwr_high_warning[2]; /* 36-37 */ + u8 rx_pwr_low_warning[2]; /* 38-39 */ + + u8 unallocate_1[16]; + + /* + * ext_cal_const[36] + */ + u8 rx_pwr[20]; + u8 tx_i[4]; + u8 tx_pwr[4]; + u8 temp[4]; + u8 volt[4]; + u8 unallocate_2[3]; + u8 cc_dmi; +}; + +/* + * Diagnostic: Data Fields -- Address A2h + * Diagnostic and control/status extended field total 24 bytes + */ +struct sfp_diag_ext_s { + u8 diag[SFP_DIAGMON_SIZE]; + u8 unalloc1[4]; + u8 status_ctl; + u8 rsvd; + u8 alarm_flags[2]; + u8 unalloc2[2]; + u8 warning_flags[2]; + u8 ext_status_ctl[2]; +}; + +struct sfp_mem_s { + struct sfp_srlid_base_s srlid_base; + struct sfp_srlid_ext_s srlid_ext; + struct sfp_diag_base_s diag_base; + struct sfp_diag_ext_s diag_ext; +}; + +/* + * transceiver codes (SFF-8472 Rev 10.2 Table 3.5) + */ +union sfp_xcvr_e10g_code_u { + u8 b; + struct { +#ifdef __BIGENDIAN + u8 e10g_unall:1; /* 10G Ethernet compliance */ + u8 e10g_lrm:1; + u8 e10g_lr:1; + u8 e10g_sr:1; + u8 ib_sx:1; /* Infiniband compliance */ + u8 ib_lx:1; + u8 ib_cu_a:1; + u8 ib_cu_p:1; +#else + u8 ib_cu_p:1; + u8 ib_cu_a:1; + u8 ib_lx:1; + u8 ib_sx:1; /* Infiniband compliance */ + u8 e10g_sr:1; + u8 e10g_lr:1; + u8 e10g_lrm:1; + u8 e10g_unall:1; /* 10G Ethernet compliance */ +#endif + } r; +}; + +union sfp_xcvr_so1_code_u { + u8 b; + struct { + u8 escon:2; /* ESCON compliance code */ + u8 oc192_reach:1; /* SONET compliance code */ + u8 so_reach:2; + u8 oc48_reach:3; + } r; +}; + +union sfp_xcvr_so2_code_u { + u8 b; + struct { + u8 reserved:1; + u8 oc12_reach:3; /* OC12 reach */ + u8 reserved1:1; + u8 oc3_reach:3; /* OC3 reach */ + } r; +}; + +union sfp_xcvr_eth_code_u { + u8 b; + struct { + u8 base_px:1; + u8 base_bx10:1; + u8 e100base_fx:1; + u8 e100base_lx:1; + u8 e1000base_t:1; + u8 e1000base_cx:1; + u8 e1000base_lx:1; + u8 e1000base_sx:1; + } r; +}; + +struct sfp_xcvr_fc1_code_s { + u8 link_len:5; /* FC link length */ + u8 xmtr_tech2:3; + u8 xmtr_tech1:7; /* FC transmitter technology */ + u8 reserved1:1; +}; + +union sfp_xcvr_fc2_code_u { + u8 b; + struct { + u8 tw_media:1; /* twin axial pair (tw) */ + u8 tp_media:1; /* shielded twisted 
pair (sp) */ + u8 mi_media:1; /* miniature coax (mi) */ + u8 tv_media:1; /* video coax (tv) */ + u8 m6_media:1; /* multimode, 62.5m (m6) */ + u8 m5_media:1; /* multimode, 50m (m5) */ + u8 reserved:1; + u8 sm_media:1; /* single mode (sm) */ + } r; +}; + +union sfp_xcvr_fc3_code_u { + u8 b; + struct { +#ifdef __BIGENDIAN + u8 rsv4:1; + u8 mb800:1; /* 800 Mbytes/sec */ + u8 mb1600:1; /* 1600 Mbytes/sec */ + u8 mb400:1; /* 400 Mbytes/sec */ + u8 rsv2:1; + u8 mb200:1; /* 200 Mbytes/sec */ + u8 rsv1:1; + u8 mb100:1; /* 100 Mbytes/sec */ +#else + u8 mb100:1; /* 100 Mbytes/sec */ + u8 rsv1:1; + u8 mb200:1; /* 200 Mbytes/sec */ + u8 rsv2:1; + u8 mb400:1; /* 400 Mbytes/sec */ + u8 mb1600:1; /* 1600 Mbytes/sec */ + u8 mb800:1; /* 800 Mbytes/sec */ + u8 rsv4:1; +#endif + } r; +}; + +struct sfp_xcvr_s { + union sfp_xcvr_e10g_code_u e10g; + union sfp_xcvr_so1_code_u so1; + union sfp_xcvr_so2_code_u so2; + union sfp_xcvr_eth_code_u eth; + struct sfp_xcvr_fc1_code_s fc1; + union sfp_xcvr_fc2_code_u fc2; + union sfp_xcvr_fc3_code_u fc3; +}; + +/* + * Flash module specific + */ +#define BFA_FLASH_PART_ENTRY_SIZE 32 /* partition entry size */ +#define BFA_FLASH_PART_MAX 32 /* maximal # of partitions */ + +enum bfa_flash_part_type { + BFA_FLASH_PART_OPTROM = 1, /* option rom partition */ + BFA_FLASH_PART_FWIMG = 2, /* firmware image partition */ + BFA_FLASH_PART_FWCFG = 3, /* firmware tuneable config */ + BFA_FLASH_PART_DRV = 4, /* IOC driver config */ + BFA_FLASH_PART_BOOT = 5, /* boot config */ + BFA_FLASH_PART_ASIC = 6, /* asic bootstrap configuration */ + BFA_FLASH_PART_MFG = 7, /* manufacturing block partition */ + BFA_FLASH_PART_OPTROM2 = 8, /* 2nd option rom partition */ + BFA_FLASH_PART_VPD = 9, /* vpd data of OEM info */ + BFA_FLASH_PART_PBC = 10, /* pre-boot config */ + BFA_FLASH_PART_BOOTOVL = 11, /* boot overlay partition */ + BFA_FLASH_PART_LOG = 12, /* firmware log partition */ + BFA_FLASH_PART_PXECFG = 13, /* pxe boot config partition */ + BFA_FLASH_PART_PXEOVL = 14, /* pxe boot overlay partition */ + BFA_FLASH_PART_PORTCFG = 15, /* port cfg partition */ + BFA_FLASH_PART_ASICBK = 16, /* asic backup partition */ +}; + +/* + * flash partition attributes + */ +struct bfa_flash_part_attr_s { + u32 part_type; /* partition type */ + u32 part_instance; /* partition instance */ + u32 part_off; /* partition offset */ + u32 part_size; /* partition size */ + u32 part_len; /* partition content length */ + u32 part_status; /* partition status */ + char rsv[BFA_FLASH_PART_ENTRY_SIZE - 24]; +}; + +/* + * flash attributes + */ +struct bfa_flash_attr_s { + u32 status; /* flash overall status */ + u32 npart; /* num of partitions */ + struct bfa_flash_part_attr_s part[BFA_FLASH_PART_MAX]; +}; + +/* + * DIAG module specific + */ +#define LB_PATTERN_DEFAULT 0xB5B5B5B5 +#define QTEST_CNT_DEFAULT 10 +#define QTEST_PAT_DEFAULT LB_PATTERN_DEFAULT + +struct bfa_diag_memtest_s { + u8 algo; + u8 rsvd[7]; +}; + +struct bfa_diag_memtest_result { + u32 status; + u32 addr; + u32 exp; /* expect value read from reg */ + u32 act; /* actually value read */ + u32 err_status; /* error status reg */ + u32 err_status1; /* extra error info reg */ + u32 err_addr; /* error address reg */ + u8 algo; + u8 rsv[3]; +}; + +struct bfa_diag_loopback_result_s { + u32 numtxmfrm; /* no. of transmit frame */ + u32 numosffrm; /* no. of outstanding frame */ + u32 numrcvfrm; /* no. 
of received good frame */ + u32 badfrminf; /* mis-match info */ + u32 badfrmnum; /* mis-match fram number */ + u8 status; /* loopback test result */ + u8 rsvd[3]; +}; + +struct bfa_diag_ledtest_s { + u32 cmd; /* bfa_led_op_t */ + u32 color; /* bfa_led_color_t */ + u16 freq; /* no. of blinks every 10 secs */ + u8 led; /* bitmap of LEDs to be tested */ + u8 rsvd[5]; +}; + +struct bfa_diag_loopback_s { + u32 loopcnt; + u32 pattern; + u8 lb_mode; /* bfa_port_opmode_t */ + u8 speed; /* bfa_port_speed_t */ + u8 rsvd[2]; +}; + +/* + * PHY module specific + */ +enum bfa_phy_status_e { + BFA_PHY_STATUS_GOOD = 0, /* phy is good */ + BFA_PHY_STATUS_NOT_PRESENT = 1, /* phy does not exist */ + BFA_PHY_STATUS_BAD = 2, /* phy is bad */ +}; + +/* + * phy attributes for phy query + */ +struct bfa_phy_attr_s { + u32 status; /* phy present/absent status */ + u32 length; /* firmware length */ + u32 fw_ver; /* firmware version */ + u32 an_status; /* AN status */ + u32 pma_pmd_status; /* PMA/PMD link status */ + u32 pma_pmd_signal; /* PMA/PMD signal detect */ + u32 pcs_status; /* PCS link status */ +}; + +/* + * phy stats + */ +struct bfa_phy_stats_s { + u32 status; /* phy stats status */ + u32 link_breaks; /* Num of link breaks after linkup */ + u32 pma_pmd_fault; /* NPMA/PMD fault */ + u32 pcs_fault; /* PCS fault */ + u32 speed_neg; /* Num of speed negotiation */ + u32 tx_eq_training; /* Num of TX EQ training */ + u32 tx_eq_timeout; /* Num of TX EQ timeout */ + u32 crc_error; /* Num of CRC errors */ +}; + +#pragma pack() + #endif /* __BFA_DEFS_H__ */ diff --git a/drivers/scsi/bfa/bfa_defs_fcs.h b/drivers/scsi/bfa/bfa_defs_fcs.h index 191d34a58b9..3bbc583f65c 100644 --- a/drivers/scsi/bfa/bfa_defs_fcs.h +++ b/drivers/scsi/bfa/bfa_defs_fcs.h @@ -90,12 +90,14 @@ enum bfa_lport_role { * FCS port configuration. 
*/ struct bfa_lport_cfg_s { - wwn_t pwwn; /* port wwn */ - wwn_t nwwn; /* node wwn */ - struct bfa_lport_symname_s sym_name; /* vm port symbolic name */ - bfa_boolean_t preboot_vp; /* vport created from PBC */ - enum bfa_lport_role roles; /* FCS port roles */ - u8 tag[16]; /* opaque tag from application */ + wwn_t pwwn; /* port wwn */ + wwn_t nwwn; /* node wwn */ + struct bfa_lport_symname_s sym_name; /* vm port symbolic name */ + enum bfa_lport_role roles; /* FCS port roles */ + u32 rsvd; + bfa_boolean_t preboot_vp; /* vport created from PBC */ + u8 tag[16]; /* opaque tag from application */ + u8 padding[4]; }; /* @@ -249,12 +251,13 @@ enum bfa_vport_state { BFA_FCS_VPORT_FDISC_SEND = 2, BFA_FCS_VPORT_FDISC = 3, BFA_FCS_VPORT_FDISC_RETRY = 4, - BFA_FCS_VPORT_ONLINE = 5, - BFA_FCS_VPORT_DELETING = 6, - BFA_FCS_VPORT_CLEANUP = 6, - BFA_FCS_VPORT_LOGO_SEND = 7, - BFA_FCS_VPORT_LOGO = 8, - BFA_FCS_VPORT_ERROR = 9, + BFA_FCS_VPORT_FDISC_RSP_WAIT = 5, + BFA_FCS_VPORT_ONLINE = 6, + BFA_FCS_VPORT_DELETING = 7, + BFA_FCS_VPORT_CLEANUP = 8, + BFA_FCS_VPORT_LOGO_SEND = 9, + BFA_FCS_VPORT_LOGO = 10, + BFA_FCS_VPORT_ERROR = 11, BFA_FCS_VPORT_MAX_STATE, }; diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h index 207f598877c..863c6ba7d5e 100644 --- a/drivers/scsi/bfa/bfa_defs_svc.h +++ b/drivers/scsi/bfa/bfa_defs_svc.h @@ -47,13 +47,12 @@ struct bfa_iocfc_fwcfg_s { u16 num_rports; /* number of remote ports */ u16 num_ioim_reqs; /* number of IO reqs */ u16 num_tskim_reqs; /* task management requests */ - u16 num_iotm_reqs; /* number of TM IO reqs */ - u16 num_tsktm_reqs; /* TM task management requests*/ + u16 num_fwtio_reqs; /* number of TM IO reqs in FW */ u16 num_fcxp_reqs; /* unassisted FC exchanges */ u16 num_uf_bufs; /* unsolicited recv buffers */ u8 num_cqs; u8 fw_tick_res; /* FW clock resolution in ms */ - u8 rsvd[4]; + u8 rsvd[2]; }; #pragma pack() @@ -66,8 +65,12 @@ struct bfa_iocfc_drvcfg_s { u16 ioc_recover; /* IOC recovery mode */ u16 min_cfg; /* minimum configuration */ u16 path_tov; /* device path timeout */ + u16 num_tio_reqs; /*!< number of TM IO reqs */ + u8 port_mode; + u8 rsvd_a; bfa_boolean_t delay_comp; /* delay completion of failed inflight IOs */ + u16 num_ttsk_reqs; /* TM task management requests */ u32 rsvd; }; @@ -82,7 +85,7 @@ struct bfa_iocfc_cfg_s { /* * IOC firmware IO stats */ -struct bfa_fw_io_stats_s { +struct bfa_fw_ioim_stats_s { u32 host_abort; /* IO aborted by host driver*/ u32 host_cleanup; /* IO clean up by host driver */ @@ -152,6 +155,54 @@ struct bfa_fw_io_stats_s { */ }; +struct bfa_fw_tio_stats_s { + u32 tio_conf_proc; /* TIO CONF processed */ + u32 tio_conf_drop; /* TIO CONF dropped */ + u32 tio_cleanup_req; /* TIO cleanup requested */ + u32 tio_cleanup_comp; /* TIO cleanup completed */ + u32 tio_abort_rsp; /* TIO abort response */ + u32 tio_abort_rsp_comp; /* TIO abort rsp completed */ + u32 tio_abts_req; /* TIO ABTS requested */ + u32 tio_abts_ack; /* TIO ABTS ack-ed */ + u32 tio_abts_ack_nocomp; /* TIO ABTS ack-ed but not completed */ + u32 tio_abts_tmo; /* TIO ABTS timeout */ + u32 tio_snsdata_dma; /* TIO sense data DMA */ + u32 tio_rxwchan_wait; /* TIO waiting for RX wait channel */ + u32 tio_rxwchan_avail; /* TIO RX wait channel available */ + u32 tio_hit_bls; /* TIO IOH BLS event */ + u32 tio_uf_recv; /* TIO received UF */ + u32 tio_rd_invalid_sm; /* TIO read reqst in wrong state machine */ + u32 tio_wr_invalid_sm;/* TIO write reqst in wrong state machine */ + + u32 ds_rxwchan_wait; /* DS waiting for RX wait channel */ + u32 
ds_rxwchan_avail; /* DS RX wait channel available */ + u32 ds_unaligned_rd; /* DS unaligned read */ + u32 ds_rdcomp_invalid_sm; /* DS read completed in wrong state machine */ + u32 ds_wrcomp_invalid_sm; /* DS write completed in wrong state machine */ + u32 ds_flush_req; /* DS flush requested */ + u32 ds_flush_comp; /* DS flush completed */ + u32 ds_xfrdy_exp; /* DS XFER_RDY expired */ + u32 ds_seq_cnt_err; /* DS seq cnt error */ + u32 ds_seq_len_err; /* DS seq len error */ + u32 ds_data_oor; /* DS data out of order */ + u32 ds_hit_bls; /* DS hit BLS */ + u32 ds_edtov_timer_exp; /* DS edtov expired */ + u32 ds_cpu_owned; /* DS cpu owned */ + u32 ds_hit_class2; /* DS hit class2 */ + u32 ds_length_err; /* DS length error */ + u32 ds_ro_ooo_err; /* DS relative offset out-of-order error */ + u32 ds_rectov_timer_exp; /* DS rectov expired */ + u32 ds_unexp_fr_err; /* DS unexp frame error */ +}; + +/* + * IOC firmware IO stats + */ +struct bfa_fw_io_stats_s { + struct bfa_fw_ioim_stats_s ioim_stats; + struct bfa_fw_tio_stats_s tio_stats; +}; + /* * IOC port firmware stats */ @@ -205,6 +256,7 @@ struct bfa_fw_port_lksm_stats_s { u32 nos_tx; /* No. of times NOS tx started */ u32 hwsm_lrr_rx; /* No. of times LRR rx-ed by HWSM */ u32 hwsm_lr_rx; /* No. of times LR rx-ed by HWSM */ + u32 bbsc_lr; /* LKSM LR tx for credit recovery */ }; struct bfa_fw_port_snsm_stats_s { @@ -216,6 +268,7 @@ struct bfa_fw_port_snsm_stats_s { u32 error_resets; /* error resets initiated by upsm */ u32 sync_lost; /* Sync loss count */ u32 sig_lost; /* Signal loss count */ + u32 asn8g_attempts; /* SNSM HWSM at 8Gbps attempts */ }; struct bfa_fw_port_physm_stats_s { @@ -266,8 +319,8 @@ struct bfa_fw_fcoe_stats_s { * IOC firmware FCoE port stats */ struct bfa_fw_fcoe_port_stats_s { - struct bfa_fw_fcoe_stats_s fcoe_stats; - struct bfa_fw_fip_stats_s fip_stats; + struct bfa_fw_fcoe_stats_s fcoe_stats; + struct bfa_fw_fip_stats_s fip_stats; }; /* @@ -416,6 +469,7 @@ struct bfa_fw_stats_s { * QoS states */ enum bfa_qos_state { + BFA_QOS_DISABLED = 0, /* QoS is disabled */ BFA_QOS_ONLINE = 1, /* QoS is online */ BFA_QOS_OFFLINE = 2, /* QoS is offline */ }; @@ -618,6 +672,12 @@ struct bfa_itnim_iostats_s { u32 tm_iocdowns; /* TM cleaned-up due to IOC down */ u32 tm_cleanups; /* TM cleanup requests */ u32 tm_cleanup_comps; /* TM cleanup completions */ + u32 lm_lun_across_sg; /* LM lun is across sg data buf */ + u32 lm_lun_not_sup; /* LM lun not supported */ + u32 lm_rpl_data_changed; /* LM report-lun data changed */ + u32 lm_wire_residue_changed; /* LM report-lun rsp residue changed */ + u32 lm_small_buf_addresidue; /* LM buf smaller than reported cnt */ + u32 lm_lun_not_rdy; /* LM lun not ready */ }; /* Modify char* port_stt[] in bfal_port.c if a new state was added */ @@ -636,6 +696,7 @@ enum bfa_port_states { BFA_PORT_ST_FWMISMATCH = 12, BFA_PORT_ST_PREBOOT_DISABLED = 13, BFA_PORT_ST_TOGGLING_QWAIT = 14, + BFA_PORT_ST_ACQ_ADDR = 15, BFA_PORT_ST_MAX_STATE, }; @@ -732,8 +793,51 @@ enum bfa_port_linkstate_rsn { CEE_ISCSI_PRI_PFC_OFF = 42, CEE_ISCSI_PRI_OVERLAP_FCOE_PRI = 43 }; + +#define MAX_LUN_MASK_CFG 16 + +/* + * Initially flash content may be fff. On making LUN mask enable and disable + * state chnage. when report lun command is being processed it goes from + * BFA_LUN_MASK_ACTIVE to BFA_LUN_MASK_FETCH and comes back to + * BFA_LUN_MASK_ACTIVE. 
+ */ +enum bfa_ioim_lun_mask_state_s { + BFA_IOIM_LUN_MASK_INACTIVE = 0, + BFA_IOIM_LUN_MASK_ACTIVE = 1, + BFA_IOIM_LUN_MASK_FETCHED = 2, +}; + +enum bfa_lunmask_state_s { + BFA_LUNMASK_DISABLED = 0x00, + BFA_LUNMASK_ENABLED = 0x01, + BFA_LUNMASK_MINCFG = 0x02, + BFA_LUNMASK_UNINITIALIZED = 0xff, +}; + #pragma pack(1) /* + * LUN mask configuration + */ +struct bfa_lun_mask_s { + wwn_t lp_wwn; + wwn_t rp_wwn; + struct scsi_lun lun; + u8 ua; + u8 rsvd[3]; + u16 rp_tag; + u8 lp_tag; + u8 state; +}; + +#define MAX_LUN_MASK_CFG 16 +struct bfa_lunmask_cfg_s { + u32 status; + u32 rsvd; + struct bfa_lun_mask_s lun_list[MAX_LUN_MASK_CFG]; +}; + +/* * Physical port configuration */ struct bfa_port_cfg_s { @@ -748,6 +852,10 @@ struct bfa_port_cfg_s { u8 tx_bbcredit; /* transmit buffer credits */ u8 ratelimit; /* ratelimit enabled or not */ u8 trl_def_speed; /* ratelimit default speed */ + u8 bb_scn; /* BB_SCN value from FLOGI Exchg */ + u8 bb_scn_state; /* Config state of BB_SCN */ + u8 faa_state; /* FAA enabled/disabled */ + u8 rsvd[1]; u16 path_tov; /* device path timeout */ u16 q_depth; /* SCSI Queue depth */ }; @@ -783,7 +891,7 @@ struct bfa_port_attr_s { enum bfa_port_topology topology; /* current topology */ bfa_boolean_t beacon; /* current beacon status */ bfa_boolean_t link_e2e_beacon; /* link beacon is on */ - bfa_boolean_t plog_enabled; /* portlog is enabled */ + bfa_boolean_t bbsc_op_status; /* fc credit recovery oper state */ /* * Dynamic field - info from FCS @@ -792,12 +900,10 @@ struct bfa_port_attr_s { enum bfa_port_type port_type; /* current topology */ u32 loopback; /* external loopback */ u32 authfail; /* auth fail state */ - bfa_boolean_t io_profile; /* get it from fcpim mod */ - u8 pad[4]; /* for 64-bit alignement */ /* FCoE specific */ u16 fcoe_vlan; - u8 rsvd1[6]; + u8 rsvd1[2]; }; /* @@ -988,6 +1094,19 @@ struct bfa_itnim_ioprofile_s { }; /* + * vHBA port attribute values. + */ +struct bfa_vhba_attr_s { + wwn_t nwwn; /* node wwn */ + wwn_t pwwn; /* port wwn */ + u32 pid; /* port ID */ + bfa_boolean_t io_profile; /* get it from fcpim mod */ + bfa_boolean_t plog_enabled; /* portlog is enabled */ + u16 path_tov; + u8 rsvd[2]; +}; + +/* * FC physical port statistics. 
*/ struct bfa_port_fc_stats_s { @@ -1020,6 +1139,9 @@ struct bfa_port_fc_stats_s { u64 bad_os_count; /* Invalid ordered sets */ u64 err_enc_out; /* Encoding err nonframe_8b10b */ u64 err_enc; /* Encoding err frame_8b10b */ + u64 bbsc_frames_lost; /* Credit Recovery-Frames Lost */ + u64 bbsc_credits_lost; /* Credit Recovery-Credits Lost */ + u64 bbsc_link_resets; /* Credit Recovery-Link Resets */ }; /* @@ -1078,4 +1200,131 @@ union bfa_port_stats_u { struct bfa_port_eth_stats_s eth; }; +struct bfa_port_cfg_mode_s { + u16 max_pf; + u16 max_vf; + enum bfa_mode_s mode; +}; + +#pragma pack(1) + +#define BFA_CEE_LLDP_MAX_STRING_LEN (128) +#define BFA_CEE_DCBX_MAX_PRIORITY (8) +#define BFA_CEE_DCBX_MAX_PGID (8) + +struct bfa_cee_lldp_str_s { + u8 sub_type; + u8 len; + u8 rsvd[2]; + u8 value[BFA_CEE_LLDP_MAX_STRING_LEN]; +}; + +struct bfa_cee_lldp_cfg_s { + struct bfa_cee_lldp_str_s chassis_id; + struct bfa_cee_lldp_str_s port_id; + struct bfa_cee_lldp_str_s port_desc; + struct bfa_cee_lldp_str_s sys_name; + struct bfa_cee_lldp_str_s sys_desc; + struct bfa_cee_lldp_str_s mgmt_addr; + u16 time_to_live; + u16 enabled_system_cap; +}; + +/* CEE/DCBX parameters */ +struct bfa_cee_dcbx_cfg_s { + u8 pgid[BFA_CEE_DCBX_MAX_PRIORITY]; + u8 pg_percentage[BFA_CEE_DCBX_MAX_PGID]; + u8 pfc_primap; /* bitmap of priorties with PFC enabled */ + u8 fcoe_primap; /* bitmap of priorities used for FcoE traffic */ + u8 iscsi_primap; /* bitmap of priorities used for iSCSI traffic */ + u8 dcbx_version; /* operating version:CEE or preCEE */ + u8 lls_fcoe; /* FCoE Logical Link Status */ + u8 lls_lan; /* LAN Logical Link Status */ + u8 rsvd[2]; +}; + +/* CEE Query */ +struct bfa_cee_attr_s { + u8 cee_status; + u8 error_reason; + struct bfa_cee_lldp_cfg_s lldp_remote; + struct bfa_cee_dcbx_cfg_s dcbx_remote; + mac_t src_mac; + u8 link_speed; + u8 nw_priority; + u8 filler[2]; +}; + +/* LLDP/DCBX/CEE Statistics */ +struct bfa_cee_stats_s { + u32 lldp_tx_frames; /* LLDP Tx Frames */ + u32 lldp_rx_frames; /* LLDP Rx Frames */ + u32 lldp_rx_frames_invalid; /* LLDP Rx Frames invalid */ + u32 lldp_rx_frames_new; /* LLDP Rx Frames new */ + u32 lldp_tlvs_unrecognized; /* LLDP Rx unrecog. 
TLVs */ + u32 lldp_rx_shutdown_tlvs; /* LLDP Rx shutdown TLVs */ + u32 lldp_info_aged_out; /* LLDP remote info aged */ + u32 dcbx_phylink_ups; /* DCBX phy link ups */ + u32 dcbx_phylink_downs; /* DCBX phy link downs */ + u32 dcbx_rx_tlvs; /* DCBX Rx TLVs */ + u32 dcbx_rx_tlvs_invalid; /* DCBX Rx TLVs invalid */ + u32 dcbx_control_tlv_error; /* DCBX control TLV errors */ + u32 dcbx_feature_tlv_error; /* DCBX feature TLV errors */ + u32 dcbx_cee_cfg_new; /* DCBX new CEE cfg rcvd */ + u32 cee_status_down; /* DCB status down */ + u32 cee_status_up; /* DCB status up */ + u32 cee_hw_cfg_changed; /* DCB hw cfg changed */ + u32 cee_rx_invalid_cfg; /* DCB invalid cfg */ +}; + +#pragma pack() + +/* + * AEN related definitions + */ +#define BFAD_NL_VENDOR_ID (((u64)0x01 << SCSI_NL_VID_TYPE_SHIFT) \ + | BFA_PCI_VENDOR_ID_BROCADE) + +/* BFA remote port events */ +enum bfa_rport_aen_event { + BFA_RPORT_AEN_ONLINE = 1, /* RPort online event */ + BFA_RPORT_AEN_OFFLINE = 2, /* RPort offline event */ + BFA_RPORT_AEN_DISCONNECT = 3, /* RPort disconnect event */ + BFA_RPORT_AEN_QOS_PRIO = 4, /* QOS priority change event */ + BFA_RPORT_AEN_QOS_FLOWID = 5, /* QOS flow Id change event */ +}; + +struct bfa_rport_aen_data_s { + u16 vf_id; /* vf_id of this logical port */ + u16 rsvd[3]; + wwn_t ppwwn; /* WWN of its physical port */ + wwn_t lpwwn; /* WWN of this logical port */ + wwn_t rpwwn; /* WWN of this remote port */ + union { + struct bfa_rport_qos_attr_s qos; + } priv; +}; + +union bfa_aen_data_u { + struct bfa_adapter_aen_data_s adapter; + struct bfa_port_aen_data_s port; + struct bfa_lport_aen_data_s lport; + struct bfa_rport_aen_data_s rport; + struct bfa_itnim_aen_data_s itnim; + struct bfa_audit_aen_data_s audit; + struct bfa_ioc_aen_data_s ioc; +}; + +#define BFA_AEN_MAX_ENTRY 512 + +struct bfa_aen_entry_s { + struct list_head qe; + enum bfa_aen_category aen_category; + u32 aen_type; + union bfa_aen_data_u aen_data; + struct timeval aen_tv; + u32 seq_num; + u32 bfad_num; +}; + #endif /* __BFA_DEFS_SVC_H__ */ diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h index bf0067e0fd0..50b6a1c8619 100644 --- a/drivers/scsi/bfa/bfa_fc.h +++ b/drivers/scsi/bfa/bfa_fc.h @@ -56,6 +56,161 @@ struct scsi_cdb_s { #define SCSI_MAX_ALLOC_LEN 0xFF /* maximum allocarion length */ +#define SCSI_SENSE_CUR_ERR 0x70 +#define SCSI_SENSE_DEF_ERR 0x71 + +/* + * SCSI additional sense codes + */ +#define SCSI_ASC_LUN_NOT_READY 0x04 +#define SCSI_ASC_LUN_NOT_SUPPORTED 0x25 +#define SCSI_ASC_TOCC 0x3F + +/* + * SCSI additional sense code qualifiers + */ +#define SCSI_ASCQ_MAN_INTR_REQ 0x03 /* manual intervention req */ +#define SCSI_ASCQ_RL_DATA_CHANGED 0x0E /* report luns data changed */ + +/* + * Methods of reporting informational exceptions + */ +#define SCSI_MP_IEC_UNIT_ATTN 0x2 /* generate unit attention */ + +struct scsi_report_luns_data_s { + u32 lun_list_length; /* length of LUN list length */ + u32 reserved; + struct scsi_lun lun[1]; /* first LUN in lun list */ +}; + +struct scsi_inquiry_vendor_s { + u8 vendor_id[8]; +}; + +struct scsi_inquiry_prodid_s { + u8 product_id[16]; +}; + +struct scsi_inquiry_prodrev_s { + u8 product_rev[4]; +}; + +struct scsi_inquiry_data_s { +#ifdef __BIG_ENDIAN + u8 peripheral_qual:3; /* peripheral qualifier */ + u8 device_type:5; /* peripheral device type */ + u8 rmb:1; /* removable medium bit */ + u8 device_type_mod:7; /* device type modifier */ + u8 version; + u8 aenc:1; /* async evt notification capability */ + u8 trm_iop:1; /* terminate I/O process */ + u8 norm_aca:1; /* 
normal ACA supported */ + u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */ + u8 rsp_data_format:4; + u8 additional_len; + u8 sccs:1; + u8 reserved1:7; + u8 reserved2:1; + u8 enc_serv:1; /* enclosure service component */ + u8 reserved3:1; + u8 multi_port:1; /* multi-port device */ + u8 m_chngr:1; /* device in medium transport element */ + u8 ack_req_q:1; /* SIP specific bit */ + u8 addr32:1; /* SIP specific bit */ + u8 addr16:1; /* SIP specific bit */ + u8 rel_adr:1; /* relative address */ + u8 w_bus32:1; + u8 w_bus16:1; + u8 synchronous:1; + u8 linked_commands:1; + u8 trans_dis:1; + u8 cmd_queue:1; /* command queueing supported */ + u8 soft_reset:1; /* soft reset alternative (VS) */ +#else + u8 device_type:5; /* peripheral device type */ + u8 peripheral_qual:3; /* peripheral qualifier */ + u8 device_type_mod:7; /* device type modifier */ + u8 rmb:1; /* removable medium bit */ + u8 version; + u8 rsp_data_format:4; + u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */ + u8 norm_aca:1; /* normal ACA supported */ + u8 terminate_iop:1;/* terminate I/O process */ + u8 aenc:1; /* async evt notification capability */ + u8 additional_len; + u8 reserved1:7; + u8 sccs:1; + u8 addr16:1; /* SIP specific bit */ + u8 addr32:1; /* SIP specific bit */ + u8 ack_req_q:1; /* SIP specific bit */ + u8 m_chngr:1; /* device in medium transport element */ + u8 multi_port:1; /* multi-port device */ + u8 reserved3:1; /* TBD - Vendor Specific */ + u8 enc_serv:1; /* enclosure service component */ + u8 reserved2:1; + u8 soft_seset:1; /* soft reset alternative (VS) */ + u8 cmd_queue:1; /* command queueing supported */ + u8 trans_dis:1; + u8 linked_commands:1; + u8 synchronous:1; + u8 w_bus16:1; + u8 w_bus32:1; + u8 rel_adr:1; /* relative address */ +#endif + struct scsi_inquiry_vendor_s vendor_id; + struct scsi_inquiry_prodid_s product_id; + struct scsi_inquiry_prodrev_s product_rev; + u8 vendor_specific[20]; + u8 reserved4[40]; +}; + +/* + * SCSI sense data format + */ +struct scsi_sense_s { +#ifdef __BIG_ENDIAN + u8 valid:1; + u8 rsp_code:7; +#else + u8 rsp_code:7; + u8 valid:1; +#endif + u8 seg_num; +#ifdef __BIG_ENDIAN + u8 file_mark:1; + u8 eom:1; /* end of media */ + u8 ili:1; /* incorrect length indicator */ + u8 reserved:1; + u8 sense_key:4; +#else + u8 sense_key:4; + u8 reserved:1; + u8 ili:1; /* incorrect length indicator */ + u8 eom:1; /* end of media */ + u8 file_mark:1; +#endif + u8 information[4]; /* device-type or cmd specific info */ + u8 add_sense_length; /* additional sense length */ + u8 command_info[4];/* command specific information */ + u8 asc; /* additional sense code */ + u8 ascq; /* additional sense code qualifier */ + u8 fru_code; /* field replaceable unit code */ +#ifdef __BIG_ENDIAN + u8 sksv:1; /* sense key specific valid */ + u8 c_d:1; /* command/data bit */ + u8 res1:2; + u8 bpv:1; /* bit pointer valid */ + u8 bpointer:3; /* bit pointer */ +#else + u8 bpointer:3; /* bit pointer */ + u8 bpv:1; /* bit pointer valid */ + u8 res1:2; + u8 c_d:1; /* command/data bit */ + u8 sksv:1; /* sense key specific valid */ +#endif + u8 fpointer[2]; /* field pointer */ +}; + /* * Fibre Channel Header Structure (FCHS) definition */ @@ -1021,7 +1176,7 @@ struct fc_symname_s { #define FC_ED_TOV 2 #define FC_REC_TOV (FC_ED_TOV + 1) #define FC_RA_TOV 10 -#define FC_ELS_TOV (2 * FC_RA_TOV) +#define FC_ELS_TOV ((2 * FC_RA_TOV) + 1) #define FC_FCCT_TOV (3 * FC_RA_TOV) /* @@ -1049,15 +1204,6 @@ struct fc_vft_s { }; /* - * FCP - */ -enum { - FCP_RJT = 0x01000000, /* SRR reject */ - FCP_SRR_ACCEPT = 0x02000000, /* 
SRR accept */ - FCP_SRR = 0x14000000, /* Sequence Retransmission Request */ -}; - -/* * FCP_CMND definitions */ #define FCP_CMND_CDB_LEN 16 diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c index b7e25345165..17b59b8b564 100644 --- a/drivers/scsi/bfa/bfa_fcbuild.c +++ b/drivers/scsi/bfa/bfa_fcbuild.c @@ -94,7 +94,6 @@ fcbuild_init(void) */ plogi_tmpl.csp.verhi = FC_PH_VER_PH_3; plogi_tmpl.csp.verlo = FC_PH_VER_4_3; - plogi_tmpl.csp.bbcred = cpu_to_be16(0x0004); plogi_tmpl.csp.ciro = 0x1; plogi_tmpl.csp.cisc = 0x0; plogi_tmpl.csp.altbbcred = 0x0; @@ -156,6 +155,22 @@ fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id) */ } +static void +fc_gsresp_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id) +{ + memset(fchs, 0, sizeof(struct fchs_s)); + + fchs->routing = FC_RTG_FC4_DEV_DATA; + fchs->cat_info = FC_CAT_SOLICIT_CTRL; + fchs->type = FC_TYPE_SERVICES; + fchs->f_ctl = + bfa_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH | + FCTL_END_SEQ | FCTL_SI_XFER); + fchs->d_id = d_id; + fchs->s_id = s_id; + fchs->ox_id = ox_id; +} + void fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id) { @@ -207,7 +222,7 @@ fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id) static u16 fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, __be16 ox_id, wwn_t port_name, wwn_t node_name, - u16 pdu_size, u8 els_code) + u16 pdu_size, u16 bb_cr, u8 els_code) { struct fc_logi_s *plogi = (struct fc_logi_s *) (pld); @@ -220,6 +235,7 @@ fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, fc_els_rsp_build(fchs, d_id, s_id, ox_id); plogi->csp.rxsz = plogi->class3.rxsz = cpu_to_be16(pdu_size); + plogi->csp.bbcred = cpu_to_be16(bb_cr); memcpy(&plogi->port_name, &port_name, sizeof(wwn_t)); memcpy(&plogi->node_name, &node_name, sizeof(wwn_t)); @@ -268,15 +284,17 @@ fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, u16 fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, __be16 ox_id, wwn_t port_name, wwn_t node_name, - u16 pdu_size, u16 local_bb_credits) + u16 pdu_size, u16 local_bb_credits, u8 bb_scn) { u32 d_id = 0; + u16 bbscn_rxsz = (bb_scn << 12) | pdu_size; memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); fc_els_rsp_build(fchs, d_id, s_id, ox_id); flogi->els_cmd.els_code = FC_ELS_ACC; - flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size); + flogi->class3.rxsz = cpu_to_be16(pdu_size); + flogi->csp.rxsz = cpu_to_be16(bbscn_rxsz); /* bb_scn/rxsz */ flogi->port_name = port_name; flogi->node_name = node_name; @@ -306,19 +324,19 @@ fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, u16 fc_plogi_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, u16 ox_id, wwn_t port_name, wwn_t node_name, - u16 pdu_size) + u16 pdu_size, u16 bb_cr) { return fc_plogi_x_build(fchs, pld, d_id, s_id, ox_id, port_name, - node_name, pdu_size, FC_ELS_PLOGI); + node_name, pdu_size, bb_cr, FC_ELS_PLOGI); } u16 fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, u16 ox_id, wwn_t port_name, wwn_t node_name, - u16 pdu_size) + u16 pdu_size, u16 bb_cr) { return fc_plogi_x_build(fchs, pld, d_id, s_id, ox_id, port_name, - node_name, pdu_size, FC_ELS_ACC); + node_name, pdu_size, bb_cr, FC_ELS_ACC); } enum fc_parse_status @@ -1096,6 +1114,21 @@ fc_ct_rsp_parse(struct ct_hdr_s *cthdr) } u16 +fc_gs_rjt_build(struct fchs_s *fchs, struct ct_hdr_s *cthdr, + u32 d_id, u32 s_id, u16 ox_id, u8 reason_code, + u8 reason_code_expl) +{ + 
fc_gsresp_fchdr_build(fchs, d_id, s_id, ox_id); + + cthdr->cmd_rsp_code = cpu_to_be16(CT_RSP_REJECT); + cthdr->rev_id = CT_GS3_REVISION; + + cthdr->reason_code = reason_code; + cthdr->exp_code = reason_code_expl; + return sizeof(struct ct_hdr_s); +} + +u16 fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr, u8 set_br_reg, u32 s_id, u16 ox_id) { diff --git a/drivers/scsi/bfa/bfa_fcbuild.h b/drivers/scsi/bfa/bfa_fcbuild.h index ece51ec7620..42cd9d4da69 100644 --- a/drivers/scsi/bfa/bfa_fcbuild.h +++ b/drivers/scsi/bfa/bfa_fcbuild.h @@ -66,6 +66,9 @@ fc_rpsc_operspeed_to_bfa_speed(enum fc_rpsc_op_speed speed) case RPSC_OP_SPEED_8G: return BFA_PORT_SPEED_8GBPS; + case RPSC_OP_SPEED_16G: + return BFA_PORT_SPEED_16GBPS; + case RPSC_OP_SPEED_10G: return BFA_PORT_SPEED_10GBPS; @@ -94,6 +97,9 @@ fc_bfa_speed_to_rpsc_operspeed(enum bfa_port_speed op_speed) case BFA_PORT_SPEED_8GBPS: return RPSC_OP_SPEED_8G; + case BFA_PORT_SPEED_16GBPS: + return RPSC_OP_SPEED_16G; + case BFA_PORT_SPEED_10GBPS: return RPSC_OP_SPEED_10G; @@ -141,11 +147,11 @@ u16 fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, __be16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size, - u16 local_bb_credits); + u16 local_bb_credits, u8 bb_scn); u16 fc_plogi_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, u16 ox_id, wwn_t port_name, - wwn_t node_name, u16 pdu_size); + wwn_t node_name, u16 pdu_size, u16 bb_cr); enum fc_parse_status fc_plogi_parse(struct fchs_s *fchs); @@ -177,13 +183,17 @@ u16 fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 fc_gpnid_build(struct fchs_s *fchs, void *pld, u32 s_id, u16 ox_id, u32 port_id); +u16 fc_gs_rjt_build(struct fchs_s *fchs, struct ct_hdr_s *cthdr, + u32 d_id, u32 s_id, u16 ox_id, + u8 reason_code, u8 reason_code_expl); + u16 fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr, u8 set_br_reg, u32 s_id, u16 ox_id); u16 fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, u16 ox_id, wwn_t port_name, wwn_t node_name, - u16 pdu_size); + u16 pdu_size, u16 bb_cr); u16 fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id, u32 s_id, __be16 ox_id, wwn_t port_name, diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c index c0353cdca92..e07bd4745d8 100644 --- a/drivers/scsi/bfa/bfa_fcpim.c +++ b/drivers/scsi/bfa/bfa_fcpim.c @@ -19,12 +19,14 @@ #include "bfa_modules.h" BFA_TRC_FILE(HAL, FCPIM); -BFA_MODULE(fcpim); /* * BFA ITNIM Related definitions */ static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim); +static bfa_boolean_t bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim); +static bfa_boolean_t bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim); +static void bfa_ioim_lm_init(struct bfa_s *bfa); #define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \ (((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1)))) @@ -58,6 +60,14 @@ static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim); } \ } while (0) +#define bfa_ioim_rp_wwn(__ioim) \ + (((struct bfa_fcs_rport_s *) \ + (__ioim)->itnim->rport->rport_drv)->pwwn) + +#define bfa_ioim_lp_wwn(__ioim) \ + ((BFA_LPS_FROM_TAG(BFA_LPS_MOD((__ioim)->bfa), \ + (__ioim)->itnim->rport->rport_info.lp_tag))->pwwn) \ + #define bfa_itnim_sler_cb(__itnim) do { \ if ((__itnim)->bfa->fcs) \ bfa_cb_itnim_sler((__itnim)->ditn); \ @@ -67,6 +77,18 @@ static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim); } \ } while (0) +enum bfa_ioim_lm_status { + BFA_IOIM_LM_PRESENT = 1, + BFA_IOIM_LM_LUN_NOT_SUP = 2, + 
BFA_IOIM_LM_RPL_DATA_CHANGED = 3, + BFA_IOIM_LM_LUN_NOT_RDY = 4, +}; + +enum bfa_ioim_lm_ua_status { + BFA_IOIM_LM_UA_RESET = 0, + BFA_IOIM_LM_UA_SET = 1, +}; + /* * itnim state machine event */ @@ -123,6 +145,9 @@ enum bfa_ioim_event { BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */ BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */ BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */ + BFA_IOIM_SM_LM_LUN_NOT_SUP = 19,/* lunmask lun not supported */ + BFA_IOIM_SM_LM_RPL_DC = 20, /* lunmask report-lun data changed */ + BFA_IOIM_SM_LM_LUN_NOT_RDY = 21,/* lunmask lun not ready */ }; @@ -220,6 +245,9 @@ static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete); static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete); static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete); static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim); +static void __bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete); +static void __bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete); +static void __bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete); /* * forward declaration of BFA IO state machine @@ -287,24 +315,16 @@ static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, * Compute and return memory needed by FCP(im) module. */ static void -bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, - u32 *dm_len) +bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len) { - bfa_itnim_meminfo(cfg, km_len, dm_len); + bfa_itnim_meminfo(cfg, km_len); /* * IO memory */ - if (cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN) - cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN; - else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX) - cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX; - *km_len += cfg->fwcfg.num_ioim_reqs * (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s)); - *dm_len += cfg->fwcfg.num_ioim_reqs * BFI_IOIM_SNSLEN; - /* * task management command memory */ @@ -315,52 +335,41 @@ bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, static void -bfa_fcpim_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, - struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) +bfa_fcpim_attach(struct bfa_fcp_mod_s *fcp, void *bfad, + struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev) { - struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); + struct bfa_fcpim_s *fcpim = &fcp->fcpim; + struct bfa_s *bfa = fcp->bfa; bfa_trc(bfa, cfg->drvcfg.path_tov); bfa_trc(bfa, cfg->fwcfg.num_rports); bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs); bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs); + fcpim->fcp = fcp; fcpim->bfa = bfa; fcpim->num_itnims = cfg->fwcfg.num_rports; - fcpim->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs; fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs; fcpim->path_tov = cfg->drvcfg.path_tov; fcpim->delay_comp = cfg->drvcfg.delay_comp; fcpim->profile_comp = NULL; fcpim->profile_start = NULL; - bfa_itnim_attach(fcpim, meminfo); - bfa_tskim_attach(fcpim, meminfo); - bfa_ioim_attach(fcpim, meminfo); + bfa_itnim_attach(fcpim); + bfa_tskim_attach(fcpim); + bfa_ioim_attach(fcpim); } static void -bfa_fcpim_detach(struct bfa_s *bfa) +bfa_fcpim_iocdisable(struct bfa_fcp_mod_s *fcp) { -} - -static void -bfa_fcpim_start(struct bfa_s *bfa) -{ -} - -static void -bfa_fcpim_stop(struct bfa_s *bfa) -{ -} - -static void -bfa_fcpim_iocdisable(struct bfa_s *bfa) -{ - struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); + struct bfa_fcpim_s *fcpim = &fcp->fcpim; struct bfa_itnim_s *itnim; struct list_head *qe, *qen; + /* Enqueue unused ioim resources to 
free_q */ + list_splice_tail_init(&fcpim->tskim_unused_q, &fcpim->tskim_free_q); + list_for_each_safe(qe, qen, &fcpim->itnim_q) { itnim = (struct bfa_itnim_s *) qe; bfa_itnim_iocdisable(itnim); @@ -370,7 +379,7 @@ bfa_fcpim_iocdisable(struct bfa_s *bfa) void bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov) { - struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); + struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); fcpim->path_tov = path_tov * 1000; if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX) @@ -380,15 +389,146 @@ bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov) u16 bfa_fcpim_path_tov_get(struct bfa_s *bfa) { - struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); + struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); return fcpim->path_tov / 1000; } +#define bfa_fcpim_add_iostats(__l, __r, __stats) \ + (__l->__stats += __r->__stats) + +void +bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats, + struct bfa_itnim_iostats_s *rstats) +{ + bfa_fcpim_add_iostats(lstats, rstats, total_ios); + bfa_fcpim_add_iostats(lstats, rstats, qresumes); + bfa_fcpim_add_iostats(lstats, rstats, no_iotags); + bfa_fcpim_add_iostats(lstats, rstats, io_aborts); + bfa_fcpim_add_iostats(lstats, rstats, no_tskims); + bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok); + bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun); + bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun); + bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted); + bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout); + bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort); + bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err); + bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err); + bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed); + bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free); + bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts); + bfa_fcpim_add_iostats(lstats, rstats, iocom_utags); + bfa_fcpim_add_iostats(lstats, rstats, io_cleanups); + bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts); + bfa_fcpim_add_iostats(lstats, rstats, onlines); + bfa_fcpim_add_iostats(lstats, rstats, offlines); + bfa_fcpim_add_iostats(lstats, rstats, creates); + bfa_fcpim_add_iostats(lstats, rstats, deletes); + bfa_fcpim_add_iostats(lstats, rstats, create_comps); + bfa_fcpim_add_iostats(lstats, rstats, delete_comps); + bfa_fcpim_add_iostats(lstats, rstats, sler_events); + bfa_fcpim_add_iostats(lstats, rstats, fw_create); + bfa_fcpim_add_iostats(lstats, rstats, fw_delete); + bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled); + bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps); + bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds); + bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps); + bfa_fcpim_add_iostats(lstats, rstats, tm_success); + bfa_fcpim_add_iostats(lstats, rstats, tm_failures); + bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps); + bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes); + bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns); + bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups); + bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps); + bfa_fcpim_add_iostats(lstats, rstats, io_comps); + bfa_fcpim_add_iostats(lstats, rstats, input_reqs); + bfa_fcpim_add_iostats(lstats, rstats, output_reqs); + bfa_fcpim_add_iostats(lstats, rstats, rd_throughput); + bfa_fcpim_add_iostats(lstats, rstats, wr_throughput); + bfa_fcpim_add_iostats(lstats, rstats, lm_lun_across_sg); + bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_sup); + bfa_fcpim_add_iostats(lstats, rstats, lm_rpl_data_changed); + bfa_fcpim_add_iostats(lstats, rstats, 
lm_wire_residue_changed); + bfa_fcpim_add_iostats(lstats, rstats, lm_small_buf_addresidue); + bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_rdy); +} + +bfa_status_t +bfa_fcpim_port_iostats(struct bfa_s *bfa, + struct bfa_itnim_iostats_s *stats, u8 lp_tag) +{ + struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); + struct list_head *qe, *qen; + struct bfa_itnim_s *itnim; + + /* accumulate IO stats from itnim */ + memset(stats, 0, sizeof(struct bfa_itnim_iostats_s)); + list_for_each_safe(qe, qen, &fcpim->itnim_q) { + itnim = (struct bfa_itnim_s *) qe; + if (itnim->rport->rport_info.lp_tag != lp_tag) + continue; + bfa_fcpim_add_stats(stats, &(itnim->stats)); + } + return BFA_STATUS_OK; +} + +void +bfa_ioim_profile_comp(struct bfa_ioim_s *ioim) +{ + struct bfa_itnim_latency_s *io_lat = + &(ioim->itnim->ioprofile.io_latency); + u32 val, idx; + + val = (u32)(jiffies - ioim->start_time); + idx = bfa_ioim_get_index(scsi_bufflen((struct scsi_cmnd *)ioim->dio)); + bfa_itnim_ioprofile_update(ioim->itnim, idx); + + io_lat->count[idx]++; + io_lat->min[idx] = (io_lat->min[idx] < val) ? io_lat->min[idx] : val; + io_lat->max[idx] = (io_lat->max[idx] > val) ? io_lat->max[idx] : val; + io_lat->avg[idx] += val; +} + +void +bfa_ioim_profile_start(struct bfa_ioim_s *ioim) +{ + ioim->start_time = jiffies; +} + +bfa_status_t +bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time) +{ + struct bfa_itnim_s *itnim; + struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); + struct list_head *qe, *qen; + + /* accumulate IO stats from itnim */ + list_for_each_safe(qe, qen, &fcpim->itnim_q) { + itnim = (struct bfa_itnim_s *) qe; + bfa_itnim_clear_stats(itnim); + } + fcpim->io_profile = BFA_TRUE; + fcpim->io_profile_start_time = time; + fcpim->profile_comp = bfa_ioim_profile_comp; + fcpim->profile_start = bfa_ioim_profile_start; + return BFA_STATUS_OK; +} + +bfa_status_t +bfa_fcpim_profile_off(struct bfa_s *bfa) +{ + struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); + fcpim->io_profile = BFA_FALSE; + fcpim->io_profile_start_time = 0; + fcpim->profile_comp = NULL; + fcpim->profile_start = NULL; + return BFA_STATUS_OK; +} + u16 bfa_fcpim_qdepth_get(struct bfa_s *bfa) { - struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); + struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); return fcpim->q_depth; } @@ -990,8 +1130,7 @@ bfa_itnim_tskdone(struct bfa_itnim_s *itnim) } void -bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, - u32 *dm_len) +bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len) { /* * ITN memory @@ -1000,15 +1139,16 @@ bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, } void -bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo) +bfa_itnim_attach(struct bfa_fcpim_s *fcpim) { struct bfa_s *bfa = fcpim->bfa; + struct bfa_fcp_mod_s *fcp = fcpim->fcp; struct bfa_itnim_s *itnim; int i, j; INIT_LIST_HEAD(&fcpim->itnim_q); - itnim = (struct bfa_itnim_s *) bfa_meminfo_kva(minfo); + itnim = (struct bfa_itnim_s *) bfa_mem_kva_curp(fcp); fcpim->itnim_arr = itnim; for (i = 0; i < fcpim->num_itnims; i++, itnim++) { @@ -1030,7 +1170,7 @@ bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo) bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); } - bfa_meminfo_kva(minfo) = (u8 *) itnim; + bfa_mem_kva_curp(fcp) = (u8 *) itnim; } void @@ -1043,7 +1183,7 @@ bfa_itnim_iocdisable(struct bfa_itnim_s *itnim) static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim) { - struct bfi_itnim_create_req_s *m; + struct bfi_itn_create_req_s *m; itnim->msg_no++; @@ -1056,8 +1196,8 @@ 
bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim) return BFA_FALSE; } - bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_CREATE_REQ, - bfa_lpuid(itnim->bfa)); + bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_CREATE_REQ, + bfa_fn_lpu(itnim->bfa)); m->fw_handle = itnim->rport->fw_handle; m->class = FC_CLASS_3; m->seq_rec = itnim->seq_rec; @@ -1067,14 +1207,14 @@ bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim) /* * queue I/O message to firmware */ - bfa_reqq_produce(itnim->bfa, itnim->reqq); + bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh); return BFA_TRUE; } static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim) { - struct bfi_itnim_delete_req_s *m; + struct bfi_itn_delete_req_s *m; /* * check for room in queue to send request now @@ -1085,15 +1225,15 @@ bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim) return BFA_FALSE; } - bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_DELETE_REQ, - bfa_lpuid(itnim->bfa)); + bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_DELETE_REQ, + bfa_fn_lpu(itnim->bfa)); m->fw_handle = itnim->rport->fw_handle; bfa_stats(itnim, fw_delete); /* * queue I/O message to firmware */ - bfa_reqq_produce(itnim->bfa, itnim->reqq); + bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh); return BFA_TRUE; } @@ -1224,7 +1364,7 @@ bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim) static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim) { - struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(itnim->bfa); + struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa); fcpim->del_itn_stats.del_itn_iocomp_aborted += itnim->stats.iocomp_aborted; fcpim->del_itn_stats.del_itn_iocomp_timedout += @@ -1250,8 +1390,8 @@ bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim) void bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) { - struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); - union bfi_itnim_i2h_msg_u msg; + struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); + union bfi_itn_i2h_msg_u msg; struct bfa_itnim_s *itnim; bfa_trc(bfa, m->mhdr.msg_id); @@ -1259,7 +1399,7 @@ bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) msg.msg = m; switch (m->mhdr.msg_id) { - case BFI_ITNIM_I2H_CREATE_RSP: + case BFI_ITN_I2H_CREATE_RSP: itnim = BFA_ITNIM_FROM_TAG(fcpim, msg.create_rsp->bfa_handle); WARN_ON(msg.create_rsp->status != BFA_STATUS_OK); @@ -1267,7 +1407,7 @@ bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP); break; - case BFI_ITNIM_I2H_DELETE_RSP: + case BFI_ITN_I2H_DELETE_RSP: itnim = BFA_ITNIM_FROM_TAG(fcpim, msg.delete_rsp->bfa_handle); WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK); @@ -1275,7 +1415,7 @@ bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP); break; - case BFI_ITNIM_I2H_SLER_EVENT: + case BFI_ITN_I2H_SLER_EVENT: itnim = BFA_ITNIM_FROM_TAG(fcpim, msg.sler_event->bfa_handle); bfa_stats(itnim, sler_events); @@ -1295,9 +1435,11 @@ bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) struct bfa_itnim_s * bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn) { - struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); + struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); struct bfa_itnim_s *itnim; + bfa_itn_create(bfa, rport, bfa_itnim_isr); + itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag); WARN_ON(itnim->rport != rport); @@ -1347,6 +1489,26 @@ bfa_itnim_hold_io(struct bfa_itnim_s *itnim) bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable)); } +#define bfa_io_lat_clock_res_div HZ +#define bfa_io_lat_clock_res_mul 1000 +bfa_status_t +bfa_itnim_get_ioprofile(struct 
bfa_itnim_s *itnim, + struct bfa_itnim_ioprofile_s *ioprofile) +{ + struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa); + if (!fcpim->io_profile) + return BFA_STATUS_IOPROFILE_OFF; + + itnim->ioprofile.index = BFA_IOBUCKET_MAX; + itnim->ioprofile.io_profile_start_time = + bfa_io_profile_start_time(itnim->bfa); + itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul; + itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div; + *ioprofile = itnim->ioprofile; + + return BFA_STATUS_OK; +} + void bfa_itnim_clear_stats(struct bfa_itnim_s *itnim) { @@ -1415,7 +1577,28 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim)); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, - __bfa_cb_ioim_abort, ioim); + __bfa_cb_ioim_abort, ioim); + break; + + case BFA_IOIM_SM_LM_LUN_NOT_SUP: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, + __bfa_cb_ioim_lm_lun_not_sup, ioim); + break; + + case BFA_IOIM_SM_LM_RPL_DC: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, + __bfa_cb_ioim_lm_rpl_dc, ioim); + break; + + case BFA_IOIM_SM_LM_LUN_NOT_RDY: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, + __bfa_cb_ioim_lm_lun_not_rdy, ioim); break; default: @@ -1955,6 +2138,264 @@ bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) } } +/* + * This is called from bfa_fcpim_start after the bfa_init() with flash read + * is complete by driver. now invalidate the stale content of lun mask + * like unit attention, rp tag and lp tag. + */ +static void +bfa_ioim_lm_init(struct bfa_s *bfa) +{ + struct bfa_lun_mask_s *lunm_list; + int i; + + if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG) + return; + + lunm_list = bfa_get_lun_mask_list(bfa); + for (i = 0; i < MAX_LUN_MASK_CFG; i++) { + lunm_list[i].ua = BFA_IOIM_LM_UA_RESET; + lunm_list[i].lp_tag = BFA_LP_TAG_INVALID; + lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID; + } +} + +/* + * Validate LUN for LUN masking + */ +static enum bfa_ioim_lm_status +bfa_ioim_lm_check(struct bfa_ioim_s *ioim, struct bfa_lps_s *lps, + struct bfa_rport_s *rp, struct scsi_lun lun) +{ + u8 i; + struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa); + struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio; + struct scsi_cdb_s *cdb = (struct scsi_cdb_s *)cmnd->cmnd; + + if ((cdb->scsi_cdb[0] == REPORT_LUNS) && + (scsilun_to_int((struct scsi_lun *)&lun) == 0)) { + ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data; + return BFA_IOIM_LM_PRESENT; + } + + for (i = 0; i < MAX_LUN_MASK_CFG; i++) { + + if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE) + continue; + + if ((scsilun_to_int((struct scsi_lun *)&lun_list[i].lun) == + scsilun_to_int((struct scsi_lun *)&lun)) + && (rp->rport_tag == lun_list[i].rp_tag) + && ((u8)ioim->itnim->rport->rport_info.lp_tag == + lun_list[i].lp_tag)) { + bfa_trc(ioim->bfa, lun_list[i].rp_tag); + bfa_trc(ioim->bfa, lun_list[i].lp_tag); + bfa_trc(ioim->bfa, scsilun_to_int( + (struct scsi_lun *)&lun_list[i].lun)); + + if ((lun_list[i].ua == BFA_IOIM_LM_UA_SET) && + ((cdb->scsi_cdb[0] != INQUIRY) || + (cdb->scsi_cdb[0] != REPORT_LUNS))) { + lun_list[i].ua = BFA_IOIM_LM_UA_RESET; + return BFA_IOIM_LM_RPL_DATA_CHANGED; + } + + if (cdb->scsi_cdb[0] == REPORT_LUNS) + ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data; + + return 
BFA_IOIM_LM_PRESENT; + } + } + + if ((cdb->scsi_cdb[0] == INQUIRY) && + (scsilun_to_int((struct scsi_lun *)&lun) == 0)) { + ioim->proc_rsp_data = bfa_ioim_lm_proc_inq_data; + return BFA_IOIM_LM_PRESENT; + } + + if (cdb->scsi_cdb[0] == TEST_UNIT_READY) + return BFA_IOIM_LM_LUN_NOT_RDY; + + return BFA_IOIM_LM_LUN_NOT_SUP; +} + +static bfa_boolean_t +bfa_ioim_lm_proc_rsp_data_dummy(struct bfa_ioim_s *ioim) +{ + return BFA_TRUE; +} + +static void +bfa_ioim_lm_fetch_lun(struct bfa_ioim_s *ioim, u8 *rl_data, int offset, + int buf_lun_cnt) +{ + struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa); + struct scsi_lun *lun_data = (struct scsi_lun *)(rl_data + offset); + struct scsi_lun lun; + int i, j; + + bfa_trc(ioim->bfa, buf_lun_cnt); + for (j = 0; j < buf_lun_cnt; j++) { + lun = *((struct scsi_lun *)(lun_data + j)); + for (i = 0; i < MAX_LUN_MASK_CFG; i++) { + if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE) + continue; + if ((lun_list[i].rp_wwn == bfa_ioim_rp_wwn(ioim)) && + (lun_list[i].lp_wwn == bfa_ioim_lp_wwn(ioim)) && + (scsilun_to_int((struct scsi_lun *)&lun_list[i].lun) + == scsilun_to_int((struct scsi_lun *)&lun))) { + lun_list[i].state = BFA_IOIM_LUN_MASK_FETCHED; + break; + } + } /* next lun in mask DB */ + } /* next lun in buf */ +} + +static int +bfa_ioim_lm_update_lun_sg(struct bfa_ioim_s *ioim, u32 *pgdlen, + struct scsi_report_luns_data_s *rl) +{ + struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio; + struct scatterlist *sg = scsi_sglist(cmnd); + struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa); + struct scsi_lun *prev_rl_data = NULL, *base_rl_data; + int i, j, sgeid, lun_fetched_cnt = 0, prev_sg_len = 0, base_count; + int lun_across_sg_bytes, bytes_from_next_buf; + u64 last_lun, temp_last_lun; + + /* fetch luns from the first sg element */ + bfa_ioim_lm_fetch_lun(ioim, (u8 *)(rl->lun), 0, + (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1); + + /* fetch luns from multiple sg elements */ + scsi_for_each_sg(cmnd, sg, scsi_sg_count(cmnd), sgeid) { + if (sgeid == 0) { + prev_sg_len = sg_dma_len(sg); + prev_rl_data = (struct scsi_lun *) + phys_to_virt(sg_dma_address(sg)); + continue; + } + + /* if the buf is having more data */ + lun_across_sg_bytes = prev_sg_len % sizeof(struct scsi_lun); + if (lun_across_sg_bytes) { + bfa_trc(ioim->bfa, lun_across_sg_bytes); + bfa_stats(ioim->itnim, lm_lun_across_sg); + bytes_from_next_buf = sizeof(struct scsi_lun) - + lun_across_sg_bytes; + + /* from next buf take higher bytes */ + temp_last_lun = *((u64 *) + phys_to_virt(sg_dma_address(sg))); + last_lun |= temp_last_lun >> + (lun_across_sg_bytes * BITS_PER_BYTE); + + /* from prev buf take higher bytes */ + temp_last_lun = *((u64 *)(prev_rl_data + + (prev_sg_len - lun_across_sg_bytes))); + temp_last_lun >>= bytes_from_next_buf * BITS_PER_BYTE; + last_lun = last_lun | (temp_last_lun << + (bytes_from_next_buf * BITS_PER_BYTE)); + + bfa_ioim_lm_fetch_lun(ioim, (u8 *)&last_lun, 0, 1); + } else + bytes_from_next_buf = 0; + + *pgdlen += sg_dma_len(sg); + prev_sg_len = sg_dma_len(sg); + prev_rl_data = (struct scsi_lun *) + phys_to_virt(sg_dma_address(sg)); + bfa_ioim_lm_fetch_lun(ioim, (u8 *)prev_rl_data, + bytes_from_next_buf, + sg_dma_len(sg) / sizeof(struct scsi_lun)); + } + + /* update the report luns data - based on fetched luns */ + sg = scsi_sglist(cmnd); + base_rl_data = (struct scsi_lun *)rl->lun; + base_count = (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1; + for (i = 0, j = 0; i < MAX_LUN_MASK_CFG; i++) { + if (lun_list[i].state == 
BFA_IOIM_LUN_MASK_FETCHED) { + base_rl_data[j] = lun_list[i].lun; + lun_list[i].state = BFA_IOIM_LUN_MASK_ACTIVE; + j++; + lun_fetched_cnt++; + } + + if (j > base_count) { + j = 0; + sg = sg_next(sg); + base_rl_data = (struct scsi_lun *) + phys_to_virt(sg_dma_address(sg)); + base_count = sg_dma_len(sg) / sizeof(struct scsi_lun); + } + } + + bfa_trc(ioim->bfa, lun_fetched_cnt); + return lun_fetched_cnt; +} + +static bfa_boolean_t +bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim) +{ + struct scsi_inquiry_data_s *inq; + struct scatterlist *sg = scsi_sglist((struct scsi_cmnd *)ioim->dio); + + ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy; + inq = (struct scsi_inquiry_data_s *)phys_to_virt(sg_dma_address(sg)); + + bfa_trc(ioim->bfa, inq->device_type); + inq->peripheral_qual = SCSI_INQ_PQ_NOT_CON; + return 0; +} + +static bfa_boolean_t +bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim) +{ + struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio; + struct scatterlist *sg = scsi_sglist(cmnd); + struct bfi_ioim_rsp_s *m; + struct scsi_report_luns_data_s *rl = NULL; + int lun_count = 0, lun_fetched_cnt = 0; + u32 residue, pgdlen = 0; + + ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy; + if (bfa_get_lun_mask_status(ioim->bfa) != BFA_LUNMASK_ENABLED) + return BFA_TRUE; + + m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg; + if (m->scsi_status == SCSI_STATUS_CHECK_CONDITION) + return BFA_TRUE; + + pgdlen = sg_dma_len(sg); + bfa_trc(ioim->bfa, pgdlen); + rl = (struct scsi_report_luns_data_s *)phys_to_virt(sg_dma_address(sg)); + lun_count = cpu_to_be32(rl->lun_list_length) / sizeof(struct scsi_lun); + lun_fetched_cnt = bfa_ioim_lm_update_lun_sg(ioim, &pgdlen, rl); + + if (lun_count == lun_fetched_cnt) + return BFA_TRUE; + + bfa_trc(ioim->bfa, lun_count); + bfa_trc(ioim->bfa, lun_fetched_cnt); + bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length)); + + if (be32_to_cpu(rl->lun_list_length) <= pgdlen) + rl->lun_list_length = be32_to_cpu(lun_fetched_cnt) * + sizeof(struct scsi_lun); + else + bfa_stats(ioim->itnim, lm_small_buf_addresidue); + + bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length)); + bfa_trc(ioim->bfa, be32_to_cpu(m->residue)); + + residue = be32_to_cpu(m->residue); + residue += (lun_count - lun_fetched_cnt) * sizeof(struct scsi_lun); + bfa_stats(ioim->itnim, lm_wire_residue_changed); + m->residue = be32_to_cpu(residue); + bfa_trc(ioim->bfa, ioim->nsges); + return BFA_FALSE; +} static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete) @@ -1991,7 +2432,8 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete) if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) && m->sns_len) { sns_len = m->sns_len; - snsinfo = ioim->iosp->snsinfo; + snsinfo = BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp, + ioim->iotag); } /* @@ -2013,6 +2455,299 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete) } static void +__bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_ioim_s *ioim = cbarg; + int sns_len = 0xD; + u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio); + struct scsi_sense_s *snsinfo; + + if (!complete) { + bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB); + return; + } + + snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG( + ioim->fcpim->fcp, ioim->iotag); + snsinfo->rsp_code = SCSI_SENSE_CUR_ERR; + snsinfo->add_sense_length = 0xa; + snsinfo->asc = SCSI_ASC_LUN_NOT_SUPPORTED; + snsinfo->sense_key = ILLEGAL_REQUEST; + bfa_trc(ioim->bfa, residue); + bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK, + 
SCSI_STATUS_CHECK_CONDITION, sns_len, + (u8 *)snsinfo, residue); +} + +static void +__bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_ioim_s *ioim = cbarg; + int sns_len = 0xD; + u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio); + struct scsi_sense_s *snsinfo; + + if (!complete) { + bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB); + return; + } + + snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp, + ioim->iotag); + snsinfo->rsp_code = SCSI_SENSE_CUR_ERR; + snsinfo->sense_key = SCSI_MP_IEC_UNIT_ATTN; + snsinfo->asc = SCSI_ASC_TOCC; + snsinfo->add_sense_length = 0x6; + snsinfo->ascq = SCSI_ASCQ_RL_DATA_CHANGED; + bfa_trc(ioim->bfa, residue); + bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK, + SCSI_STATUS_CHECK_CONDITION, sns_len, + (u8 *)snsinfo, residue); +} + +static void +__bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_ioim_s *ioim = cbarg; + int sns_len = 0xD; + u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio); + struct scsi_sense_s *snsinfo; + + if (!complete) { + bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB); + return; + } + + snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG( + ioim->fcpim->fcp, ioim->iotag); + snsinfo->rsp_code = SCSI_SENSE_CUR_ERR; + snsinfo->add_sense_length = 0xa; + snsinfo->sense_key = NOT_READY; + snsinfo->asc = SCSI_ASC_LUN_NOT_READY; + snsinfo->ascq = SCSI_ASCQ_MAN_INTR_REQ; + bfa_trc(ioim->bfa, residue); + bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK, + SCSI_STATUS_CHECK_CONDITION, sns_len, + (u8 *)snsinfo, residue); +} + +void +bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn, + u16 rp_tag, u8 lp_tag) +{ + struct bfa_lun_mask_s *lun_list; + u8 i; + + if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG) + return; + + lun_list = bfa_get_lun_mask_list(bfa); + for (i = 0; i < MAX_LUN_MASK_CFG; i++) { + if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) { + if ((lun_list[i].lp_wwn == lp_wwn) && + (lun_list[i].rp_wwn == rp_wwn)) { + lun_list[i].rp_tag = rp_tag; + lun_list[i].lp_tag = lp_tag; + } + } + } +} + +/* + * set UA for all active luns in LM DB + */ +static void +bfa_ioim_lm_set_ua(struct bfa_s *bfa) +{ + struct bfa_lun_mask_s *lunm_list; + int i; + + lunm_list = bfa_get_lun_mask_list(bfa); + for (i = 0; i < MAX_LUN_MASK_CFG; i++) { + if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE) + continue; + lunm_list[i].ua = BFA_IOIM_LM_UA_SET; + } +} + +bfa_status_t +bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 update) +{ + struct bfa_lunmask_cfg_s *lun_mask; + + bfa_trc(bfa, bfa_get_lun_mask_status(bfa)); + if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG) + return BFA_STATUS_FAILED; + + if (bfa_get_lun_mask_status(bfa) == update) + return BFA_STATUS_NO_CHANGE; + + lun_mask = bfa_get_lun_mask(bfa); + lun_mask->status = update; + + if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED) + bfa_ioim_lm_set_ua(bfa); + + return bfa_dconf_update(bfa); +} + +bfa_status_t +bfa_fcpim_lunmask_clear(struct bfa_s *bfa) +{ + int i; + struct bfa_lun_mask_s *lunm_list; + + bfa_trc(bfa, bfa_get_lun_mask_status(bfa)); + if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG) + return BFA_STATUS_FAILED; + + lunm_list = bfa_get_lun_mask_list(bfa); + for (i = 0; i < MAX_LUN_MASK_CFG; i++) { + if (lunm_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) { + if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID) + bfa_rport_unset_lunmask(bfa, + BFA_RPORT_FROM_TAG(bfa, lunm_list[i].rp_tag)); + } + } + + memset(lunm_list, 0, sizeof(struct 
bfa_lun_mask_s) * MAX_LUN_MASK_CFG); + return bfa_dconf_update(bfa); +} + +bfa_status_t +bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf) +{ + struct bfa_lunmask_cfg_s *lun_mask; + + bfa_trc(bfa, bfa_get_lun_mask_status(bfa)); + if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG) + return BFA_STATUS_FAILED; + + lun_mask = bfa_get_lun_mask(bfa); + memcpy(buf, lun_mask, sizeof(struct bfa_lunmask_cfg_s)); + return BFA_STATUS_OK; +} + +bfa_status_t +bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn, + wwn_t rpwwn, struct scsi_lun lun) +{ + struct bfa_lun_mask_s *lunm_list; + struct bfa_rport_s *rp = NULL; + int i, free_index = MAX_LUN_MASK_CFG + 1; + struct bfa_fcs_lport_s *port = NULL; + struct bfa_fcs_rport_s *rp_fcs; + + bfa_trc(bfa, bfa_get_lun_mask_status(bfa)); + if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG) + return BFA_STATUS_FAILED; + + port = bfa_fcs_lookup_port(&((struct bfad_s *)bfa->bfad)->bfa_fcs, + vf_id, *pwwn); + if (port) { + *pwwn = port->port_cfg.pwwn; + rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn); + rp = rp_fcs->bfa_rport; + } + + lunm_list = bfa_get_lun_mask_list(bfa); + /* if entry exists */ + for (i = 0; i < MAX_LUN_MASK_CFG; i++) { + if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE) + free_index = i; + if ((lunm_list[i].lp_wwn == *pwwn) && + (lunm_list[i].rp_wwn == rpwwn) && + (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) == + scsilun_to_int((struct scsi_lun *)&lun))) + return BFA_STATUS_ENTRY_EXISTS; + } + + if (free_index > MAX_LUN_MASK_CFG) + return BFA_STATUS_MAX_ENTRY_REACHED; + + if (rp) { + lunm_list[free_index].lp_tag = bfa_lps_get_tag_from_pid(bfa, + rp->rport_info.local_pid); + lunm_list[free_index].rp_tag = rp->rport_tag; + } else { + lunm_list[free_index].lp_tag = BFA_LP_TAG_INVALID; + lunm_list[free_index].rp_tag = BFA_RPORT_TAG_INVALID; + } + + lunm_list[free_index].lp_wwn = *pwwn; + lunm_list[free_index].rp_wwn = rpwwn; + lunm_list[free_index].lun = lun; + lunm_list[free_index].state = BFA_IOIM_LUN_MASK_ACTIVE; + + /* set for all luns in this rp */ + for (i = 0; i < MAX_LUN_MASK_CFG; i++) { + if ((lunm_list[i].lp_wwn == *pwwn) && + (lunm_list[i].rp_wwn == rpwwn)) + lunm_list[i].ua = BFA_IOIM_LM_UA_SET; + } + + return bfa_dconf_update(bfa); +} + +bfa_status_t +bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn, + wwn_t rpwwn, struct scsi_lun lun) +{ + struct bfa_lun_mask_s *lunm_list; + struct bfa_rport_s *rp = NULL; + struct bfa_fcs_lport_s *port = NULL; + struct bfa_fcs_rport_s *rp_fcs; + int i; + + /* in min cfg lunm_list could be NULL but no commands should run. 
*/ + if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG) + return BFA_STATUS_FAILED; + + bfa_trc(bfa, bfa_get_lun_mask_status(bfa)); + bfa_trc(bfa, *pwwn); + bfa_trc(bfa, rpwwn); + bfa_trc(bfa, scsilun_to_int((struct scsi_lun *)&lun)); + + if (*pwwn == 0) { + port = bfa_fcs_lookup_port( + &((struct bfad_s *)bfa->bfad)->bfa_fcs, + vf_id, *pwwn); + if (port) { + *pwwn = port->port_cfg.pwwn; + rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn); + rp = rp_fcs->bfa_rport; + } + } + + lunm_list = bfa_get_lun_mask_list(bfa); + for (i = 0; i < MAX_LUN_MASK_CFG; i++) { + if ((lunm_list[i].lp_wwn == *pwwn) && + (lunm_list[i].rp_wwn == rpwwn) && + (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) == + scsilun_to_int((struct scsi_lun *)&lun))) { + lunm_list[i].lp_wwn = 0; + lunm_list[i].rp_wwn = 0; + int_to_scsilun(0, &lunm_list[i].lun); + lunm_list[i].state = BFA_IOIM_LUN_MASK_INACTIVE; + if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID) { + lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID; + lunm_list[i].lp_tag = BFA_LP_TAG_INVALID; + } + return bfa_dconf_update(bfa); + } + } + + /* set for all luns in this rp */ + for (i = 0; i < MAX_LUN_MASK_CFG; i++) { + if ((lunm_list[i].lp_wwn == *pwwn) && + (lunm_list[i].rp_wwn == rpwwn)) + lunm_list[i].ua = BFA_IOIM_LM_UA_SET; + } + + return BFA_STATUS_ENTRY_NOT_EXISTS; +} + +static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete) { struct bfa_ioim_s *ioim = cbarg; @@ -2022,6 +2757,7 @@ __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete) return; } + ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy; bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED, 0, 0, NULL, 0); } @@ -2037,6 +2773,7 @@ __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete) return; } + ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy; bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV, 0, 0, NULL, 0); } @@ -2051,6 +2788,7 @@ __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete) return; } + ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy; bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio); } @@ -2189,12 +2927,12 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim) */ switch (m->cmnd.iodir) { case FCP_IODIR_READ: - bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa)); + bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_fn_lpu(ioim->bfa)); bfa_stats(itnim, input_reqs); ioim->itnim->stats.rd_throughput += fcp_dl; break; case FCP_IODIR_WRITE: - bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa)); + bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_fn_lpu(ioim->bfa)); bfa_stats(itnim, output_reqs); ioim->itnim->stats.wr_throughput += fcp_dl; break; @@ -2202,16 +2940,16 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim) bfa_stats(itnim, input_reqs); bfa_stats(itnim, output_reqs); default: - bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa)); + bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa)); } if (itnim->seq_rec || (scsi_bufflen(cmnd) & (sizeof(u32) - 1))) - bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa)); + bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa)); /* * queue I/O message to firmware */ - bfa_reqq_produce(ioim->bfa, ioim->reqq); + bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh); return BFA_TRUE; } @@ -2269,14 +3007,14 @@ bfa_ioim_send_abort(struct bfa_ioim_s *ioim) else msgop = BFI_IOIM_H2I_IOCLEANUP_REQ; - bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa)); + bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_fn_lpu(ioim->bfa)); m->io_tag = cpu_to_be16(ioim->iotag); 
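	/*
	 * abort_tag is bumped for every abort/cleanup request sent to the
	 * firmware; bfa_ioim_isr() compares the response's abort_tag against
	 * ioim->abort_tag so a stale completion for an earlier abort can be
	 * detected and ignored.
	 */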
m->abort_tag = ++ioim->abort_tag; /* * queue I/O message to firmware */ - bfa_reqq_produce(ioim->bfa, ioim->reqq); + bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh); return BFA_TRUE; } @@ -2360,46 +3098,32 @@ bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov) * Memory allocation and initialization. */ void -bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo) +bfa_ioim_attach(struct bfa_fcpim_s *fcpim) { struct bfa_ioim_s *ioim; + struct bfa_fcp_mod_s *fcp = fcpim->fcp; struct bfa_ioim_sp_s *iosp; u16 i; - u8 *snsinfo; - u32 snsbufsz; /* * claim memory first */ - ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo); + ioim = (struct bfa_ioim_s *) bfa_mem_kva_curp(fcp); fcpim->ioim_arr = ioim; - bfa_meminfo_kva(minfo) = (u8 *) (ioim + fcpim->num_ioim_reqs); + bfa_mem_kva_curp(fcp) = (u8 *) (ioim + fcpim->fcp->num_ioim_reqs); - iosp = (struct bfa_ioim_sp_s *) bfa_meminfo_kva(minfo); + iosp = (struct bfa_ioim_sp_s *) bfa_mem_kva_curp(fcp); fcpim->ioim_sp_arr = iosp; - bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs); - - /* - * Claim DMA memory for per IO sense data. - */ - snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN; - fcpim->snsbase.pa = bfa_meminfo_dma_phys(minfo); - bfa_meminfo_dma_phys(minfo) += snsbufsz; - - fcpim->snsbase.kva = bfa_meminfo_dma_virt(minfo); - bfa_meminfo_dma_virt(minfo) += snsbufsz; - snsinfo = fcpim->snsbase.kva; - bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa); + bfa_mem_kva_curp(fcp) = (u8 *) (iosp + fcpim->fcp->num_ioim_reqs); /* * Initialize ioim free queues */ - INIT_LIST_HEAD(&fcpim->ioim_free_q); INIT_LIST_HEAD(&fcpim->ioim_resfree_q); INIT_LIST_HEAD(&fcpim->ioim_comp_q); - for (i = 0; i < fcpim->num_ioim_reqs; - i++, ioim++, iosp++, snsinfo += BFI_IOIM_SNSLEN) { + for (i = 0; i < fcpim->fcp->num_ioim_reqs; + i++, ioim++, iosp++) { /* * initialize IOIM */ @@ -2408,22 +3132,20 @@ bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo) ioim->bfa = fcpim->bfa; ioim->fcpim = fcpim; ioim->iosp = iosp; - iosp->snsinfo = snsinfo; + ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy; INIT_LIST_HEAD(&ioim->sgpg_q); bfa_reqq_winit(&ioim->iosp->reqq_wait, bfa_ioim_qresume, ioim); bfa_sgpg_winit(&ioim->iosp->sgpg_wqe, bfa_ioim_sgpg_alloced, ioim); bfa_sm_set_state(ioim, bfa_ioim_sm_uninit); - - list_add_tail(&ioim->qe, &fcpim->ioim_free_q); } } void bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) { - struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); + struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m; struct bfa_ioim_s *ioim; u16 iotag; @@ -2448,6 +3170,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) evt = BFA_IOIM_SM_DONE; else evt = BFA_IOIM_SM_COMP; + ioim->proc_rsp_data(ioim); break; case BFI_IOIM_STS_TIMEDOUT: @@ -2483,6 +3206,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) if (rsp->abort_tag != ioim->abort_tag) { bfa_trc(ioim->bfa, rsp->abort_tag); bfa_trc(ioim->bfa, ioim->abort_tag); + ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy; return; } @@ -2501,13 +3225,14 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) WARN_ON(1); } + ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy; bfa_sm_send_event(ioim, evt); } void bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m) { - struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); + struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m; struct bfa_ioim_s *ioim; u16 iotag; @@ -2518,7 +3243,16 
@@ bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m) WARN_ON(BFA_IOIM_TAG_2_ID(ioim->iotag) != iotag); bfa_ioim_cb_profile_comp(fcpim, ioim); - bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD); + + if (bfa_get_lun_mask_status(bfa) != BFA_LUNMASK_ENABLED) { + bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD); + return; + } + + if (ioim->proc_rsp_data(ioim) == BFA_TRUE) + bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD); + else + bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP); } /* @@ -2573,18 +3307,21 @@ struct bfa_ioim_s * bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio, struct bfa_itnim_s *itnim, u16 nsges) { - struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); + struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); struct bfa_ioim_s *ioim; + struct bfa_iotag_s *iotag = NULL; /* * alocate IOIM resource */ - bfa_q_deq(&fcpim->ioim_free_q, &ioim); - if (!ioim) { + bfa_q_deq(&fcpim->fcp->iotag_ioim_free_q, &iotag); + if (!iotag) { bfa_stats(itnim, no_iotags); return NULL; } + ioim = BFA_IOIM_FROM_TAG(fcpim, iotag->tag); + ioim->dio = dio; ioim->itnim = itnim; ioim->nsges = nsges; @@ -2601,7 +3338,8 @@ bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio, void bfa_ioim_free(struct bfa_ioim_s *ioim) { - struct bfa_fcpim_mod_s *fcpim = ioim->fcpim; + struct bfa_fcpim_s *fcpim = ioim->fcpim; + struct bfa_iotag_s *iotag; if (ioim->nsgpgs > 0) bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs); @@ -2610,13 +3348,51 @@ bfa_ioim_free(struct bfa_ioim_s *ioim) fcpim->ios_active--; ioim->iotag &= BFA_IOIM_IOTAG_MASK; + + WARN_ON(!(ioim->iotag < + (fcpim->fcp->num_ioim_reqs + fcpim->fcp->num_fwtio_reqs))); + iotag = BFA_IOTAG_FROM_TAG(fcpim->fcp, ioim->iotag); + + if (ioim->iotag < fcpim->fcp->num_ioim_reqs) + list_add_tail(&iotag->qe, &fcpim->fcp->iotag_ioim_free_q); + else + list_add_tail(&iotag->qe, &fcpim->fcp->iotag_tio_free_q); + list_del(&ioim->qe); - list_add_tail(&ioim->qe, &fcpim->ioim_free_q); } void bfa_ioim_start(struct bfa_ioim_s *ioim) { + struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio; + struct bfa_lps_s *lps; + enum bfa_ioim_lm_status status; + struct scsi_lun scsilun; + + if (bfa_get_lun_mask_status(ioim->bfa) == BFA_LUNMASK_ENABLED) { + lps = BFA_IOIM_TO_LPS(ioim); + int_to_scsilun(cmnd->device->lun, &scsilun); + status = bfa_ioim_lm_check(ioim, lps, + ioim->itnim->rport, scsilun); + if (status == BFA_IOIM_LM_LUN_NOT_RDY) { + bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_RDY); + bfa_stats(ioim->itnim, lm_lun_not_rdy); + return; + } + + if (status == BFA_IOIM_LM_LUN_NOT_SUP) { + bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_SUP); + bfa_stats(ioim->itnim, lm_lun_not_sup); + return; + } + + if (status == BFA_IOIM_LM_RPL_DATA_CHANGED) { + bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_RPL_DC); + bfa_stats(ioim->itnim, lm_rpl_data_changed); + return; + } + } + bfa_ioim_cb_profile_start(ioim->fcpim, ioim); /* @@ -3021,7 +3797,7 @@ bfa_tskim_send(struct bfa_tskim_s *tskim) * build i/o request message next */ bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ, - bfa_lpuid(tskim->bfa)); + bfa_fn_lpu(tskim->bfa)); m->tsk_tag = cpu_to_be16(tskim->tsk_tag); m->itn_fhdl = tskim->itnim->rport->fw_handle; @@ -3032,7 +3808,7 @@ bfa_tskim_send(struct bfa_tskim_s *tskim) /* * queue I/O message to firmware */ - bfa_reqq_produce(tskim->bfa, itnim->reqq); + bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh); return BFA_TRUE; } @@ -3056,14 +3832,14 @@ bfa_tskim_send_abort(struct bfa_tskim_s *tskim) * build i/o request message next */ bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ, - 
bfa_lpuid(tskim->bfa)); + bfa_fn_lpu(tskim->bfa)); m->tsk_tag = cpu_to_be16(tskim->tsk_tag); /* * queue I/O message to firmware */ - bfa_reqq_produce(tskim->bfa, itnim->reqq); + bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh); return BFA_TRUE; } @@ -3129,14 +3905,16 @@ bfa_tskim_cleanup(struct bfa_tskim_s *tskim) * Memory allocation and initialization. */ void -bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo) +bfa_tskim_attach(struct bfa_fcpim_s *fcpim) { struct bfa_tskim_s *tskim; + struct bfa_fcp_mod_s *fcp = fcpim->fcp; u16 i; INIT_LIST_HEAD(&fcpim->tskim_free_q); + INIT_LIST_HEAD(&fcpim->tskim_unused_q); - tskim = (struct bfa_tskim_s *) bfa_meminfo_kva(minfo); + tskim = (struct bfa_tskim_s *) bfa_mem_kva_curp(fcp); fcpim->tskim_arr = tskim; for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) { @@ -3155,13 +3933,13 @@ bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo) list_add_tail(&tskim->qe, &fcpim->tskim_free_q); } - bfa_meminfo_kva(minfo) = (u8 *) tskim; + bfa_mem_kva_curp(fcp) = (u8 *) tskim; } void bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) { - struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); + struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m; struct bfa_tskim_s *tskim; u16 tsk_tag = be16_to_cpu(rsp->tsk_tag); @@ -3188,7 +3966,7 @@ bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) struct bfa_tskim_s * bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk) { - struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); + struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); struct bfa_tskim_s *tskim; bfa_q_deq(&fcpim->tskim_free_q, &tskim); @@ -3233,3 +4011,221 @@ bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim, list_add_tail(&tskim->qe, &itnim->tsk_q); bfa_sm_send_event(tskim, BFA_TSKIM_SM_START); } + +void +bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw) +{ + struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); + struct list_head *qe; + int i; + + for (i = 0; i < (fcpim->num_tskim_reqs - num_tskim_fw); i++) { + bfa_q_deq_tail(&fcpim->tskim_free_q, &qe); + list_add_tail(qe, &fcpim->tskim_unused_q); + } +} + +/* BFA FCP module - parent module for fcpim */ + +BFA_MODULE(fcp); + +static void +bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, + struct bfa_s *bfa) +{ + struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa); + struct bfa_mem_kva_s *fcp_kva = BFA_MEM_FCP_KVA(bfa); + struct bfa_mem_dma_s *seg_ptr; + u16 nsegs, idx, per_seg_ios, num_io_req; + u32 km_len = 0; + + /* + * ZERO for num_ioim_reqs and num_fwtio_reqs is allowed config value. + * So if the values are non zero, adjust them appropriately. 
+ */ + if (cfg->fwcfg.num_ioim_reqs && + cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN) + cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN; + else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX) + cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX; + + if (cfg->fwcfg.num_fwtio_reqs > BFA_FWTIO_MAX) + cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX; + + num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs); + if (num_io_req > BFA_IO_MAX) { + if (cfg->fwcfg.num_ioim_reqs && cfg->fwcfg.num_fwtio_reqs) { + cfg->fwcfg.num_ioim_reqs = BFA_IO_MAX/2; + cfg->fwcfg.num_fwtio_reqs = BFA_IO_MAX/2; + } else if (cfg->fwcfg.num_fwtio_reqs) + cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX; + else + cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX; + } + + bfa_fcpim_meminfo(cfg, &km_len); + + num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs); + km_len += num_io_req * sizeof(struct bfa_iotag_s); + km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itn_s); + + /* dma memory */ + nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN); + per_seg_ios = BFI_MEM_NREQS_SEG(BFI_IOIM_SNSLEN); + + bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) { + if (num_io_req >= per_seg_ios) { + num_io_req -= per_seg_ios; + bfa_mem_dma_setup(minfo, seg_ptr, + per_seg_ios * BFI_IOIM_SNSLEN); + } else + bfa_mem_dma_setup(minfo, seg_ptr, + num_io_req * BFI_IOIM_SNSLEN); + } + + /* kva memory */ + bfa_mem_kva_setup(minfo, fcp_kva, km_len); +} + +static void +bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, + struct bfa_pcidev_s *pcidev) +{ + struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa); + struct bfa_mem_dma_s *seg_ptr; + u16 idx, nsegs, num_io_req; + + fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs; + fcp->num_fwtio_reqs = cfg->fwcfg.num_fwtio_reqs; + fcp->num_itns = cfg->fwcfg.num_rports; + fcp->bfa = bfa; + + /* + * Setup the pool of snsbase addr's, that is passed to fw as + * part of bfi_iocfc_cfg_s. + */ + num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs); + nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN); + + bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) { + + if (!bfa_mem_dma_virt(seg_ptr)) + break; + + fcp->snsbase[idx].pa = bfa_mem_dma_phys(seg_ptr); + fcp->snsbase[idx].kva = bfa_mem_dma_virt(seg_ptr); + bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa); + } + + bfa_fcpim_attach(fcp, bfad, cfg, pcidev); + + bfa_iotag_attach(fcp); + + fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp); + bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr + + (fcp->num_itns * sizeof(struct bfa_itn_s)); + memset(fcp->itn_arr, 0, + (fcp->num_itns * sizeof(struct bfa_itn_s))); +} + +static void +bfa_fcp_detach(struct bfa_s *bfa) +{ +} + +static void +bfa_fcp_start(struct bfa_s *bfa) +{ + struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa); + + /* + * bfa_init() with flash read is complete. now invalidate the stale + * content of lun mask like unit attention, rp tag and lp tag. 
+ */ + bfa_ioim_lm_init(fcp->bfa); +} + +static void +bfa_fcp_stop(struct bfa_s *bfa) +{ +} + +static void +bfa_fcp_iocdisable(struct bfa_s *bfa) +{ + struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa); + + /* Enqueue unused ioim resources to free_q */ + list_splice_tail_init(&fcp->iotag_unused_q, &fcp->iotag_ioim_free_q); + + bfa_fcpim_iocdisable(fcp); +} + +void +bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw) +{ + struct bfa_fcp_mod_s *mod = BFA_FCP_MOD(bfa); + struct list_head *qe; + int i; + + for (i = 0; i < (mod->num_ioim_reqs - num_ioim_fw); i++) { + bfa_q_deq_tail(&mod->iotag_ioim_free_q, &qe); + list_add_tail(qe, &mod->iotag_unused_q); + } +} + +void +bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport, + void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m)) +{ + struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa); + struct bfa_itn_s *itn; + + itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag); + itn->isr = isr; +} + +/* + * Itn interrupt processing. + */ +void +bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m) +{ + struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa); + union bfi_itn_i2h_msg_u msg; + struct bfa_itn_s *itn; + + msg.msg = m; + itn = BFA_ITN_FROM_TAG(fcp, msg.create_rsp->bfa_handle); + + if (itn->isr) + itn->isr(bfa, m); + else + WARN_ON(1); +} + +void +bfa_iotag_attach(struct bfa_fcp_mod_s *fcp) +{ + struct bfa_iotag_s *iotag; + u16 num_io_req, i; + + iotag = (struct bfa_iotag_s *) bfa_mem_kva_curp(fcp); + fcp->iotag_arr = iotag; + + INIT_LIST_HEAD(&fcp->iotag_ioim_free_q); + INIT_LIST_HEAD(&fcp->iotag_tio_free_q); + INIT_LIST_HEAD(&fcp->iotag_unused_q); + + num_io_req = fcp->num_ioim_reqs + fcp->num_fwtio_reqs; + for (i = 0; i < num_io_req; i++, iotag++) { + memset(iotag, 0, sizeof(struct bfa_iotag_s)); + iotag->tag = i; + if (i < fcp->num_ioim_reqs) + list_add_tail(&iotag->qe, &fcp->iotag_ioim_free_q); + else + list_add_tail(&iotag->qe, &fcp->iotag_tio_free_q); + } + + bfa_mem_kva_curp(fcp) = (u8 *) iotag; +} diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h index 1e38dade842..1080bcb81cb 100644 --- a/drivers/scsi/bfa/bfa_fcpim.h +++ b/drivers/scsi/bfa/bfa_fcpim.h @@ -24,6 +24,34 @@ #include "bfa_defs_svc.h" #include "bfa_cs.h" +/* FCP module related definitions */ +#define BFA_IO_MAX BFI_IO_MAX +#define BFA_FWTIO_MAX 2000 + +struct bfa_fcp_mod_s; +struct bfa_iotag_s { + struct list_head qe; /* queue element */ + u16 tag; /* FW IO tag */ +}; + +struct bfa_itn_s { + bfa_isr_func_t isr; +}; + +void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport, + void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m)); +void bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m); +void bfa_iotag_attach(struct bfa_fcp_mod_s *fcp); +void bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw); + +#define BFA_FCP_MOD(_hal) (&(_hal)->modules.fcp_mod) +#define BFA_MEM_FCP_KVA(__bfa) (&(BFA_FCP_MOD(__bfa)->kva_seg)) +#define BFA_IOTAG_FROM_TAG(_fcp, _tag) \ + (&(_fcp)->iotag_arr[(_tag & BFA_IOIM_IOTAG_MASK)]) +#define BFA_ITN_FROM_TAG(_fcp, _tag) \ + ((_fcp)->itn_arr + ((_tag) & ((_fcp)->num_itns - 1))) +#define BFA_SNSINFO_FROM_TAG(_fcp, _tag) \ + bfa_mem_get_dmabuf_kva(_fcp, _tag, BFI_IOIM_SNSLEN) #define BFA_ITNIM_MIN 32 #define BFA_ITNIM_MAX 1024 @@ -51,14 +79,22 @@ bfa_ioim_get_index(u32 n) { if (n >= (1UL)<<22) return BFA_IOBUCKET_MAX - 1; n >>= 8; - if (n >= (1UL)<<16) - n >>= 16; pos += 16; - if (n >= 1 << 8) - n >>= 8; pos += 8; - if (n >= 1 << 4) - n >>= 4; pos += 4; - if (n >= 1 << 2) - n >>= 2; pos += 2; + if (n >= (1UL)<<16) { + n >>= 16; + pos += 16; + } 
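	/*
	 * The remaining tests continue the binary search for the highest set
	 * bit of the (already right-shifted by 8) transfer length: each block
	 * halves the search window and accumulates the bit position in 'pos',
	 * which becomes the I/O size bucket index used for latency profiling
	 * (capped at BFA_IOBUCKET_MAX - 1 by the early return above).
	 */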
+ if (n >= 1 << 8) { + n >>= 8; + pos += 8; + } + if (n >= 1 << 4) { + n >>= 4; + pos += 4; + } + if (n >= 1 << 2) { + n >>= 2; + pos += 2; + } if (n >= 1 << 1) pos += 1; @@ -74,26 +110,26 @@ struct bfad_ioim_s; struct bfad_tskim_s; typedef void (*bfa_fcpim_profile_t) (struct bfa_ioim_s *ioim); +typedef bfa_boolean_t (*bfa_ioim_lm_proc_rsp_data_t) (struct bfa_ioim_s *ioim); -struct bfa_fcpim_mod_s { +struct bfa_fcpim_s { struct bfa_s *bfa; + struct bfa_fcp_mod_s *fcp; struct bfa_itnim_s *itnim_arr; struct bfa_ioim_s *ioim_arr; struct bfa_ioim_sp_s *ioim_sp_arr; struct bfa_tskim_s *tskim_arr; - struct bfa_dma_s snsbase; int num_itnims; - int num_ioim_reqs; int num_tskim_reqs; u32 path_tov; u16 q_depth; u8 reqq; /* Request queue to be used */ - u8 rsvd; + u8 lun_masking_pending; struct list_head itnim_q; /* queue of active itnim */ - struct list_head ioim_free_q; /* free IO resources */ struct list_head ioim_resfree_q; /* IOs waiting for f/w */ struct list_head ioim_comp_q; /* IO global comp Q */ struct list_head tskim_free_q; + struct list_head tskim_unused_q; /* Unused tskim Q */ u32 ios_active; /* current active IOs */ u32 delay_comp; struct bfa_fcpim_del_itn_stats_s del_itn_stats; @@ -104,6 +140,25 @@ struct bfa_fcpim_mod_s { bfa_fcpim_profile_t profile_start; }; +/* Max FCP dma segs required */ +#define BFA_FCP_DMA_SEGS BFI_IOIM_SNSBUF_SEGS + +struct bfa_fcp_mod_s { + struct bfa_s *bfa; + struct list_head iotag_ioim_free_q; /* free IO resources */ + struct list_head iotag_tio_free_q; /* free IO resources */ + struct list_head iotag_unused_q; /* unused IO resources*/ + struct bfa_iotag_s *iotag_arr; + struct bfa_itn_s *itn_arr; + int num_ioim_reqs; + int num_fwtio_reqs; + int num_itns; + struct bfa_dma_s snsbase[BFA_FCP_DMA_SEGS]; + struct bfa_fcpim_s fcpim; + struct bfa_mem_dma_s dma_seg[BFA_FCP_DMA_SEGS]; + struct bfa_mem_kva_s kva_seg; +}; + /* * BFA IO (initiator mode) */ @@ -111,7 +166,7 @@ struct bfa_ioim_s { struct list_head qe; /* queue elememt */ bfa_sm_t sm; /* BFA ioim state machine */ struct bfa_s *bfa; /* BFA module */ - struct bfa_fcpim_mod_s *fcpim; /* parent fcpim module */ + struct bfa_fcpim_s *fcpim; /* parent fcpim module */ struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */ struct bfad_ioim_s *dio; /* driver IO handle */ u16 iotag; /* FWI IO tag */ @@ -124,12 +179,13 @@ struct bfa_ioim_s { bfa_cb_cbfn_t io_cbfn; /* IO completion handler */ struct bfa_ioim_sp_s *iosp; /* slow-path IO handling */ u8 reqq; /* Request queue for I/O */ + u8 mode; /* IO is passthrough or not */ u64 start_time; /* IO's Profile start val */ + bfa_ioim_lm_proc_rsp_data_t proc_rsp_data; /* RSP data adjust */ }; struct bfa_ioim_sp_s { struct bfi_msg_s comp_rspmsg; /* IO comp f/w response */ - u8 *snsinfo; /* sense info for this IO */ struct bfa_sgpg_wqe_s sgpg_wqe; /* waitq elem for sgpg */ struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ bfa_boolean_t abort_explicit; /* aborted by OS */ @@ -143,7 +199,7 @@ struct bfa_tskim_s { struct list_head qe; bfa_sm_t sm; struct bfa_s *bfa; /* BFA module */ - struct bfa_fcpim_mod_s *fcpim; /* parent fcpim module */ + struct bfa_fcpim_s *fcpim; /* parent fcpim module */ struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */ struct bfad_tskim_s *dtsk; /* driver task mgmt cmnd */ bfa_boolean_t notify; /* notify itnim on TM comp */ @@ -182,13 +238,13 @@ struct bfa_itnim_s { struct bfa_wc_s wc; /* waiting counter */ struct bfa_timer_s timer; /* pending IO TOV */ struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ - struct 
bfa_fcpim_mod_s *fcpim; /* fcpim module */ + struct bfa_fcpim_s *fcpim; /* fcpim module */ struct bfa_itnim_iostats_s stats; struct bfa_itnim_ioprofile_s ioprofile; }; #define bfa_itnim_is_online(_itnim) ((_itnim)->is_online) -#define BFA_FCPIM_MOD(_hal) (&(_hal)->modules.fcpim_mod) +#define BFA_FCPIM(_hal) (&(_hal)->modules.fcp_mod.fcpim) #define BFA_IOIM_TAG_2_ID(_iotag) ((_iotag) & BFA_IOIM_IOTAG_MASK) #define BFA_IOIM_FROM_TAG(_fcpim, _iotag) \ (&fcpim->ioim_arr[(_iotag & BFA_IOIM_IOTAG_MASK)]) @@ -196,15 +252,19 @@ struct bfa_itnim_s { (&fcpim->tskim_arr[_tmtag & (fcpim->num_tskim_reqs - 1)]) #define bfa_io_profile_start_time(_bfa) \ - (_bfa->modules.fcpim_mod.io_profile_start_time) + ((_bfa)->modules.fcp_mod.fcpim.io_profile_start_time) #define bfa_fcpim_get_io_profile(_bfa) \ - (_bfa->modules.fcpim_mod.io_profile) + ((_bfa)->modules.fcp_mod.fcpim.io_profile) #define bfa_ioim_update_iotag(__ioim) do { \ uint16_t k = (__ioim)->iotag >> BFA_IOIM_RETRY_TAG_OFFSET; \ k++; (__ioim)->iotag &= BFA_IOIM_IOTAG_MASK; \ (__ioim)->iotag |= k << BFA_IOIM_RETRY_TAG_OFFSET; \ } while (0) +#define BFA_IOIM_TO_LPS(__ioim) \ + BFA_LPS_FROM_TAG(BFA_LPS_MOD(__ioim->bfa), \ + __ioim->itnim->rport->rport_info.lp_tag) + static inline bfa_boolean_t bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim) { @@ -217,8 +277,7 @@ bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim) /* * function prototypes */ -void bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, - struct bfa_meminfo_s *minfo); +void bfa_ioim_attach(struct bfa_fcpim_s *fcpim); void bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); void bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); @@ -228,18 +287,15 @@ void bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, void bfa_ioim_iocdisable(struct bfa_ioim_s *ioim); void bfa_ioim_tov(struct bfa_ioim_s *ioim); -void bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, - struct bfa_meminfo_s *minfo); +void bfa_tskim_attach(struct bfa_fcpim_s *fcpim); void bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); void bfa_tskim_iodone(struct bfa_tskim_s *tskim); void bfa_tskim_iocdisable(struct bfa_tskim_s *tskim); void bfa_tskim_cleanup(struct bfa_tskim_s *tskim); +void bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw); -void bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, - u32 *dm_len); -void bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, - struct bfa_meminfo_s *minfo); -void bfa_itnim_detach(struct bfa_fcpim_mod_s *fcpim); +void bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len); +void bfa_itnim_attach(struct bfa_fcpim_s *fcpim); void bfa_itnim_iocdisable(struct bfa_itnim_s *itnim); void bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); void bfa_itnim_iodone(struct bfa_itnim_s *itnim); @@ -252,13 +308,19 @@ bfa_boolean_t bfa_itnim_hold_io(struct bfa_itnim_s *itnim); void bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov); u16 bfa_fcpim_path_tov_get(struct bfa_s *bfa); u16 bfa_fcpim_qdepth_get(struct bfa_s *bfa); +bfa_status_t bfa_fcpim_port_iostats(struct bfa_s *bfa, + struct bfa_itnim_iostats_s *stats, u8 lp_tag); +void bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *fcpim_stats, + struct bfa_itnim_iostats_s *itnim_stats); +bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time); +bfa_status_t bfa_fcpim_profile_off(struct bfa_s *bfa); #define bfa_fcpim_ioredirect_enabled(__bfa) \ - (((struct bfa_fcpim_mod_s *)(BFA_FCPIM_MOD(__bfa)))->ioredirect) + (((struct bfa_fcpim_s *)(BFA_FCPIM(__bfa)))->ioredirect) #define bfa_fcpim_get_next_reqq(__bfa, __qid) \ 
{ \ - struct bfa_fcpim_mod_s *__fcpim = BFA_FCPIM_MOD(__bfa); \ + struct bfa_fcpim_s *__fcpim = BFA_FCPIM(__bfa); \ __fcpim->reqq++; \ __fcpim->reqq &= (BFI_IOC_MAX_CQS - 1); \ *(__qid) = __fcpim->reqq; \ @@ -352,4 +414,14 @@ void bfa_tskim_start(struct bfa_tskim_s *tskim, void bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk, enum bfi_tskim_status tsk_status); +void bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, + wwn_t rp_wwn, u16 rp_tag, u8 lp_tag); +bfa_status_t bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 on_off); +bfa_status_t bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf); +bfa_status_t bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, + wwn_t *pwwn, wwn_t rpwwn, struct scsi_lun lun); +bfa_status_t bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, + wwn_t *pwwn, wwn_t rpwwn, struct scsi_lun lun); +bfa_status_t bfa_fcpim_lunmask_clear(struct bfa_s *bfa); + #endif /* __BFA_FCPIM_H__ */ diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c index 9b43ca4b677..eaac57e1dde 100644 --- a/drivers/scsi/bfa/bfa_fcs.c +++ b/drivers/scsi/bfa/bfa_fcs.c @@ -20,6 +20,7 @@ */ #include "bfad_drv.h" +#include "bfad_im.h" #include "bfa_fcs.h" #include "bfa_fcbuild.h" @@ -92,25 +93,49 @@ bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad, void bfa_fcs_init(struct bfa_fcs_s *fcs) { - int i, npbc_vports; + int i; struct bfa_fcs_mod_s *mod; - struct bfi_pbc_vport_s pbc_vports[BFI_PBC_MAX_VPORTS]; for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) { mod = &fcs_modules[i]; if (mod->modinit) mod->modinit(fcs); } +} + +/* + * FCS update cfg - reset the pwwn/nwwn of fabric base logical port + * with values learned during bfa_init firmware GETATTR REQ. + */ +void +bfa_fcs_update_cfg(struct bfa_fcs_s *fcs) +{ + struct bfa_fcs_fabric_s *fabric = &fcs->fabric; + struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg; + struct bfa_ioc_s *ioc = &fabric->fcs->bfa->ioc; + + port_cfg->nwwn = ioc->attr->nwwn; + port_cfg->pwwn = ioc->attr->pwwn; +} + +/* + * fcs pbc vport initialization + */ +void +bfa_fcs_pbc_vport_init(struct bfa_fcs_s *fcs) +{ + int i, npbc_vports; + struct bfi_pbc_vport_s pbc_vports[BFI_PBC_MAX_VPORTS]; + /* Initialize pbc vports */ if (!fcs->min_cfg) { npbc_vports = - bfa_iocfc_get_pbc_vports(fcs->bfa, pbc_vports); + bfa_iocfc_get_pbc_vports(fcs->bfa, pbc_vports); for (i = 0; i < npbc_vports; i++) bfa_fcb_pbc_vport_create(fcs->bfa->bfad, pbc_vports[i]); } } - /* * brief * FCS driver details initialization. 
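The bfa_fcpim_get_next_reqq() macro earlier in this header picks the CPE request queue for the next I/O by a simple power-of-two round robin: the per-fcpim reqq counter is incremented and then masked with BFI_IOC_MAX_CQS - 1. The standalone sketch below mirrors only that selection step; the queue count of 4 and the struct/function names are illustrative assumptions, not the driver's API.

#include <stdio.h>

#define MAX_CQS 4                       /* stand-in for BFI_IOC_MAX_CQS (power of two) */

struct fcpim_sketch {
        unsigned int reqq;              /* last request queue handed out */
};

/* Mirrors the increment-and-mask step of bfa_fcpim_get_next_reqq(). */
static unsigned int next_reqq(struct fcpim_sketch *fcpim)
{
        fcpim->reqq++;
        fcpim->reqq &= (MAX_CQS - 1);   /* wrap within [0, MAX_CQS) */
        return fcpim->reqq;
}

int main(void)
{
        struct fcpim_sketch fcpim = { .reqq = 0 };
        int i;

        for (i = 0; i < 8; i++)
                printf("I/O %d -> request queue %u\n", i, next_reqq(&fcpim));
        return 0;
}

Successive I/Os cycle 1, 2, 3, 0, 1, ... across the configured queues; the real macro additionally stores the result through the caller's __qid pointer.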
@@ -168,11 +193,14 @@ bfa_fcs_exit(struct bfa_fcs_s *fcs) #define BFA_FCS_FABRIC_CLEANUP_DELAY (10000) /* Milliseconds */ #define bfa_fcs_fabric_set_opertype(__fabric) do { \ - if (bfa_fcport_get_topology((__fabric)->fcs->bfa) \ - == BFA_PORT_TOPOLOGY_P2P) \ + if (bfa_fcport_get_topology((__fabric)->fcs->bfa) \ + == BFA_PORT_TOPOLOGY_P2P) { \ + if (fabric->fab_type == BFA_FCS_FABRIC_SWITCHED) \ (__fabric)->oper_type = BFA_PORT_TYPE_NPORT; \ else \ - (__fabric)->oper_type = BFA_PORT_TYPE_NLPORT; \ + (__fabric)->oper_type = BFA_PORT_TYPE_P2P; \ + } else \ + (__fabric)->oper_type = BFA_PORT_TYPE_NLPORT; \ } while (0) /* @@ -196,6 +224,9 @@ static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg, u32 rsp_len, u32 resid_len, struct fchs_s *rspfchs); +static u8 bfa_fcs_fabric_oper_bbscn(struct bfa_fcs_fabric_s *fabric); +static bfa_boolean_t bfa_fcs_fabric_is_bbscn_enabled( + struct bfa_fcs_fabric_s *fabric); static void bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); @@ -269,8 +300,8 @@ bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric, break; case BFA_FCS_FABRIC_SM_DELETE: - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit); - bfa_wc_down(&fabric->fcs->wc); + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); + bfa_fcs_fabric_delete(fabric); break; default: @@ -322,7 +353,8 @@ bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric, case BFA_FCS_FABRIC_SM_CONT_OP: bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, - fabric->bb_credit); + fabric->bb_credit, + bfa_fcs_fabric_oper_bbscn(fabric)); fabric->fab_type = BFA_FCS_FABRIC_SWITCHED; if (fabric->auth_reqd && fabric->is_auth) { @@ -350,7 +382,8 @@ bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric, case BFA_FCS_FABRIC_SM_NO_FABRIC: fabric->fab_type = BFA_FCS_FABRIC_N2N; bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, - fabric->bb_credit); + fabric->bb_credit, + bfa_fcs_fabric_oper_bbscn(fabric)); bfa_fcs_fabric_notify_online(fabric); bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_nofabric); break; @@ -518,7 +551,11 @@ bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric, case BFA_FCS_FABRIC_SM_NO_FABRIC: bfa_trc(fabric->fcs, fabric->bb_credit); bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, - fabric->bb_credit); + fabric->bb_credit, + bfa_fcs_fabric_oper_bbscn(fabric)); + break; + + case BFA_FCS_FABRIC_SM_RETRY_OP: break; default: @@ -764,6 +801,10 @@ bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status) case BFA_STATUS_FABRIC_RJT: fabric->stats.flogi_rejects++; + if (fabric->lps->lsrjt_rsn == FC_LS_RJT_RSN_LOGICAL_ERROR && + fabric->lps->lsrjt_expl == FC_LS_RJT_EXP_NO_ADDL_INFO) + fabric->fcs->bbscn_flogi_rjt = BFA_TRUE; + bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP); return; @@ -793,6 +834,7 @@ bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status) */ fabric->bport.port_topo.pn2n.rem_port_wwn = fabric->lps->pr_pwwn; + fabric->fab_type = BFA_FCS_FABRIC_N2N; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC); } @@ -808,13 +850,17 @@ bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric) { struct bfa_s *bfa = fabric->fcs->bfa; struct bfa_lport_cfg_s *pcfg = &fabric->bport.port_cfg; - u8 alpa = 0; + u8 alpa = 0, bb_scn = 0; if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) alpa = bfa_fcport_get_myalpa(bfa); + if (bfa_fcs_fabric_is_bbscn_enabled(fabric) && + (!fabric->fcs->bbscn_flogi_rjt)) + bb_scn = BFA_FCS_PORT_DEF_BB_SCN; + bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_fcport_get_maxfrsize(bfa), - pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd); + 
pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd, bb_scn); fabric->stats.flogi_sent++; } @@ -873,6 +919,40 @@ bfa_fcs_fabric_delay(void *cbarg) } /* + * Computes operating BB_SCN value + */ +static u8 +bfa_fcs_fabric_oper_bbscn(struct bfa_fcs_fabric_s *fabric) +{ + u8 pr_bbscn = fabric->lps->pr_bbscn; + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fabric->fcs->bfa); + + if (!(fcport->cfg.bb_scn_state && pr_bbscn)) + return 0; + + /* return max of local/remote bb_scn values */ + return ((pr_bbscn > BFA_FCS_PORT_DEF_BB_SCN) ? + pr_bbscn : BFA_FCS_PORT_DEF_BB_SCN); +} + +/* + * Check if BB_SCN can be enabled. + */ +static bfa_boolean_t +bfa_fcs_fabric_is_bbscn_enabled(struct bfa_fcs_fabric_s *fabric) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fabric->fcs->bfa); + + if (bfa_ioc_get_fcmode(&fabric->fcs->bfa->ioc) && + fcport->cfg.bb_scn_state && + !bfa_fcport_is_qos_enabled(fabric->fcs->bfa) && + !bfa_fcport_is_trunk_enabled(fabric->fcs->bfa)) + return BFA_TRUE; + else + return BFA_FALSE; +} + +/* * Delete all vports and wait for vport delete completions. */ static void @@ -989,6 +1069,7 @@ void bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + fabric->fcs->bbscn_flogi_rjt = BFA_FALSE; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN); } @@ -1192,6 +1273,7 @@ bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric, } fabric->bb_credit = be16_to_cpu(flogi->csp.bbcred); + fabric->lps->pr_bbscn = (be16_to_cpu(flogi->csp.rxsz) >> 12); bport->port_topo.pn2n.rem_port_wwn = flogi->port_name; bport->port_topo.pn2n.reply_oxid = fchs->ox_id; @@ -1224,9 +1306,10 @@ bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric) n2n_port->reply_oxid, pcfg->pwwn, pcfg->nwwn, bfa_fcport_get_maxfrsize(bfa), - bfa_fcport_get_rx_bbcredit(bfa)); + bfa_fcport_get_rx_bbcredit(bfa), + bfa_fcs_fabric_oper_bbscn(fabric)); - bfa_fcxp_send(fcxp, NULL, fabric->vf_id, fabric->lps->lp_tag, + bfa_fcxp_send(fcxp, NULL, fabric->vf_id, fabric->lps->bfa_tag, BFA_FALSE, FC_CLASS_3, reqlen, &fchs, bfa_fcs_fabric_flogiacc_comp, fabric, FC_MAX_PDUSZ, 0); @@ -1245,6 +1328,29 @@ bfa_fcs_fabric_flogiacc_comp(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_trc(fabric->fcs, status); } + +/* + * Send AEN notification + */ +static void +bfa_fcs_fabric_aen_post(struct bfa_fcs_lport_s *port, + enum bfa_port_aen_event event) +{ + struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad; + struct bfa_aen_entry_s *aen_entry; + + bfad_get_aen_entry(bfad, aen_entry); + if (!aen_entry) + return; + + aen_entry->aen_data.port.pwwn = bfa_fcs_lport_get_pwwn(port); + aen_entry->aen_data.port.fwwn = bfa_fcs_lport_get_fabric_name(port); + + /* Send the AEN notification */ + bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq, + BFA_AEN_CAT_PORT, event); +} + /* * * @param[in] fabric - fabric @@ -1276,6 +1382,8 @@ bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric, BFA_LOG(KERN_WARNING, bfad, bfa_log_level, "Base port WWN = %s Fabric WWN = %s\n", pwwn_ptr, fwwn_ptr); + bfa_fcs_fabric_aen_post(&fabric->bport, + BFA_PORT_AEN_FABRIC_NAME_CHANGE); } } @@ -1298,6 +1406,45 @@ bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id) } /* + * Return the list of local logical ports present in the given VF. 
+ * + * @param[in] vf vf for which logical ports are returned + * @param[out] lpwwn returned logical port wwn list + * @param[in,out] nlports in:size of lpwwn list; + * out:total elements present, + * actual elements returned is limited by the size + */ +void +bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t lpwwn[], int *nlports) +{ + struct list_head *qe; + struct bfa_fcs_vport_s *vport; + int i = 0; + struct bfa_fcs_s *fcs; + + if (vf == NULL || lpwwn == NULL || *nlports == 0) + return; + + fcs = vf->fcs; + + bfa_trc(fcs, vf->vf_id); + bfa_trc(fcs, (uint32_t) *nlports); + + lpwwn[i++] = vf->bport.port_cfg.pwwn; + + list_for_each(qe, &vf->vport_q) { + if (i >= *nlports) + break; + + vport = (struct bfa_fcs_vport_s *) qe; + lpwwn[i++] = vport->lport.port_cfg.pwwn; + } + + bfa_trc(fcs, i); + *nlports = i; +} + +/* * BFA FCS PPORT ( physical port) */ static void diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h index 61cdce4bd91..e75e07d2591 100644 --- a/drivers/scsi/bfa/bfa_fcs.h +++ b/drivers/scsi/bfa/bfa_fcs.h @@ -254,6 +254,9 @@ struct bfa_fcs_fabric_s; #define BFA_FCS_PORT_SYMBNAME_OSINFO_SZ 48 #define BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ 16 +/* bb_scn value in 2^bb_scn */ +#define BFA_FCS_PORT_DEF_BB_SCN 3 + /* * Get FC port ID for a logical port. */ @@ -379,6 +382,7 @@ void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport); void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport); void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport); void bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport); +void bfa_fcs_vport_stop_comp(struct bfa_fcs_vport_s *vport); #define BFA_FCS_RPORT_DEF_DEL_TIMEOUT 90 /* in secs */ #define BFA_FCS_RPORT_MAX_RETRIES (5) @@ -420,6 +424,7 @@ struct bfa_fcs_rport_s { enum fc_cos fc_cos; /* FC classes of service supp */ bfa_boolean_t cisc; /* CISC capable device */ bfa_boolean_t prlo; /* processing prlo or LOGO */ + bfa_boolean_t plogi_pending; /* Rx Plogi Pending */ wwn_t pwwn; /* port wwn of rport */ wwn_t nwwn; /* node wwn of rport */ struct bfa_rport_symname_s psym_name; /* port symbolic name */ @@ -447,6 +452,8 @@ bfa_fcs_rport_get_halrport(struct bfa_fcs_rport_s *rport) /* * bfa fcs rport API functions */ +void bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport, + struct bfa_rport_attr_s *attr); struct bfa_fcs_rport_s *bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn); struct bfa_fcs_rport_s *bfa_fcs_rport_lookup_by_nwwn( @@ -591,10 +598,21 @@ void bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim); void bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim, struct fchs_s *fchs, u16 len); -#define BFA_FCS_FDMI_SUPORTED_SPEEDS (FDMI_TRANS_SPEED_1G | \ - FDMI_TRANS_SPEED_2G | \ - FDMI_TRANS_SPEED_4G | \ - FDMI_TRANS_SPEED_8G) +#define BFA_FCS_FDMI_SUPP_SPEEDS_4G (FDMI_TRANS_SPEED_1G | \ + FDMI_TRANS_SPEED_2G | \ + FDMI_TRANS_SPEED_4G) + +#define BFA_FCS_FDMI_SUPP_SPEEDS_8G (FDMI_TRANS_SPEED_1G | \ + FDMI_TRANS_SPEED_2G | \ + FDMI_TRANS_SPEED_4G | \ + FDMI_TRANS_SPEED_8G) + +#define BFA_FCS_FDMI_SUPP_SPEEDS_16G (FDMI_TRANS_SPEED_2G | \ + FDMI_TRANS_SPEED_4G | \ + FDMI_TRANS_SPEED_8G | \ + FDMI_TRANS_SPEED_16G) + +#define BFA_FCS_FDMI_SUPP_SPEEDS_10G FDMI_TRANS_SPEED_10G /* * HBA Attribute Block : BFA internal representation. 
Note : Some variable @@ -649,12 +667,15 @@ struct bfa_fcs_s { struct bfa_trc_mod_s *trcmod; /* tracing module */ bfa_boolean_t vf_enabled; /* VF mode is enabled */ bfa_boolean_t fdmi_enabled; /* FDMI is enabled */ + bfa_boolean_t bbscn_enabled; /* Driver Config Parameter */ + bfa_boolean_t bbscn_flogi_rjt;/* FLOGI reject due to BB_SCN */ bfa_boolean_t min_cfg; /* min cfg enabled/disabled */ u16 port_vfid; /* port default VF ID */ struct bfa_fcs_driver_info_s driver_info; struct bfa_fcs_fabric_s fabric; /* base fabric state machine */ struct bfa_fcs_stats_s stats; /* FCS statistics */ struct bfa_wc_s wc; /* waiting counter */ + int fcs_aen_seq; }; /* @@ -715,6 +736,8 @@ void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad, bfa_boolean_t min_cfg); void bfa_fcs_init(struct bfa_fcs_s *fcs); +void bfa_fcs_pbc_vport_init(struct bfa_fcs_s *fcs); +void bfa_fcs_update_cfg(struct bfa_fcs_s *fcs); void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs, struct bfa_fcs_driver_info_s *driver_info); void bfa_fcs_exit(struct bfa_fcs_s *fcs); @@ -723,6 +746,7 @@ void bfa_fcs_exit(struct bfa_fcs_s *fcs); * bfa fcs vf public functions */ bfa_fcs_vf_t *bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id); +void bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t vpwwn[], int *nports); /* * fabric protected interface functions diff --git a/drivers/scsi/bfa/bfa_fcs_fcpim.c b/drivers/scsi/bfa/bfa_fcs_fcpim.c index e7b49f4cb51..9272840a240 100644 --- a/drivers/scsi/bfa/bfa_fcs_fcpim.c +++ b/drivers/scsi/bfa/bfa_fcs_fcpim.c @@ -37,6 +37,8 @@ static void bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs); +static void bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim, + enum bfa_itnim_aen_event event); /* * fcs_itnim_sm FCS itnim state machine events @@ -54,6 +56,7 @@ enum bfa_fcs_itnim_event { BFA_FCS_ITNIM_SM_INITIATOR = 9, /* rport is initiator */ BFA_FCS_ITNIM_SM_DELETE = 10, /* delete event from rport */ BFA_FCS_ITNIM_SM_PRLO = 11, /* delete event from rport */ + BFA_FCS_ITNIM_SM_RSP_NOT_SUPP = 12, /* cmd not supported rsp */ }; static void bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim, @@ -178,6 +181,10 @@ bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim, BFA_FCS_RETRY_TIMEOUT); break; + case BFA_FCS_ITNIM_SM_RSP_NOT_SUPP: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); + break; + case BFA_FCS_ITNIM_SM_OFFLINE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_fcxp_discard(itnim->fcxp); @@ -264,6 +271,7 @@ bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim, BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Target (WWN = %s) is online for initiator (WWN = %s)\n", rpwwn_buf, lpwwn_buf); + bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_ONLINE); break; case BFA_FCS_ITNIM_SM_OFFLINE: @@ -300,14 +308,17 @@ bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim, bfa_itnim_offline(itnim->bfa_itnim); wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port)); wwn2str(rpwwn_buf, itnim->rport->pwwn); - if (bfa_fcs_lport_is_online(itnim->rport->port) == BFA_TRUE) + if (bfa_fcs_lport_is_online(itnim->rport->port) == BFA_TRUE) { BFA_LOG(KERN_ERR, bfad, bfa_log_level, "Target (WWN = %s) connectivity lost for " "initiator (WWN = %s)\n", rpwwn_buf, lpwwn_buf); - else + bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_DISCONNECT); + } else { BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Target (WWN = %s) offlined by initiator (WWN = %s)\n", rpwwn_buf, lpwwn_buf); + 
bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_OFFLINE); + } break; case BFA_FCS_ITNIM_SM_DELETE: @@ -377,6 +388,33 @@ bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim, } static void +bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim, + enum bfa_itnim_aen_event event) +{ + struct bfa_fcs_rport_s *rport = itnim->rport; + struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad; + struct bfa_aen_entry_s *aen_entry; + + /* Don't post events for well known addresses */ + if (BFA_FCS_PID_IS_WKA(rport->pid)) + return; + + bfad_get_aen_entry(bfad, aen_entry); + if (!aen_entry) + return; + + aen_entry->aen_data.itnim.vf_id = rport->port->fabric->vf_id; + aen_entry->aen_data.itnim.ppwwn = bfa_fcs_lport_get_pwwn( + bfa_fcs_get_base_port(itnim->fcs)); + aen_entry->aen_data.itnim.lpwwn = bfa_fcs_lport_get_pwwn(rport->port); + aen_entry->aen_data.itnim.rpwwn = rport->pwwn; + + /* Send the AEN notification */ + bfad_im_post_vendor_event(aen_entry, bfad, ++rport->fcs->fcs_aen_seq, + BFA_AEN_CAT_ITNIM, event); +} + +static void bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced) { struct bfa_fcs_itnim_s *itnim = itnim_cbarg; @@ -447,6 +485,7 @@ bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, itnim->rport->scsi_function = BFA_RPORT_INITIATOR; itnim->stats.prli_rsp_acc++; + itnim->stats.initiator++; bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_OK); return; @@ -472,6 +511,10 @@ bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_trc(itnim->fcs, ls_rjt->reason_code_expl); itnim->stats.prli_rsp_rjt++; + if (ls_rjt->reason_code == FC_LS_RJT_RSN_CMD_NOT_SUPP) { + bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_NOT_SUPP); + return; + } bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_ERROR); } } diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c index 1d6be8c1447..d4f951fe753 100644 --- a/drivers/scsi/bfa/bfa_fcs_lport.c +++ b/drivers/scsi/bfa/bfa_fcs_lport.c @@ -16,6 +16,7 @@ */ #include "bfad_drv.h" +#include "bfad_im.h" #include "bfa_fcs.h" #include "bfa_fcbuild.h" #include "bfa_fc.h" @@ -74,6 +75,7 @@ enum bfa_fcs_lport_event { BFA_FCS_PORT_SM_OFFLINE = 3, BFA_FCS_PORT_SM_DELETE = 4, BFA_FCS_PORT_SM_DELRPORT = 5, + BFA_FCS_PORT_SM_STOP = 6, }; static void bfa_fcs_lport_sm_uninit(struct bfa_fcs_lport_s *port, @@ -86,6 +88,8 @@ static void bfa_fcs_lport_sm_offline(struct bfa_fcs_lport_s *port, enum bfa_fcs_lport_event event); static void bfa_fcs_lport_sm_deleting(struct bfa_fcs_lport_s *port, enum bfa_fcs_lport_event event); +static void bfa_fcs_lport_sm_stopping(struct bfa_fcs_lport_s *port, + enum bfa_fcs_lport_event event); static void bfa_fcs_lport_sm_uninit( @@ -123,6 +127,12 @@ bfa_fcs_lport_sm_init(struct bfa_fcs_lport_s *port, bfa_fcs_lport_deleted(port); break; + case BFA_FCS_PORT_SM_STOP: + /* If vport - send completion call back */ + if (port->vport) + bfa_fcs_vport_stop_comp(port->vport); + break; + case BFA_FCS_PORT_SM_OFFLINE: break; @@ -148,6 +158,23 @@ bfa_fcs_lport_sm_online( bfa_fcs_lport_offline_actions(port); break; + case BFA_FCS_PORT_SM_STOP: + __port_action[port->fabric->fab_type].offline(port); + + if (port->num_rports == 0) { + bfa_sm_set_state(port, bfa_fcs_lport_sm_init); + /* If vport - send completion call back */ + if (port->vport) + bfa_fcs_vport_stop_comp(port->vport); + } else { + bfa_sm_set_state(port, bfa_fcs_lport_sm_stopping); + list_for_each_safe(qe, qen, &port->rport_q) { + rport = (struct bfa_fcs_rport_s *) qe; + bfa_sm_send_event(rport, 
RPSM_EVENT_DELETE); + } + } + break; + case BFA_FCS_PORT_SM_DELETE: __port_action[port->fabric->fab_type].offline(port); @@ -189,6 +216,21 @@ bfa_fcs_lport_sm_offline( bfa_fcs_lport_online_actions(port); break; + case BFA_FCS_PORT_SM_STOP: + if (port->num_rports == 0) { + bfa_sm_set_state(port, bfa_fcs_lport_sm_init); + /* If vport - send completion call back */ + if (port->vport) + bfa_fcs_vport_stop_comp(port->vport); + } else { + bfa_sm_set_state(port, bfa_fcs_lport_sm_stopping); + list_for_each_safe(qe, qen, &port->rport_q) { + rport = (struct bfa_fcs_rport_s *) qe; + bfa_sm_send_event(rport, RPSM_EVENT_DELETE); + } + } + break; + case BFA_FCS_PORT_SM_DELETE: if (port->num_rports == 0) { bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit); @@ -212,6 +254,28 @@ bfa_fcs_lport_sm_offline( } static void +bfa_fcs_lport_sm_stopping(struct bfa_fcs_lport_s *port, + enum bfa_fcs_lport_event event) +{ + bfa_trc(port->fcs, port->port_cfg.pwwn); + bfa_trc(port->fcs, event); + + switch (event) { + case BFA_FCS_PORT_SM_DELRPORT: + if (port->num_rports == 0) { + bfa_sm_set_state(port, bfa_fcs_lport_sm_init); + /* If vport - send completion call back */ + if (port->vport) + bfa_fcs_vport_stop_comp(port->vport); + } + break; + + default: + bfa_sm_fault(port->fcs, event); + } +} + +static void bfa_fcs_lport_sm_deleting( struct bfa_fcs_lport_s *port, enum bfa_fcs_lport_event event) @@ -237,6 +301,31 @@ bfa_fcs_lport_sm_deleting( */ /* + * Send AEN notification + */ +static void +bfa_fcs_lport_aen_post(struct bfa_fcs_lport_s *port, + enum bfa_lport_aen_event event) +{ + struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad; + struct bfa_aen_entry_s *aen_entry; + + bfad_get_aen_entry(bfad, aen_entry); + if (!aen_entry) + return; + + aen_entry->aen_data.lport.vf_id = port->fabric->vf_id; + aen_entry->aen_data.lport.roles = port->port_cfg.roles; + aen_entry->aen_data.lport.ppwwn = bfa_fcs_lport_get_pwwn( + bfa_fcs_get_base_port(port->fcs)); + aen_entry->aen_data.lport.lpwwn = bfa_fcs_lport_get_pwwn(port); + + /* Send the AEN notification */ + bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq, + BFA_AEN_CAT_LPORT, event); +} + +/* * Send a LS reject */ static void @@ -265,6 +354,40 @@ bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs, } /* + * Send a FCCT Reject + */ +static void +bfa_fcs_lport_send_fcgs_rjt(struct bfa_fcs_lport_s *port, + struct fchs_s *rx_fchs, u8 reason_code, u8 reason_code_expl) +{ + struct fchs_s fchs; + struct bfa_fcxp_s *fcxp; + struct bfa_rport_s *bfa_rport = NULL; + int len; + struct ct_hdr_s *rx_cthdr = (struct ct_hdr_s *)(rx_fchs + 1); + struct ct_hdr_s *ct_hdr; + + bfa_trc(port->fcs, rx_fchs->d_id); + bfa_trc(port->fcs, rx_fchs->s_id); + + fcxp = bfa_fcs_fcxp_alloc(port->fcs); + if (!fcxp) + return; + + ct_hdr = bfa_fcxp_get_reqbuf(fcxp); + ct_hdr->gs_type = rx_cthdr->gs_type; + ct_hdr->gs_sub_type = rx_cthdr->gs_sub_type; + + len = fc_gs_rjt_build(&fchs, ct_hdr, rx_fchs->s_id, + bfa_fcs_lport_get_fcid(port), + rx_fchs->ox_id, reason_code, reason_code_expl); + + bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag, + BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, + FC_MAX_PDUSZ, 0); +} + +/* * Process incoming plogi from a remote port. 
*/ static void @@ -496,6 +619,7 @@ bfa_fcs_lport_online_actions(struct bfa_fcs_lport_s *port) BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Logical port online: WWN = %s Role = %s\n", lpwwn_buf, "Initiator"); + bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_ONLINE); bfad->bfad_flags |= BFAD_PORT_ONLINE; } @@ -514,14 +638,17 @@ bfa_fcs_lport_offline_actions(struct bfa_fcs_lport_s *port) wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port)); if (bfa_sm_cmp_state(port->fabric, - bfa_fcs_fabric_sm_online) == BFA_TRUE) + bfa_fcs_fabric_sm_online) == BFA_TRUE) { BFA_LOG(KERN_ERR, bfad, bfa_log_level, "Logical port lost fabric connectivity: WWN = %s Role = %s\n", lpwwn_buf, "Initiator"); - else + bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_DISCONNECT); + } else { BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Logical port taken offline: WWN = %s Role = %s\n", lpwwn_buf, "Initiator"); + bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_OFFLINE); + } list_for_each_safe(qe, qen, &port->rport_q) { rport = (struct bfa_fcs_rport_s *) qe; @@ -579,6 +706,7 @@ bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port) BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Logical port deleted: WWN = %s Role = %s\n", lpwwn_buf, "Initiator"); + bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_DELETE); /* Base port will be deleted by the OS driver */ if (port->vport) { @@ -647,6 +775,16 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport, bfa_fcs_lport_abts_acc(lport, fchs); return; } + + if (fchs->type == FC_TYPE_SERVICES) { + /* + * Unhandled FC-GS frames. Send a FC-CT Reject + */ + bfa_fcs_lport_send_fcgs_rjt(lport, fchs, CT_RSN_NOT_SUPP, + CT_NS_EXP_NOADDITIONAL); + return; + } + /* * look for a matching remote port ID */ @@ -835,8 +973,8 @@ bfa_fcs_lport_attach(struct bfa_fcs_lport_s *lport, struct bfa_fcs_s *fcs, lport->fcs = fcs; lport->fabric = bfa_fcs_vf_lookup(fcs, vf_id); lport->vport = vport; - lport->lp_tag = (vport) ? vport->lps->lp_tag : - lport->fabric->lps->lp_tag; + lport->lp_tag = (vport) ? 
vport->lps->bfa_tag : + lport->fabric->lps->bfa_tag; INIT_LIST_HEAD(&lport->rport_q); lport->num_rports = 0; @@ -866,6 +1004,7 @@ bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport, BFA_LOG(KERN_INFO, bfad, bfa_log_level, "New logical port created: WWN = %s Role = %s\n", lpwwn_buf, "Initiator"); + bfa_fcs_lport_aen_post(lport, BFA_LPORT_AEN_NEW); bfa_sm_set_state(lport, bfa_fcs_lport_sm_uninit); bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE); @@ -1074,6 +1213,8 @@ static void bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi, struct bfa_fcs_fdmi_hba_attr_s *hba_attr); static void bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi, struct bfa_fcs_fdmi_port_attr_s *port_attr); +u32 bfa_fcs_fdmi_convert_speed(enum bfa_port_speed pport_speed); + /* * fcs_fdmi_sm FCS FDMI state machine */ @@ -1672,7 +1813,7 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld) memcpy(attr->value, fcs_hba_attr->driver_version, templen); templen = fc_roundup(templen, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; - len += templen;; + len += templen; count++; attr->len = cpu_to_be16(templen + sizeof(attr->type) + sizeof(templen)); @@ -2160,12 +2301,36 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi, /* * Supported Speeds */ - port_attr->supp_speed = cpu_to_be32(BFA_FCS_FDMI_SUPORTED_SPEEDS); + switch (pport_attr.speed_supported) { + case BFA_PORT_SPEED_16GBPS: + port_attr->supp_speed = + cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_16G); + break; + + case BFA_PORT_SPEED_10GBPS: + port_attr->supp_speed = + cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_10G); + break; + + case BFA_PORT_SPEED_8GBPS: + port_attr->supp_speed = + cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_8G); + break; + + case BFA_PORT_SPEED_4GBPS: + port_attr->supp_speed = + cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_4G); + break; + + default: + bfa_sm_fault(port->fcs, pport_attr.speed_supported); + } /* * Current Speed */ - port_attr->curr_speed = cpu_to_be32(pport_attr.speed); + port_attr->curr_speed = cpu_to_be32( + bfa_fcs_fdmi_convert_speed(pport_attr.speed)); /* * Max PDU Size. @@ -2186,6 +2351,41 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi, } +/* + * Convert BFA speed to FDMI format. + */ +u32 +bfa_fcs_fdmi_convert_speed(bfa_port_speed_t pport_speed) +{ + u32 ret; + + switch (pport_speed) { + case BFA_PORT_SPEED_1GBPS: + case BFA_PORT_SPEED_2GBPS: + ret = pport_speed; + break; + + case BFA_PORT_SPEED_4GBPS: + ret = FDMI_TRANS_SPEED_4G; + break; + + case BFA_PORT_SPEED_8GBPS: + ret = FDMI_TRANS_SPEED_8G; + break; + + case BFA_PORT_SPEED_10GBPS: + ret = FDMI_TRANS_SPEED_10G; + break; + + case BFA_PORT_SPEED_16GBPS: + ret = FDMI_TRANS_SPEED_16G; + break; + + default: + ret = FDMI_TRANS_SPEED_UNKNOWN; + } + return ret; +} void bfa_fcs_lport_fdmi_init(struct bfa_fcs_lport_ms_s *ms) @@ -2829,7 +3029,8 @@ bfa_fcs_lport_ms_send_plogi(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced) bfa_hton3b(FC_MGMT_SERVER), bfa_fcs_lport_get_fcid(port), 0, port->port_cfg.pwwn, port->port_cfg.nwwn, - bfa_fcport_get_maxfrsize(port->fcs->bfa)); + bfa_fcport_get_maxfrsize(port->fcs->bfa), + bfa_fcport_get_rx_bbcredit(port->fcs->bfa)); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, @@ -3573,7 +3774,7 @@ bfa_fcs_lport_ns_send_plogi(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced) bfa_trc(port->fcs, port->pid); -fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); + fcxp = fcxp_alloced ? 
fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); if (!fcxp) { port->stats.ns_plogi_alloc_wait++; bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, @@ -3586,7 +3787,8 @@ fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); bfa_hton3b(FC_NAME_SERVER), bfa_fcs_lport_get_fcid(port), 0, port->port_cfg.pwwn, port->port_cfg.nwwn, - bfa_fcport_get_maxfrsize(port->fcs->bfa)); + bfa_fcport_get_maxfrsize(port->fcs->bfa), + bfa_fcport_get_rx_bbcredit(port->fcs->bfa)); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, @@ -4762,8 +4964,8 @@ bfa_fcs_lport_get_rport_max_speed(bfa_fcs_lport_t *port) while (qe != qh) { rport = (struct bfa_fcs_rport_s *) qe; if ((bfa_ntoh3b(rport->pid) > 0xFFF000) || - (bfa_fcs_rport_get_state(rport) == - BFA_RPORT_OFFLINE)) { + (bfa_fcs_rport_get_state(rport) == BFA_RPORT_OFFLINE) || + (rport->scsi_function != BFA_RPORT_TARGET)) { qe = bfa_q_next(qe); continue; } @@ -4776,17 +4978,15 @@ bfa_fcs_lport_get_rport_max_speed(bfa_fcs_lport_t *port) bfa_fcport_get_ratelim_speed(port->fcs->bfa); } - if ((rport_speed == BFA_PORT_SPEED_8GBPS) || - (rport_speed > port_speed)) { - max_speed = rport_speed; - break; - } else if (rport_speed > max_speed) { + if (rport_speed > max_speed) max_speed = rport_speed; - } qe = bfa_q_next(qe); } + if (max_speed > port_speed) + max_speed = port_speed; + bfa_trc(fcs, max_speed); return max_speed; } @@ -4918,6 +5118,7 @@ enum bfa_fcs_vport_event { BFA_FCS_VPORT_SM_DELCOMP = 11, /* lport delete completion */ BFA_FCS_VPORT_SM_RSP_DUP_WWN = 12, /* Dup wnn error*/ BFA_FCS_VPORT_SM_RSP_FAILED = 13, /* non-retryable failure */ + BFA_FCS_VPORT_SM_STOPCOMP = 14, /* vport delete completion */ }; static void bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport, @@ -4930,6 +5131,8 @@ static void bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport, enum bfa_fcs_vport_event event); static void bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport, enum bfa_fcs_vport_event event); +static void bfa_fcs_vport_sm_fdisc_rsp_wait(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event); static void bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport, enum bfa_fcs_vport_event event); static void bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport, @@ -4940,6 +5143,10 @@ static void bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport, enum bfa_fcs_vport_event event); static void bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport, enum bfa_fcs_vport_event event); +static void bfa_fcs_vport_sm_stopping(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event); +static void bfa_fcs_vport_sm_logo_for_stop(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event); static struct bfa_sm_table_s vport_sm_table[] = { {BFA_SM(bfa_fcs_vport_sm_uninit), BFA_FCS_VPORT_UNINIT}, @@ -4947,6 +5154,7 @@ static struct bfa_sm_table_s vport_sm_table[] = { {BFA_SM(bfa_fcs_vport_sm_offline), BFA_FCS_VPORT_OFFLINE}, {BFA_SM(bfa_fcs_vport_sm_fdisc), BFA_FCS_VPORT_FDISC}, {BFA_SM(bfa_fcs_vport_sm_fdisc_retry), BFA_FCS_VPORT_FDISC_RETRY}, + {BFA_SM(bfa_fcs_vport_sm_fdisc_rsp_wait), BFA_FCS_VPORT_FDISC_RSP_WAIT}, {BFA_SM(bfa_fcs_vport_sm_online), BFA_FCS_VPORT_ONLINE}, {BFA_SM(bfa_fcs_vport_sm_deleting), BFA_FCS_VPORT_DELETING}, {BFA_SM(bfa_fcs_vport_sm_cleanup), BFA_FCS_VPORT_CLEANUP}, @@ -5042,6 +5250,11 @@ bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport, bfa_fcs_vport_do_fdisc(vport); break; + case BFA_FCS_VPORT_SM_STOP: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup); + 
bfa_sm_send_event(&vport->lport, BFA_FCS_PORT_SM_STOP); + break; + case BFA_FCS_VPORT_SM_OFFLINE: /* * This can happen if the vport couldn't be initialzied @@ -5070,9 +5283,7 @@ bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport, switch (event) { case BFA_FCS_VPORT_SM_DELETE: - bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup); - bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE); - bfa_fcs_lport_delete(&vport->lport); + bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc_rsp_wait); break; case BFA_FCS_VPORT_SM_OFFLINE: @@ -5140,6 +5351,41 @@ bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport, } /* + * FDISC is in progress and we got a vport delete request - + * this is a wait state while we wait for fdisc response and + * we will transition to the appropriate state - on rsp status. + */ +static void +bfa_fcs_vport_sm_fdisc_rsp_wait(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event) +{ + bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); + bfa_trc(__vport_fcs(vport), event); + + switch (event) { + case BFA_FCS_VPORT_SM_RSP_OK: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_deleting); + bfa_fcs_lport_delete(&vport->lport); + break; + + case BFA_FCS_VPORT_SM_DELETE: + break; + + case BFA_FCS_VPORT_SM_OFFLINE: + case BFA_FCS_VPORT_SM_RSP_ERROR: + case BFA_FCS_VPORT_SM_RSP_FAILED: + case BFA_FCS_VPORT_SM_RSP_DUP_WWN: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup); + bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE); + bfa_fcs_lport_delete(&vport->lport); + break; + + default: + bfa_sm_fault(__vport_fcs(vport), event); + } +} + +/* * Vport is online (FDISC is complete). */ static void @@ -5155,6 +5401,11 @@ bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport, bfa_fcs_lport_delete(&vport->lport); break; + case BFA_FCS_VPORT_SM_STOP: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_stopping); + bfa_sm_send_event(&vport->lport, BFA_FCS_PORT_SM_STOP); + break; + case BFA_FCS_VPORT_SM_OFFLINE: bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline); bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE); @@ -5167,6 +5418,32 @@ bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport, } /* + * Vport is being stopped - awaiting lport stop completion to send + * LOGO to fabric. + */ +static void +bfa_fcs_vport_sm_stopping(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event) +{ + bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); + bfa_trc(__vport_fcs(vport), event); + + switch (event) { + case BFA_FCS_VPORT_SM_STOPCOMP: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_logo_for_stop); + bfa_fcs_vport_do_logo(vport); + break; + + case BFA_FCS_VPORT_SM_OFFLINE: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup); + break; + + default: + bfa_sm_fault(__vport_fcs(vport), event); + } +} + +/* * Vport is being deleted - awaiting lport delete completion to send * LOGO to fabric. */ @@ -5236,6 +5513,10 @@ bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport, bfa_fcs_vport_free(vport); break; + case BFA_FCS_VPORT_SM_STOPCOMP: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_created); + break; + case BFA_FCS_VPORT_SM_DELETE: break; @@ -5245,6 +5526,34 @@ bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport, } /* + * LOGO is sent to fabric. Vport stop is in progress. Lport stop cleanup + * is done. 
+ */ +static void +bfa_fcs_vport_sm_logo_for_stop(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event) +{ + bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); + bfa_trc(__vport_fcs(vport), event); + + switch (event) { + case BFA_FCS_VPORT_SM_OFFLINE: + bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE); + /* + * !!! fall through !!! + */ + + case BFA_FCS_VPORT_SM_RSP_OK: + case BFA_FCS_VPORT_SM_RSP_ERROR: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_created); + break; + + default: + bfa_sm_fault(__vport_fcs(vport), event); + } +} + +/* * LOGO is sent to fabric. Vport delete is in progress. Lport delete cleanup * is done. */ @@ -5282,6 +5591,31 @@ bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport, * fcs_vport_private FCS virtual port private functions */ /* + * Send AEN notification + */ +static void +bfa_fcs_vport_aen_post(struct bfa_fcs_lport_s *port, + enum bfa_lport_aen_event event) +{ + struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad; + struct bfa_aen_entry_s *aen_entry; + + bfad_get_aen_entry(bfad, aen_entry); + if (!aen_entry) + return; + + aen_entry->aen_data.lport.vf_id = port->fabric->vf_id; + aen_entry->aen_data.lport.roles = port->port_cfg.roles; + aen_entry->aen_data.lport.ppwwn = bfa_fcs_lport_get_pwwn( + bfa_fcs_get_base_port(port->fcs)); + aen_entry->aen_data.lport.lpwwn = bfa_fcs_lport_get_pwwn(port); + + /* Send the AEN notification */ + bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq, + BFA_AEN_CAT_LPORT, event); +} + +/* * This routine will be called to send a FDISC command. */ static void @@ -5308,8 +5642,11 @@ bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport) case FC_LS_RJT_EXP_INVALID_NPORT_ID: /* by Cisco */ if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES) bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); - else + else { + bfa_fcs_vport_aen_post(&vport->lport, + BFA_LPORT_AEN_NPIV_DUP_WWN); bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_DUP_WWN); + } break; case FC_LS_RJT_EXP_INSUFF_RES: @@ -5319,11 +5656,17 @@ bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport) */ if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES) bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); - else + else { + bfa_fcs_vport_aen_post(&vport->lport, + BFA_LPORT_AEN_NPIV_FABRIC_MAX); bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_FAILED); + } break; default: + if (vport->fdisc_retries == 0) + bfa_fcs_vport_aen_post(&vport->lport, + BFA_LPORT_AEN_NPIV_UNKNOWN); bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); } } @@ -5391,7 +5734,10 @@ void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport) { vport->vport_stats.fab_online++; - bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE); + if (bfa_fcs_fabric_npiv_capable(__vport_fabric(vport))) + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE); + else + vport->vport_stats.fab_no_npiv++; } /* @@ -5422,6 +5768,15 @@ bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport) } /* + * Stop completion callback from associated lport + */ +void +bfa_fcs_vport_stop_comp(struct bfa_fcs_vport_s *vport) +{ + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_STOPCOMP); +} + +/* * Delete completion callback from associated lport */ void diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c index caaee6f0693..52628d5d3c9 100644 --- a/drivers/scsi/bfa/bfa_fcs_rport.c +++ b/drivers/scsi/bfa/bfa_fcs_rport.c @@ -20,6 +20,7 @@ */ #include "bfad_drv.h" +#include "bfad_im.h" #include "bfa_fcs.h" #include "bfa_fcbuild.h" @@ -262,6 +263,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct 
bfa_fcs_rport_s *rport, break; case RPSM_EVENT_PLOGI_RCVD: + case RPSM_EVENT_PLOGI_COMP: case RPSM_EVENT_SCN: /* * Ignore, SCN is possibly online notification. @@ -470,6 +472,7 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport, break; case RPSM_EVENT_PRLO_RCVD: + case RPSM_EVENT_PLOGI_COMP: break; case RPSM_EVENT_LOGO_RCVD: @@ -484,9 +487,9 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport, break; case RPSM_EVENT_PLOGI_RCVD: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); + rport->plogi_pending = BFA_TRUE; + bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline); bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE); - bfa_fcs_rport_send_plogiacc(rport, NULL); break; case RPSM_EVENT_DELETE: @@ -891,6 +894,18 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport, switch (event) { case RPSM_EVENT_HCB_OFFLINE: + if (bfa_fcs_lport_is_online(rport->port) && + (rport->plogi_pending)) { + rport->plogi_pending = BFA_FALSE; + bfa_sm_set_state(rport, + bfa_fcs_rport_sm_plogiacc_sending); + bfa_fcs_rport_send_plogiacc(rport, NULL); + break; + } + /* + * !! fall through !! + */ + case RPSM_EVENT_ADDRESS_CHANGE: if (bfa_fcs_lport_is_online(rport->port)) { if (bfa_fcs_fabric_is_switched(rport->port->fabric)) { @@ -921,6 +936,8 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport, case RPSM_EVENT_SCN: case RPSM_EVENT_LOGO_RCVD: case RPSM_EVENT_PRLO_RCVD: + case RPSM_EVENT_PLOGI_RCVD: + case RPSM_EVENT_LOGO_IMP: /* * Ignore, already offline. */ @@ -957,10 +974,18 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport, */ if (bfa_fcs_lport_is_online(rport->port) && (!BFA_FCS_PID_IS_WKA(rport->pid))) { - bfa_sm_set_state(rport, - bfa_fcs_rport_sm_nsdisc_sending); - rport->ns_retries = 0; - bfa_fcs_rport_send_nsdisc(rport, NULL); + if (bfa_fcs_fabric_is_switched(rport->port->fabric)) { + bfa_sm_set_state(rport, + bfa_fcs_rport_sm_nsdisc_sending); + rport->ns_retries = 0; + bfa_fcs_rport_send_nsdisc(rport, NULL); + } else { + /* For N2N Direct Attach, try to re-login */ + bfa_sm_set_state(rport, + bfa_fcs_rport_sm_plogi_sending); + rport->plogi_retries = 0; + bfa_fcs_rport_send_plogi(rport, NULL); + } } else { /* * if it is not a well known address, reset the @@ -1356,7 +1381,8 @@ bfa_fcs_rport_send_plogi(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced) len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, bfa_fcs_lport_get_fcid(port), 0, port->port_cfg.pwwn, port->port_cfg.nwwn, - bfa_fcport_get_maxfrsize(port->fcs->bfa)); + bfa_fcport_get_maxfrsize(port->fcs->bfa), + bfa_fcport_get_rx_bbcredit(port->fcs->bfa)); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, bfa_fcs_rport_plogi_response, @@ -1476,7 +1502,8 @@ bfa_fcs_rport_send_plogiacc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced) rport->pid, bfa_fcs_lport_get_fcid(port), rport->reply_oxid, port->port_cfg.pwwn, port->port_cfg.nwwn, - bfa_fcport_get_maxfrsize(port->fcs->bfa)); + bfa_fcport_get_maxfrsize(port->fcs->bfa), + bfa_fcport_get_rx_bbcredit(port->fcs->bfa)); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); @@ -2015,6 +2042,35 @@ bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport) } static void +bfa_fcs_rport_aen_post(struct bfa_fcs_rport_s *rport, + enum bfa_rport_aen_event event, + struct bfa_rport_aen_data_s *data) +{ + struct bfa_fcs_lport_s *port = rport->port; + struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad; + struct 
bfa_aen_entry_s *aen_entry; + + bfad_get_aen_entry(bfad, aen_entry); + if (!aen_entry) + return; + + if (event == BFA_RPORT_AEN_QOS_PRIO) + aen_entry->aen_data.rport.priv.qos = data->priv.qos; + else if (event == BFA_RPORT_AEN_QOS_FLOWID) + aen_entry->aen_data.rport.priv.qos = data->priv.qos; + + aen_entry->aen_data.rport.vf_id = rport->port->fabric->vf_id; + aen_entry->aen_data.rport.ppwwn = bfa_fcs_lport_get_pwwn( + bfa_fcs_get_base_port(rport->fcs)); + aen_entry->aen_data.rport.lpwwn = bfa_fcs_lport_get_pwwn(rport->port); + aen_entry->aen_data.rport.rpwwn = rport->pwwn; + + /* Send the AEN notification */ + bfad_im_post_vendor_event(aen_entry, bfad, ++rport->fcs->fcs_aen_seq, + BFA_AEN_CAT_RPORT, event); +} + +static void bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport) { struct bfa_fcs_lport_s *port = rport->port; @@ -2024,6 +2080,11 @@ bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport) rport->stats.onlines++; + if ((!rport->pid) || (!rport->pwwn)) { + bfa_trc(rport->fcs, rport->pid); + bfa_sm_fault(rport->fcs, rport->pid); + } + if (bfa_fcs_lport_is_initiator(port)) { bfa_fcs_itnim_rport_online(rport->itnim); if (!BFA_FCS_PID_IS_WKA(rport->pid)) @@ -2032,10 +2093,12 @@ bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport) wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port)); wwn2str(rpwwn_buf, rport->pwwn); - if (!BFA_FCS_PID_IS_WKA(rport->pid)) + if (!BFA_FCS_PID_IS_WKA(rport->pid)) { BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Remote port (WWN = %s) online for logical port (WWN = %s)\n", rpwwn_buf, lpwwn_buf); + bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_ONLINE, NULL); + } } static void @@ -2047,20 +2110,26 @@ bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport) char rpwwn_buf[BFA_STRING_32]; rport->stats.offlines++; + rport->plogi_pending = BFA_FALSE; wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port)); wwn2str(rpwwn_buf, rport->pwwn); if (!BFA_FCS_PID_IS_WKA(rport->pid)) { - if (bfa_fcs_lport_is_online(rport->port) == BFA_TRUE) + if (bfa_fcs_lport_is_online(rport->port) == BFA_TRUE) { BFA_LOG(KERN_ERR, bfad, bfa_log_level, "Remote port (WWN = %s) connectivity lost for " "logical port (WWN = %s)\n", rpwwn_buf, lpwwn_buf); - else + bfa_fcs_rport_aen_post(rport, + BFA_RPORT_AEN_DISCONNECT, NULL); + } else { BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Remote port (WWN = %s) offlined by " "logical port (WWN = %s)\n", rpwwn_buf, lpwwn_buf); + bfa_fcs_rport_aen_post(rport, + BFA_RPORT_AEN_OFFLINE, NULL); + } } if (bfa_fcs_lport_is_initiator(port)) { @@ -2120,7 +2189,7 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi) port->fabric->bb_credit = be16_to_cpu(plogi->csp.bbcred); bfa_fcport_set_tx_bbcredit(port->fcs->bfa, - port->fabric->bb_credit); + port->fabric->bb_credit, 0); } } @@ -2233,22 +2302,6 @@ bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port, struct fchs_s *fchs, bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD); } -static int -wwn_compare(wwn_t wwn1, wwn_t wwn2) -{ - u8 *b1 = (u8 *) &wwn1; - u8 *b2 = (u8 *) &wwn2; - int i; - - for (i = 0; i < sizeof(wwn_t); i++) { - if (b1[i] < b2[i]) - return -1; - if (b1[i] > b2[i]) - return 1; - } - return 0; -} - /* * Called by bport/vport to handle PLOGI received from an existing * remote port. @@ -2266,19 +2319,8 @@ bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs, rport->reply_oxid = rx_fchs->ox_id; bfa_trc(rport->fcs, rport->reply_oxid); - /* - * In Switched fabric topology, - * PLOGI to each other. 
If our pwwn is smaller, ignore it, - * if it is not a well known address. - * If the link topology is N2N, - * this Plogi should be accepted. - */ - if ((wwn_compare(rport->port->port_cfg.pwwn, rport->pwwn) == -1) && - (bfa_fcs_fabric_is_switched(rport->port->fabric)) && - (!BFA_FCS_PID_IS_WKA(rport->pid))) { - bfa_trc(rport->fcs, rport->pid); - return; - } + rport->pid = rx_fchs->s_id; + bfa_trc(rport->fcs, rport->pid); rport->stats.plogi_rcvd++; bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD); @@ -2361,8 +2403,11 @@ bfa_cb_rport_qos_scn_flowid(void *cbarg, struct bfa_rport_qos_attr_s new_qos_attr) { struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg; + struct bfa_rport_aen_data_s aen_data; bfa_trc(rport->fcs, rport->pwwn); + aen_data.priv.qos = new_qos_attr; + bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_FLOWID, &aen_data); } /* @@ -2385,8 +2430,11 @@ bfa_cb_rport_qos_scn_prio(void *cbarg, struct bfa_rport_qos_attr_s new_qos_attr) { struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg; + struct bfa_rport_aen_data_s aen_data; bfa_trc(rport->fcs, rport->pwwn); + aen_data.priv.qos = new_qos_attr; + bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_PRIO, &aen_data); } /* @@ -2531,7 +2579,45 @@ bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, __be16 ox_id) bfa_sm_send_event(rport, RPSM_EVENT_PRLO_RCVD); } - +void +bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport, + struct bfa_rport_attr_s *rport_attr) +{ + struct bfa_rport_qos_attr_s qos_attr; + struct bfa_fcs_lport_s *port = rport->port; + bfa_port_speed_t rport_speed = rport->rpf.rpsc_speed; + + memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s)); + memset(&qos_attr, 0, sizeof(struct bfa_rport_qos_attr_s)); + + rport_attr->pid = rport->pid; + rport_attr->pwwn = rport->pwwn; + rport_attr->nwwn = rport->nwwn; + rport_attr->cos_supported = rport->fc_cos; + rport_attr->df_sz = rport->maxfrsize; + rport_attr->state = bfa_fcs_rport_get_state(rport); + rport_attr->fc_cos = rport->fc_cos; + rport_attr->cisc = rport->cisc; + rport_attr->scsi_function = rport->scsi_function; + rport_attr->curr_speed = rport->rpf.rpsc_speed; + rport_attr->assigned_speed = rport->rpf.assigned_speed; + + qos_attr.qos_priority = rport->bfa_rport->qos_attr.qos_priority; + qos_attr.qos_flow_id = + cpu_to_be32(rport->bfa_rport->qos_attr.qos_flow_id); + rport_attr->qos_attr = qos_attr; + + rport_attr->trl_enforced = BFA_FALSE; + if (bfa_fcport_is_ratelim(port->fcs->bfa) && + (rport->scsi_function == BFA_RPORT_TARGET)) { + if (rport_speed == BFA_PORT_SPEED_UNKNOWN) + rport_speed = + bfa_fcport_get_ratelim_speed(rport->fcs->bfa); + + if (rport_speed < bfa_fcs_lport_get_rport_max_speed(port)) + rport_attr->trl_enforced = BFA_TRUE; + } +} /* * Remote port implementation. 
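The bfa_fcs_rport_get_attr() hunk just above flags target rate-limit enforcement (trl_enforced) only when the local port has rate limiting configured, the remote port is a SCSI target, and its RPSC-reported speed (or, when unknown, the configured rate-limit speed) falls below the fastest target speed on the logical port. A minimal standalone restatement of that decision is sketched below; the enum values and helper are illustrative stand-ins rather than the driver's types.

#include <stdbool.h>
#include <stdio.h>

/* Speed values ordered so that numeric comparison means "faster than". */
enum sketch_speed { SPEED_UNKNOWN = 0, SPEED_4G = 4, SPEED_8G = 8, SPEED_16G = 16 };

/*
 * Restatement of the trl_enforced decision: enforced only for targets,
 * only when the local port rate-limits, and only when the target is
 * slower than the fastest target speed seen on the port.
 */
static bool trl_enforced(bool ratelim_enabled, bool is_target,
                         enum sketch_speed rport_speed,
                         enum sketch_speed ratelim_speed,
                         enum sketch_speed port_max_target_speed)
{
        if (!ratelim_enabled || !is_target)
                return false;

        if (rport_speed == SPEED_UNKNOWN)
                rport_speed = ratelim_speed;    /* fall back to configured limit */

        return rport_speed < port_max_target_speed;
}

int main(void)
{
        /* A 4G target on a port whose fastest target runs at 8G gets throttled. */
        printf("%d\n", trl_enforced(true, true, SPEED_4G, SPEED_8G, SPEED_8G));
        return 0;
}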
diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c index 977e681ec80..ea24d4c6e67 100644 --- a/drivers/scsi/bfa/bfa_hw_cb.c +++ b/drivers/scsi/bfa/bfa_hw_cb.c @@ -17,14 +17,14 @@ #include "bfad_drv.h" #include "bfa_modules.h" -#include "bfi_cbreg.h" +#include "bfi_reg.h" void bfa_hwcb_reginit(struct bfa_s *bfa) { struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs; void __iomem *kva = bfa_ioc_bar0(&bfa->ioc); - int i, q, fn = bfa_ioc_pcifn(&bfa->ioc); + int fn = bfa_ioc_pcifn(&bfa->ioc); if (fn == 0) { bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS); @@ -33,29 +33,6 @@ bfa_hwcb_reginit(struct bfa_s *bfa) bfa_regs->intr_status = (kva + HOSTFN1_INT_STATUS); bfa_regs->intr_mask = (kva + HOSTFN1_INT_MSK); } - - for (i = 0; i < BFI_IOC_MAX_CQS; i++) { - /* - * CPE registers - */ - q = CPE_Q_NUM(fn, i); - bfa_regs->cpe_q_pi[i] = (kva + CPE_Q_PI(q)); - bfa_regs->cpe_q_ci[i] = (kva + CPE_Q_CI(q)); - bfa_regs->cpe_q_depth[i] = (kva + CPE_Q_DEPTH(q)); - - /* - * RME registers - */ - q = CPE_Q_NUM(fn, i); - bfa_regs->rme_q_pi[i] = (kva + RME_Q_PI(q)); - bfa_regs->rme_q_ci[i] = (kva + RME_Q_CI(q)); - bfa_regs->rme_q_depth[i] = (kva + RME_Q_DEPTH(q)); - } -} - -void -bfa_hwcb_reqq_ack(struct bfa_s *bfa, int reqq) -{ } static void @@ -65,16 +42,36 @@ bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq) bfa->iocfc.bfa_regs.intr_status); } -void -bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq) +/* + * Actions to respond RME Interrupt for Crossbow ASIC: + * - Write 1 to Interrupt Status register + * INTX - done in bfa_intx() + * MSIX - done in bfa_hwcb_rspq_ack_msix() + * - Update CI (only if new CI) + */ +static void +bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq, u32 ci) { + writel(__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq), + bfa->iocfc.bfa_regs.intr_status); + + if (bfa_rspq_ci(bfa, rspq) == ci) + return; + + bfa_rspq_ci(bfa, rspq) = ci; + writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]); + mmiowb(); } -static void -bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq) +void +bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci) { - writel(__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq), - bfa->iocfc.bfa_regs.intr_status); + if (bfa_rspq_ci(bfa, rspq) == ci) + return; + + bfa_rspq_ci(bfa, rspq) = ci; + writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]); + mmiowb(); } void @@ -104,43 +101,71 @@ bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap, } /* + * Dummy interrupt handler for handling spurious interrupts. + */ +static void +bfa_hwcb_msix_dummy(struct bfa_s *bfa, int vec) +{ +} + +/* * No special setup required for crossbow -- vector assignments are implicit. 
*/ void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs) { - int i; - WARN_ON((nvecs != 1) && (nvecs != __HFN_NUMINTS)); bfa->msix.nvecs = nvecs; - if (nvecs == 1) { - for (i = 0; i < BFA_MSIX_CB_MAX; i++) + bfa_hwcb_msix_uninstall(bfa); +} + +void +bfa_hwcb_msix_ctrl_install(struct bfa_s *bfa) +{ + int i; + + if (bfa->msix.nvecs == 0) + return; + + if (bfa->msix.nvecs == 1) { + for (i = BFI_MSIX_CPE_QMIN_CB; i < BFI_MSIX_CB_MAX; i++) bfa->msix.handler[i] = bfa_msix_all; return; } - for (i = BFA_MSIX_CPE_Q0; i <= BFA_MSIX_CPE_Q7; i++) - bfa->msix.handler[i] = bfa_msix_reqq; - - for (i = BFA_MSIX_RME_Q0; i <= BFA_MSIX_RME_Q7; i++) - bfa->msix.handler[i] = bfa_msix_rspq; - - for (; i < BFA_MSIX_CB_MAX; i++) + for (i = BFI_MSIX_RME_QMAX_CB+1; i < BFI_MSIX_CB_MAX; i++) bfa->msix.handler[i] = bfa_msix_lpu_err; } -/* - * Crossbow -- dummy, interrupts are masked - */ void -bfa_hwcb_msix_install(struct bfa_s *bfa) +bfa_hwcb_msix_queue_install(struct bfa_s *bfa) { + int i; + + if (bfa->msix.nvecs == 0) + return; + + if (bfa->msix.nvecs == 1) { + for (i = BFI_MSIX_CPE_QMIN_CB; i <= BFI_MSIX_RME_QMAX_CB; i++) + bfa->msix.handler[i] = bfa_msix_all; + return; + } + + for (i = BFI_MSIX_CPE_QMIN_CB; i <= BFI_MSIX_CPE_QMAX_CB; i++) + bfa->msix.handler[i] = bfa_msix_reqq; + + for (i = BFI_MSIX_RME_QMIN_CB; i <= BFI_MSIX_RME_QMAX_CB; i++) + bfa->msix.handler[i] = bfa_msix_rspq; } void bfa_hwcb_msix_uninstall(struct bfa_s *bfa) { + int i; + + for (i = 0; i < BFI_MSIX_CB_MAX; i++) + bfa->msix.handler[i] = bfa_hwcb_msix_dummy; } /* @@ -149,13 +174,18 @@ bfa_hwcb_msix_uninstall(struct bfa_s *bfa) void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix) { - bfa->iocfc.hwif.hw_reqq_ack = bfa_hwcb_reqq_ack_msix; - bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix; + if (msix) { + bfa->iocfc.hwif.hw_reqq_ack = bfa_hwcb_reqq_ack_msix; + bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix; + } else { + bfa->iocfc.hwif.hw_reqq_ack = NULL; + bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack; + } } void bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end) { - *start = BFA_MSIX_RME_Q0; - *end = BFA_MSIX_RME_Q7; + *start = BFI_MSIX_RME_QMIN_CB; + *end = BFI_MSIX_RME_QMAX_CB; } diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c index 21018d98a07..637527f48b4 100644 --- a/drivers/scsi/bfa/bfa_hw_ct.c +++ b/drivers/scsi/bfa/bfa_hw_ct.c @@ -17,29 +17,10 @@ #include "bfad_drv.h" #include "bfa_modules.h" -#include "bfi_ctreg.h" +#include "bfi_reg.h" BFA_TRC_FILE(HAL, IOCFC_CT); -static u32 __ct_msix_err_vec_reg[] = { - HOST_MSIX_ERR_INDEX_FN0, - HOST_MSIX_ERR_INDEX_FN1, - HOST_MSIX_ERR_INDEX_FN2, - HOST_MSIX_ERR_INDEX_FN3, -}; - -static void -bfa_hwct_msix_lpu_err_set(struct bfa_s *bfa, bfa_boolean_t msix, int vec) -{ - int fn = bfa_ioc_pcifn(&bfa->ioc); - void __iomem *kva = bfa_ioc_bar0(&bfa->ioc); - - if (msix) - writel(vec, kva + __ct_msix_err_vec_reg[fn]); - else - writel(0, kva + __ct_msix_err_vec_reg[fn]); -} - /* * Dummy interrupt handler for handling spurious interrupt during chip-reinit. 
*/ @@ -53,7 +34,7 @@ bfa_hwct_reginit(struct bfa_s *bfa) { struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs; void __iomem *kva = bfa_ioc_bar0(&bfa->ioc); - int i, q, fn = bfa_ioc_pcifn(&bfa->ioc); + int fn = bfa_ioc_pcifn(&bfa->ioc); if (fn == 0) { bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS); @@ -62,26 +43,16 @@ bfa_hwct_reginit(struct bfa_s *bfa) bfa_regs->intr_status = (kva + HOSTFN1_INT_STATUS); bfa_regs->intr_mask = (kva + HOSTFN1_INT_MSK); } +} - for (i = 0; i < BFI_IOC_MAX_CQS; i++) { - /* - * CPE registers - */ - q = CPE_Q_NUM(fn, i); - bfa_regs->cpe_q_pi[i] = (kva + CPE_PI_PTR_Q(q << 5)); - bfa_regs->cpe_q_ci[i] = (kva + CPE_CI_PTR_Q(q << 5)); - bfa_regs->cpe_q_depth[i] = (kva + CPE_DEPTH_Q(q << 5)); - bfa_regs->cpe_q_ctrl[i] = (kva + CPE_QCTRL_Q(q << 5)); - - /* - * RME registers - */ - q = CPE_Q_NUM(fn, i); - bfa_regs->rme_q_pi[i] = (kva + RME_PI_PTR_Q(q << 5)); - bfa_regs->rme_q_ci[i] = (kva + RME_CI_PTR_Q(q << 5)); - bfa_regs->rme_q_depth[i] = (kva + RME_DEPTH_Q(q << 5)); - bfa_regs->rme_q_ctrl[i] = (kva + RME_QCTRL_Q(q << 5)); - } +void +bfa_hwct2_reginit(struct bfa_s *bfa) +{ + struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs; + void __iomem *kva = bfa_ioc_bar0(&bfa->ioc); + + bfa_regs->intr_status = (kva + CT2_HOSTFN_INT_STATUS); + bfa_regs->intr_mask = (kva + CT2_HOSTFN_INTR_MASK); } void @@ -93,22 +64,45 @@ bfa_hwct_reqq_ack(struct bfa_s *bfa, int reqq) writel(r32, bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]); } +/* + * Actions to respond RME Interrupt for Catapult ASIC: + * - Write 1 to Interrupt Status register (INTx only - done in bfa_intx()) + * - Acknowledge by writing to RME Queue Control register + * - Update CI + */ void -bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq) +bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci) { u32 r32; r32 = readl(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]); writel(r32, bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]); + + bfa_rspq_ci(bfa, rspq) = ci; + writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]); + mmiowb(); +} + +/* + * Actions to respond RME Interrupt for Catapult2 ASIC: + * - Write 1 to Interrupt Status register (INTx only - done in bfa_intx()) + * - Update CI + */ +void +bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci) +{ + bfa_rspq_ci(bfa, rspq) = ci; + writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]); + mmiowb(); } void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap, u32 *num_vecs, u32 *max_vec_bit) { - *msix_vecs_bmap = (1 << BFA_MSIX_CT_MAX) - 1; - *max_vec_bit = (1 << (BFA_MSIX_CT_MAX - 1)); - *num_vecs = BFA_MSIX_CT_MAX; + *msix_vecs_bmap = (1 << BFI_MSIX_CT_MAX) - 1; + *max_vec_bit = (1 << (BFI_MSIX_CT_MAX - 1)); + *num_vecs = BFI_MSIX_CT_MAX; } /* @@ -117,7 +111,7 @@ bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap, void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs) { - WARN_ON((nvecs != 1) && (nvecs != BFA_MSIX_CT_MAX)); + WARN_ON((nvecs != 1) && (nvecs != BFI_MSIX_CT_MAX)); bfa_trc(bfa, nvecs); bfa->msix.nvecs = nvecs; @@ -125,7 +119,19 @@ bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs) } void -bfa_hwct_msix_install(struct bfa_s *bfa) +bfa_hwct_msix_ctrl_install(struct bfa_s *bfa) +{ + if (bfa->msix.nvecs == 0) + return; + + if (bfa->msix.nvecs == 1) + bfa->msix.handler[BFI_MSIX_LPU_ERR_CT] = bfa_msix_all; + else + bfa->msix.handler[BFI_MSIX_LPU_ERR_CT] = bfa_msix_lpu_err; +} + +void +bfa_hwct_msix_queue_install(struct bfa_s *bfa) { int i; @@ -133,19 +139,16 @@ bfa_hwct_msix_install(struct bfa_s *bfa) return; if (bfa->msix.nvecs == 1) { - for (i = 0; i < BFA_MSIX_CT_MAX; i++) + for 
(i = BFI_MSIX_CPE_QMIN_CT; i < BFI_MSIX_CT_MAX; i++) bfa->msix.handler[i] = bfa_msix_all; return; } - for (i = BFA_MSIX_CPE_Q0; i <= BFA_MSIX_CPE_Q3; i++) + for (i = BFI_MSIX_CPE_QMIN_CT; i <= BFI_MSIX_CPE_QMAX_CT; i++) bfa->msix.handler[i] = bfa_msix_reqq; - for (; i <= BFA_MSIX_RME_Q3; i++) + for (i = BFI_MSIX_RME_QMIN_CT; i <= BFI_MSIX_RME_QMAX_CT; i++) bfa->msix.handler[i] = bfa_msix_rspq; - - WARN_ON(i != BFA_MSIX_LPU_ERR); - bfa->msix.handler[BFA_MSIX_LPU_ERR] = bfa_msix_lpu_err; } void @@ -153,7 +156,7 @@ bfa_hwct_msix_uninstall(struct bfa_s *bfa) { int i; - for (i = 0; i < BFA_MSIX_CT_MAX; i++) + for (i = 0; i < BFI_MSIX_CT_MAX; i++) bfa->msix.handler[i] = bfa_hwct_msix_dummy; } @@ -164,13 +167,12 @@ void bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix) { bfa_trc(bfa, 0); - bfa_hwct_msix_lpu_err_set(bfa, msix, BFA_MSIX_LPU_ERR); bfa_ioc_isr_mode_set(&bfa->ioc, msix); } void bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end) { - *start = BFA_MSIX_RME_Q0; - *end = BFA_MSIX_RME_Q3; + *start = BFI_MSIX_RME_QMIN_CT; + *end = BFI_MSIX_RME_QMAX_CT; } diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c index 6c7e0339dda..1ac5aecf25a 100644 --- a/drivers/scsi/bfa/bfa_ioc.c +++ b/drivers/scsi/bfa/bfa_ioc.c @@ -16,8 +16,9 @@ */ #include "bfad_drv.h" +#include "bfad_im.h" #include "bfa_ioc.h" -#include "bfi_ctreg.h" +#include "bfi_reg.h" #include "bfa_defs.h" #include "bfa_defs_svc.h" @@ -29,8 +30,8 @@ BFA_TRC_FILE(CNA, IOC); #define BFA_IOC_TOV 3000 /* msecs */ #define BFA_IOC_HWSEM_TOV 500 /* msecs */ #define BFA_IOC_HB_TOV 500 /* msecs */ -#define BFA_IOC_HWINIT_MAX 5 #define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV +#define BFA_IOC_POLL_TOV BFA_TIMER_FREQ #define bfa_ioc_timer_start(__ioc) \ bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \ @@ -79,14 +80,17 @@ bfa_boolean_t bfa_auto_recover = BFA_TRUE; static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc); static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force); static void bfa_ioc_timeout(void *ioc); +static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc); static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc); static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc); static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc); static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc); static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc); -static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc); +static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc); static void bfa_ioc_recover(struct bfa_ioc_s *ioc); static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc); +static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc , + enum bfa_ioc_event_e event); static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc); static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc); static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc); @@ -105,11 +109,12 @@ enum ioc_event { IOC_E_ENABLED = 5, /* f/w enabled */ IOC_E_FWRSP_GETATTR = 6, /* IOC get attribute response */ IOC_E_DISABLED = 7, /* f/w disabled */ - IOC_E_INITFAILED = 8, /* failure notice by iocpf sm */ - IOC_E_PFFAILED = 9, /* failure notice by iocpf sm */ - IOC_E_HBFAIL = 10, /* heartbeat failure */ - IOC_E_HWERROR = 11, /* hardware error interrupt */ - IOC_E_TIMEOUT = 12, /* timeout */ + IOC_E_PFFAILED = 8, /* failure notice by iocpf sm */ + IOC_E_HBFAIL = 9, /* heartbeat failure */ + IOC_E_HWERROR = 10, /* hardware error interrupt */ + IOC_E_TIMEOUT = 11, /* timeout */ + IOC_E_HWFAILED = 12, /* PCI mapping failure notice */ + 
IOC_E_FWRSP_ACQ_ADDR = 13, /* Acquiring address */ }; bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event); @@ -121,6 +126,8 @@ bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, acq_addr, struct bfa_ioc_s, enum ioc_event); static struct bfa_sm_table_s ioc_sm_table[] = { {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT}, @@ -132,6 +139,8 @@ static struct bfa_sm_table_s ioc_sm_table[] = { {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL}, {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING}, {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED}, + {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL}, + {BFA_SM(bfa_ioc_sm_acq_addr), BFA_IOC_ACQ_ADDR}, }; /* @@ -143,9 +152,9 @@ static struct bfa_sm_table_s ioc_sm_table[] = { bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV) #define bfa_iocpf_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer) -#define bfa_iocpf_recovery_timer_start(__ioc) \ +#define bfa_iocpf_poll_timer_start(__ioc) \ bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \ - bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV_RECOVER) + bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV) #define bfa_sem_timer_start(__ioc) \ bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer, \ @@ -157,6 +166,7 @@ static struct bfa_sm_table_s ioc_sm_table[] = { */ static void bfa_iocpf_timeout(void *ioc_arg); static void bfa_iocpf_sem_timeout(void *ioc_arg); +static void bfa_iocpf_poll_timeout(void *ioc_arg); /* * IOCPF state machine events @@ -173,6 +183,7 @@ enum iocpf_event { IOCPF_E_GETATTRFAIL = 9, /* init fail notice by ioc sm */ IOCPF_E_SEMLOCKED = 10, /* h/w semaphore is locked */ IOCPF_E_TIMEOUT = 11, /* f/w response timeout */ + IOCPF_E_SEM_ERROR = 12, /* h/w sem mapping error */ }; /* @@ -314,11 +325,16 @@ bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event) /* !!! fall through !!! */ case IOC_E_HWERROR: ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); - bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry); + bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); if (event != IOC_E_PFFAILED) bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL); break; + case IOC_E_HWFAILED: + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail); + break; + case IOC_E_DISABLE: bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); break; @@ -356,17 +372,23 @@ bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event) case IOC_E_FWRSP_GETATTR: bfa_ioc_timer_stop(ioc); bfa_ioc_check_attr_wwns(ioc); + bfa_ioc_hb_monitor(ioc); bfa_fsm_set_state(ioc, bfa_ioc_sm_op); break; + case IOC_E_FWRSP_ACQ_ADDR: + bfa_ioc_timer_stop(ioc); + bfa_ioc_hb_monitor(ioc); + bfa_fsm_set_state(ioc, bfa_ioc_sm_acq_addr); break; + case IOC_E_PFFAILED: case IOC_E_HWERROR: bfa_ioc_timer_stop(ioc); /* !!! fall through !!! 
*/ case IOC_E_TIMEOUT: ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); - bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry); + bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); if (event != IOC_E_PFFAILED) bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL); break; @@ -384,6 +406,50 @@ bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event) } } +/* + * Acquiring address from fabric (entry function) + */ +static void +bfa_ioc_sm_acq_addr_entry(struct bfa_ioc_s *ioc) +{ +} + +/* + * Acquiring address from the fabric + */ +static void +bfa_ioc_sm_acq_addr(struct bfa_ioc_s *ioc, enum ioc_event event) +{ + bfa_trc(ioc, event); + + switch (event) { + case IOC_E_FWRSP_GETATTR: + bfa_ioc_check_attr_wwns(ioc); + bfa_fsm_set_state(ioc, bfa_ioc_sm_op); + break; + + case IOC_E_PFFAILED: + case IOC_E_HWERROR: + bfa_hb_timer_stop(ioc); + case IOC_E_HBFAIL: + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); + if (event != IOC_E_PFFAILED) + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL); + break; + + case IOC_E_DISABLE: + bfa_hb_timer_stop(ioc); + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); + break; + + case IOC_E_ENABLE: + break; + + default: + bfa_sm_fault(ioc, event); + } +} static void bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc) @@ -391,8 +457,9 @@ bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc) struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); - bfa_ioc_hb_monitor(ioc); + bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n"); + bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE); } static void @@ -414,13 +481,13 @@ bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event) bfa_hb_timer_stop(ioc); /* !!! fall through !!! */ case IOC_E_HBFAIL: - bfa_ioc_fail_notify(ioc); - if (ioc->iocpf.auto_recover) bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry); else bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); + bfa_ioc_fail_notify(ioc); + if (event != IOC_E_PFFAILED) bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL); break; @@ -437,6 +504,7 @@ bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc) struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n"); + bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE); } /* @@ -461,6 +529,11 @@ bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event) bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL); break; + case IOC_E_HWFAILED: + bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail); + bfa_ioc_disable_comp(ioc); + break; + default: bfa_sm_fault(ioc, event); } @@ -525,12 +598,14 @@ bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event) * Initialization retry failed. 
*/ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); if (event != IOC_E_PFFAILED) bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL); break; - case IOC_E_INITFAILED: - bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); + case IOC_E_HWFAILED: + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail); break; case IOC_E_ENABLE: @@ -590,6 +665,35 @@ bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event) } } +static void +bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc) +{ + bfa_trc(ioc, 0); +} + +static void +bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event) +{ + bfa_trc(ioc, event); + + switch (event) { + case IOC_E_ENABLE: + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + break; + + case IOC_E_DISABLE: + ioc->cbfn->disable_cbfn(ioc->bfa); + break; + + case IOC_E_DETACH: + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); + break; + + default: + bfa_sm_fault(ioc, event); + } +} + /* * IOCPF State Machine */ @@ -600,7 +704,7 @@ bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event) static void bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf) { - iocpf->retry_count = 0; + iocpf->fw_mismatch_notified = BFA_FALSE; iocpf->auto_recover = bfa_auto_recover; } @@ -633,6 +737,28 @@ bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event) static void bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf) { + struct bfi_ioc_image_hdr_s fwhdr; + u32 fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate); + + /* h/w sem init */ + if (fwstate == BFI_IOC_UNINIT) + goto sem_get; + + bfa_ioc_fwver_get(iocpf->ioc, &fwhdr); + + if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) + goto sem_get; + + bfa_trc(iocpf->ioc, fwstate); + bfa_trc(iocpf->ioc, fwhdr.exec); + writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate); + + /* + * Try to lock and then unlock the semaphore. + */ + readl(iocpf->ioc->ioc_regs.ioc_sem_reg); + writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg); +sem_get: bfa_ioc_hw_sem_get(iocpf->ioc); } @@ -650,7 +776,6 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event) case IOCPF_E_SEMLOCKED: if (bfa_ioc_firmware_lock(ioc)) { if (bfa_ioc_sync_start(ioc)) { - iocpf->retry_count = 0; bfa_ioc_sync_join(ioc); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); } else { @@ -664,6 +789,11 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event) } break; + case IOCPF_E_SEM_ERROR: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); + bfa_fsm_send_event(ioc, IOC_E_HWFAILED); + break; + case IOCPF_E_DISABLE: bfa_sem_timer_stop(ioc); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); @@ -689,10 +819,10 @@ bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf) /* * Call only the first time sm enters fwmismatch state. 
*/ - if (iocpf->retry_count == 0) + if (iocpf->fw_mismatch_notified == BFA_FALSE) bfa_ioc_pf_fwmismatch(iocpf->ioc); - iocpf->retry_count++; + iocpf->fw_mismatch_notified = BFA_TRUE; bfa_iocpf_timer_start(iocpf->ioc); } @@ -757,6 +887,11 @@ bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event) } break; + case IOCPF_E_SEM_ERROR: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); + bfa_fsm_send_event(ioc, IOC_E_HWFAILED); + break; + case IOCPF_E_DISABLE: bfa_sem_timer_stop(ioc); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); @@ -770,7 +905,7 @@ bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event) static void bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf) { - bfa_iocpf_timer_start(iocpf->ioc); + iocpf->poll_time = 0; bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE); } @@ -787,20 +922,12 @@ bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event) switch (event) { case IOCPF_E_FWREADY: - bfa_iocpf_timer_stop(ioc); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling); break; - case IOCPF_E_INITFAIL: - bfa_iocpf_timer_stop(ioc); - /* - * !!! fall through !!! - */ - case IOCPF_E_TIMEOUT: writel(1, ioc->ioc_regs.ioc_sem_reg); - if (event == IOCPF_E_TIMEOUT) - bfa_fsm_send_event(ioc, IOC_E_PFFAILED); + bfa_fsm_send_event(ioc, IOC_E_PFFAILED); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); break; @@ -820,6 +947,10 @@ static void bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf) { bfa_iocpf_timer_start(iocpf->ioc); + /* + * Enable Interrupts before sending fw IOC ENABLE cmd. + */ + iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa); bfa_ioc_send_enable(iocpf->ioc); } @@ -860,10 +991,6 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event) bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling); break; - case IOCPF_E_FWREADY: - bfa_ioc_send_enable(ioc); - break; - default: bfa_sm_fault(ioc, event); } @@ -895,16 +1022,6 @@ bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event) bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync); break; - case IOCPF_E_FWREADY: - if (bfa_ioc_is_operational(ioc)) { - bfa_fsm_send_event(ioc, IOC_E_PFFAILED); - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync); - } else { - bfa_fsm_send_event(ioc, IOC_E_PFFAILED); - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); - } - break; - default: bfa_sm_fault(ioc, event); } @@ -929,7 +1046,6 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event) switch (event) { case IOCPF_E_FWRSP_DISABLE: - case IOCPF_E_FWREADY: bfa_iocpf_timer_stop(ioc); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); break; @@ -976,6 +1092,11 @@ bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event) bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); break; + case IOCPF_E_SEM_ERROR: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); + bfa_fsm_send_event(ioc, IOC_E_HWFAILED); + break; + case IOCPF_E_FAIL: break; @@ -990,6 +1111,7 @@ bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event) static void bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf) { + bfa_ioc_mbox_flush(iocpf->ioc); bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED); } @@ -1002,7 +1124,6 @@ bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event) switch (event) { case IOCPF_E_ENABLE: - iocpf->retry_count = 0; bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); break; @@ -1019,6 +1140,7 @@ bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event) static void bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf) { + 
bfa_ioc_debug_save_ftrc(iocpf->ioc); bfa_ioc_hw_sem_get(iocpf->ioc); } @@ -1035,20 +1157,15 @@ bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event) switch (event) { case IOCPF_E_SEMLOCKED: bfa_ioc_notify_fail(ioc); - bfa_ioc_sync_ack(ioc); - iocpf->retry_count++; - if (iocpf->retry_count >= BFA_IOC_HWINIT_MAX) { - bfa_ioc_sync_leave(ioc); - writel(1, ioc->ioc_regs.ioc_sem_reg); - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail); - } else { - if (bfa_ioc_sync_complete(ioc)) - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); - else { - writel(1, ioc->ioc_regs.ioc_sem_reg); - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); - } - } + bfa_ioc_sync_leave(ioc); + writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); + writel(1, ioc->ioc_regs.ioc_sem_reg); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail); + break; + + case IOCPF_E_SEM_ERROR: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); + bfa_fsm_send_event(ioc, IOC_E_HWFAILED); break; case IOCPF_E_DISABLE: @@ -1073,7 +1190,7 @@ bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event) static void bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf) { - bfa_fsm_send_event(iocpf->ioc, IOC_E_INITFAILED); + bfa_trc(iocpf->ioc, 0); } /* @@ -1112,7 +1229,7 @@ bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf) /* * Flush any queued up mailbox requests. */ - bfa_ioc_mbox_hbfail(iocpf->ioc); + bfa_ioc_mbox_flush(iocpf->ioc); bfa_ioc_hw_sem_get(iocpf->ioc); } @@ -1126,11 +1243,11 @@ bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event) switch (event) { case IOCPF_E_SEMLOCKED: - iocpf->retry_count = 0; bfa_ioc_sync_ack(ioc); bfa_ioc_notify_fail(ioc); if (!iocpf->auto_recover) { bfa_ioc_sync_leave(ioc); + writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); writel(1, ioc->ioc_regs.ioc_sem_reg); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); } else { @@ -1143,6 +1260,11 @@ bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event) } break; + case IOCPF_E_SEM_ERROR: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); + bfa_fsm_send_event(ioc, IOC_E_HWFAILED); + break; + case IOCPF_E_DISABLE: bfa_sem_timer_stop(ioc); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); @@ -1159,6 +1281,7 @@ bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event) static void bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf) { + bfa_trc(iocpf->ioc, 0); } /* @@ -1185,23 +1308,28 @@ bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event) * BFA IOC private functions */ +/* + * Notify common modules registered for notification. + */ static void -bfa_ioc_disable_comp(struct bfa_ioc_s *ioc) +bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event) { - struct list_head *qe; - struct bfa_ioc_hbfail_notify_s *notify; + struct bfa_ioc_notify_s *notify; + struct list_head *qe; - ioc->cbfn->disable_cbfn(ioc->bfa); - - /* - * Notify common modules registered for notification. 
- */ - list_for_each(qe, &ioc->hb_notify_q) { - notify = (struct bfa_ioc_hbfail_notify_s *) qe; - notify->cbfn(notify->cbarg); + list_for_each(qe, &ioc->notify_q) { + notify = (struct bfa_ioc_notify_s *)qe; + notify->cbfn(notify->cbarg, event); } } +static void +bfa_ioc_disable_comp(struct bfa_ioc_s *ioc) +{ + ioc->cbfn->disable_cbfn(ioc->bfa); + bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED); +} + bfa_boolean_t bfa_ioc_sem_get(void __iomem *sem_reg) { @@ -1211,16 +1339,15 @@ bfa_ioc_sem_get(void __iomem *sem_reg) r32 = readl(sem_reg); - while (r32 && (cnt < BFA_SEM_SPINCNT)) { + while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) { cnt++; udelay(2); r32 = readl(sem_reg); } - if (r32 == 0) + if (!(r32 & 1)) return BFA_TRUE; - WARN_ON(cnt >= BFA_SEM_SPINCNT); return BFA_FALSE; } @@ -1234,7 +1361,12 @@ bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc) * will return 1. Semaphore is released by writing 1 to the register */ r32 = readl(ioc->ioc_regs.ioc_sem_reg); - if (r32 == 0) { + if (r32 == ~0) { + WARN_ON(r32 == ~0); + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR); + return; + } + if (!(r32 & 1)) { bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED); return; } @@ -1343,7 +1475,7 @@ bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) int i; drv_fwhdr = (struct bfi_ioc_image_hdr_s *) - bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0); + bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0); for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) { if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) { @@ -1369,7 +1501,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env) bfa_ioc_fwver_get(ioc, &fwhdr); drv_fwhdr = (struct bfi_ioc_image_hdr_s *) - bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0); + bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0); if (fwhdr.signature != drv_fwhdr->signature) { bfa_trc(ioc, fwhdr.signature); @@ -1377,8 +1509,8 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env) return BFA_FALSE; } - if (swab32(fwhdr.param) != boot_env) { - bfa_trc(ioc, fwhdr.param); + if (swab32(fwhdr.bootenv) != boot_env) { + bfa_trc(ioc, fwhdr.bootenv); bfa_trc(ioc, boot_env); return BFA_FALSE; } @@ -1414,8 +1546,8 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) bfa_trc(ioc, ioc_fwstate); - boot_type = BFI_BOOT_TYPE_NORMAL; - boot_env = BFI_BOOT_LOADER_OS; + boot_type = BFI_FWBOOT_TYPE_NORMAL; + boot_env = BFI_FWBOOT_ENV_OS; /* * check if firmware is valid @@ -1425,6 +1557,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) if (!fwvalid) { bfa_ioc_boot(ioc, boot_type, boot_env); + bfa_ioc_poll_fwinit(ioc); return; } @@ -1433,7 +1566,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) * just wait for an initialization completion interrupt. */ if (ioc_fwstate == BFI_IOC_INITING) { - ioc->cbfn->reset_cbfn(ioc->bfa); + bfa_ioc_poll_fwinit(ioc); return; } @@ -1452,7 +1585,6 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) * be flushed. Otherwise MSI-X interrupts are not delivered. */ bfa_ioc_msgflush(ioc); - ioc->cbfn->reset_cbfn(ioc->bfa); bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY); return; } @@ -1461,6 +1593,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) * Initialize the h/w for any other states. 
*/ bfa_ioc_boot(ioc, boot_type, boot_env); + bfa_ioc_poll_fwinit(ioc); } static void @@ -1508,7 +1641,7 @@ bfa_ioc_send_enable(struct bfa_ioc_s *ioc) bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ, bfa_ioc_portid(ioc)); - enable_req.ioc_class = ioc->ioc_mc; + enable_req.clscode = cpu_to_be16(ioc->clscode); do_gettimeofday(&tv); enable_req.tv_sec = be32_to_cpu(tv.tv_sec); bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s)); @@ -1572,25 +1705,26 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type, u32 loff = 0; u32 chunkno = 0; u32 i; + u32 asicmode; /* * Initialize LMEM first before code download */ bfa_ioc_lmem_init(ioc); - bfa_trc(ioc, bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc))); - fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno); + bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc))); + fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno); pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff); pgoff = PSS_SMEM_PGOFF(loff); writel(pgnum, ioc->ioc_regs.host_page_num_fn); - for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) { + for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) { if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) { chunkno = BFA_IOC_FLASH_CHUNK_NO(i); - fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), + fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); } @@ -1616,11 +1750,15 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type, ioc->ioc_regs.host_page_num_fn); /* - * Set boot type and boot param at the end. - */ - bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF, + * Set boot type and device mode at the end. + */ + asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode, + ioc->port0_mode, ioc->port1_mode); + bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF, + swab32(asicmode)); + bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF, swab32(boot_type)); - bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_LOADER_OFF, + bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF, swab32(boot_env)); } @@ -1636,6 +1774,7 @@ bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc) attr->adapter_prop = be32_to_cpu(attr->adapter_prop); attr->card_type = be32_to_cpu(attr->card_type); attr->maxfrsize = be16_to_cpu(attr->maxfrsize); + ioc->fcmode = (attr->port_mode == BFI_PORT_MODE_FC); bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR); } @@ -1690,7 +1829,7 @@ bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc) * Cleanup any pending requests. */ static void -bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc) +bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc) { struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; struct bfa_mbox_cmd_s *cmd; @@ -1752,6 +1891,7 @@ bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz) /* * release semaphore. */ + readl(ioc->ioc_regs.ioc_init_sem_reg); writel(1, ioc->ioc_regs.ioc_init_sem_reg); bfa_trc(ioc, pgnum); @@ -1808,6 +1948,7 @@ bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz) /* * release semaphore. */ + readl(ioc->ioc_regs.ioc_init_sem_reg); writel(1, ioc->ioc_regs.ioc_init_sem_reg); bfa_trc(ioc, pgnum); return BFA_STATUS_OK; @@ -1816,23 +1957,19 @@ bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz) static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc) { - struct list_head *qe; - struct bfa_ioc_hbfail_notify_s *notify; struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; /* * Notify driver and common modules registered for notification. 
*/ ioc->cbfn->hbfail_cbfn(ioc->bfa); - list_for_each(qe, &ioc->hb_notify_q) { - notify = (struct bfa_ioc_hbfail_notify_s *) qe; - notify->cbfn(notify->cbarg); - } + bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED); bfa_ioc_debug_save_ftrc(ioc); BFA_LOG(KERN_CRIT, bfad, bfa_log_level, "Heart Beat of IOC has failed\n"); + bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL); } @@ -1847,6 +1984,7 @@ bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc) BFA_LOG(KERN_WARNING, bfad, bfa_log_level, "Running firmware version is incompatible " "with the driver version\n"); + bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH); } bfa_status_t @@ -1864,6 +2002,7 @@ bfa_ioc_pll_init(struct bfa_ioc_s *ioc) /* * release semaphore. */ + readl(ioc->ioc_regs.ioc_init_sem_reg); writel(1, ioc->ioc_regs.ioc_init_sem_reg); return BFA_STATUS_OK; @@ -1876,8 +2015,6 @@ bfa_ioc_pll_init(struct bfa_ioc_s *ioc) void bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env) { - void __iomem *rb; - bfa_ioc_stats(ioc, ioc_boots); if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK) @@ -1886,22 +2023,16 @@ bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env) /* * Initialize IOC state of all functions on a chip reset. */ - rb = ioc->pcidev.pci_bar_kva; - if (boot_type == BFI_BOOT_TYPE_MEMTEST) { - writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG)); - writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG)); + if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) { + writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate); + writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate); } else { - writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG)); - writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG)); + writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate); + writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate); } bfa_ioc_msgflush(ioc); bfa_ioc_download_fw(ioc, boot_type, boot_env); - - /* - * Enable interrupts just before starting LPU - */ - ioc->cbfn->reset_cbfn(ioc->bfa); bfa_ioc_lpu_start(ioc); } @@ -1932,13 +2063,17 @@ bfa_ioc_is_initialized(struct bfa_ioc_s *ioc) (r32 != BFI_IOC_MEMTEST)); } -void +bfa_boolean_t bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg) { __be32 *msgp = mbmsg; u32 r32; int i; + r32 = readl(ioc->ioc_regs.lpu_mbox_cmd); + if ((r32 & 1) == 0) + return BFA_FALSE; + /* * read the MBOX msg */ @@ -1954,6 +2089,8 @@ bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg) */ writel(1, ioc->ioc_regs.lpu_mbox_cmd); readl(ioc->ioc_regs.lpu_mbox_cmd); + + return BFA_TRUE; } void @@ -1970,11 +2107,10 @@ bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m) case BFI_IOC_I2H_HBEAT: break; - case BFI_IOC_I2H_READY_EVENT: - bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY); - break; - case BFI_IOC_I2H_ENABLE_REPLY: + ioc->port_mode = ioc->port_mode_cfg = + (enum bfa_mode_s)msg->fw_event.port_mode; + ioc->ad_cap_bm = msg->fw_event.cap_bm; bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE); break; @@ -1986,6 +2122,10 @@ bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m) bfa_ioc_getattr_reply(ioc); break; + case BFI_IOC_I2H_ACQ_ADDR_REPLY: + bfa_fsm_send_event(ioc, IOC_E_FWRSP_ACQ_ADDR); + break; + default: bfa_trc(ioc, msg->mh.msg_id); WARN_ON(1); @@ -2011,7 +2151,7 @@ bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn, ioc->iocpf.ioc = ioc; bfa_ioc_mbox_attach(ioc); - INIT_LIST_HEAD(&ioc->hb_notify_q); + INIT_LIST_HEAD(&ioc->notify_q); bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); bfa_fsm_send_event(ioc, IOC_E_RESET); @@ -2024,6 +2164,7 @@ void bfa_ioc_detach(struct bfa_ioc_s *ioc) { bfa_fsm_send_event(ioc, IOC_E_DETACH); + 
INIT_LIST_HEAD(&ioc->notify_q); } /* @@ -2033,20 +2174,80 @@ bfa_ioc_detach(struct bfa_ioc_s *ioc) */ void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev, - enum bfi_mclass mc) + enum bfi_pcifn_class clscode) { - ioc->ioc_mc = mc; + ioc->clscode = clscode; ioc->pcidev = *pcidev; - ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id); - ioc->cna = ioc->ctdev && !ioc->fcmode; + + /* + * Initialize IOC and device personality + */ + ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC; + ioc->asic_mode = BFI_ASIC_MODE_FC; + + switch (pcidev->device_id) { + case BFA_PCI_DEVICE_ID_FC_8G1P: + case BFA_PCI_DEVICE_ID_FC_8G2P: + ioc->asic_gen = BFI_ASIC_GEN_CB; + ioc->fcmode = BFA_TRUE; + ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA; + ioc->ad_cap_bm = BFA_CM_HBA; + break; + + case BFA_PCI_DEVICE_ID_CT: + ioc->asic_gen = BFI_ASIC_GEN_CT; + ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH; + ioc->asic_mode = BFI_ASIC_MODE_ETH; + ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA; + ioc->ad_cap_bm = BFA_CM_CNA; + break; + + case BFA_PCI_DEVICE_ID_CT_FC: + ioc->asic_gen = BFI_ASIC_GEN_CT; + ioc->fcmode = BFA_TRUE; + ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA; + ioc->ad_cap_bm = BFA_CM_HBA; + break; + + case BFA_PCI_DEVICE_ID_CT2: + ioc->asic_gen = BFI_ASIC_GEN_CT2; + if (clscode == BFI_PCIFN_CLASS_FC && + pcidev->ssid == BFA_PCI_CT2_SSID_FC) { + ioc->asic_mode = BFI_ASIC_MODE_FC16; + ioc->fcmode = BFA_TRUE; + ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA; + ioc->ad_cap_bm = BFA_CM_HBA; + } else { + ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH; + ioc->asic_mode = BFI_ASIC_MODE_ETH; + if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) { + ioc->port_mode = + ioc->port_mode_cfg = BFA_MODE_CNA; + ioc->ad_cap_bm = BFA_CM_CNA; + } else { + ioc->port_mode = + ioc->port_mode_cfg = BFA_MODE_NIC; + ioc->ad_cap_bm = BFA_CM_NIC; + } + } + break; + + default: + WARN_ON(1); + } /* * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c */ - if (ioc->ctdev) - bfa_ioc_set_ct_hwif(ioc); - else + if (ioc->asic_gen == BFI_ASIC_GEN_CB) bfa_ioc_set_cb_hwif(ioc); + else if (ioc->asic_gen == BFI_ASIC_GEN_CT) + bfa_ioc_set_ct_hwif(ioc); + else { + WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2); + bfa_ioc_set_ct2_hwif(ioc); + bfa_ioc_ct2_poweron(ioc); + } bfa_ioc_map_port(ioc); bfa_ioc_reg_init(ioc); @@ -2172,36 +2373,38 @@ bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc) struct bfi_mbmsg_s m; int mc; - bfa_ioc_msgget(ioc, &m); + if (bfa_ioc_msgget(ioc, &m)) { + /* + * Treat IOC message class as special. + */ + mc = m.mh.msg_class; + if (mc == BFI_MC_IOC) { + bfa_ioc_isr(ioc, &m); + return; + } - /* - * Treat IOC message class as special. 
- */ - mc = m.mh.msg_class; - if (mc == BFI_MC_IOC) { - bfa_ioc_isr(ioc, &m); - return; + if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL)) + return; + + mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m); } - if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL)) - return; + bfa_ioc_lpu_read_stat(ioc); - mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m); + /* + * Try to send pending mailbox commands + */ + bfa_ioc_mbox_poll(ioc); } void bfa_ioc_error_isr(struct bfa_ioc_s *ioc) { + bfa_ioc_stats(ioc, ioc_hbfails); + ioc->stats.hb_count = ioc->hb_count; bfa_fsm_send_event(ioc, IOC_E_HWERROR); } -void -bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc) -{ - ioc->fcmode = BFA_TRUE; - ioc->port_id = bfa_ioc_pcifn(ioc); -} - /* * return true if IOC is disabled */ @@ -2213,6 +2416,15 @@ bfa_ioc_is_disabled(struct bfa_ioc_s *ioc) } /* + * Return TRUE if IOC is in acquiring address state + */ +bfa_boolean_t +bfa_ioc_is_acq_addr(struct bfa_ioc_s *ioc) +{ + return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_acq_addr); +} + +/* * return true if IOC firmware is different. */ bfa_boolean_t @@ -2239,17 +2451,16 @@ bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc) { u32 ioc_state; - void __iomem *rb = ioc->pcidev.pci_bar_kva; if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled)) return BFA_FALSE; - ioc_state = readl(rb + BFA_IOC0_STATE_REG); + ioc_state = readl(ioc->ioc_regs.ioc_fwstate); if (!bfa_ioc_state_disabled(ioc_state)) return BFA_FALSE; if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) { - ioc_state = readl(rb + BFA_IOC1_STATE_REG); + ioc_state = readl(ioc->ioc_regs.alt_ioc_fwstate); if (!bfa_ioc_state_disabled(ioc_state)) return BFA_FALSE; } @@ -2308,24 +2519,21 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc, bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver); - ad_attr->cna_capable = ioc->cna; - ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna && - !ad_attr->is_mezz; + ad_attr->cna_capable = bfa_ioc_is_cna(ioc); + ad_attr->trunk_capable = (ad_attr->nports > 1) && + !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz; } enum bfa_ioc_type_e bfa_ioc_get_type(struct bfa_ioc_s *ioc) { - if (!ioc->ctdev || ioc->fcmode) - return BFA_IOC_TYPE_FC; - else if (ioc->ioc_mc == BFI_MC_IOCFC) - return BFA_IOC_TYPE_FCoE; - else if (ioc->ioc_mc == BFI_MC_LL) - return BFA_IOC_TYPE_LL; - else { - WARN_ON(ioc->ioc_mc != BFI_MC_LL); + if (ioc->clscode == BFI_PCIFN_CLASS_ETH) return BFA_IOC_TYPE_LL; - } + + WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC); + + return (ioc->attr->port_mode == BFI_PORT_MODE_FC) + ? 
BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE; } void @@ -2384,11 +2592,8 @@ bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model) ioc_attr = ioc->attr; - /* - * model name - */ snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u", - BFA_MFG_NAME, ioc_attr->card_type); + BFA_MFG_NAME, ioc_attr->card_type); } enum bfa_ioc_state @@ -2438,6 +2643,9 @@ bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr) ioc_attr->state = bfa_ioc_get_state(ioc); ioc_attr->port_id = ioc->port_id; + ioc_attr->port_mode = ioc->port_mode; + ioc_attr->port_mode_cfg = ioc->port_mode_cfg; + ioc_attr->cap_bm = ioc->ad_cap_bm; ioc_attr->ioc_type = bfa_ioc_get_type(ioc); @@ -2475,10 +2683,41 @@ bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc) return m; } -bfa_boolean_t -bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc) +/* + * Send AEN notification + */ +void +bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event) { - return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id); + struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; + struct bfa_aen_entry_s *aen_entry; + enum bfa_ioc_type_e ioc_type; + + bfad_get_aen_entry(bfad, aen_entry); + if (!aen_entry) + return; + + ioc_type = bfa_ioc_get_type(ioc); + switch (ioc_type) { + case BFA_IOC_TYPE_FC: + aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn; + break; + case BFA_IOC_TYPE_FCoE: + aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn; + aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc); + break; + case BFA_IOC_TYPE_LL: + aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc); + break; + default: + WARN_ON(ioc_type != BFA_IOC_TYPE_FC); + break; + } + + /* Send the AEN notification */ + aen_entry->aen_data.ioc.ioc_type = ioc_type; + bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq, + BFA_AEN_CAT_IOC, event); } /* @@ -2531,7 +2770,7 @@ bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc) bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC, bfa_ioc_portid(ioc)); - req->ioc_class = ioc->ioc_mc; + req->clscode = cpu_to_be16(ioc->clscode); bfa_ioc_mbox_queue(ioc, &cmd); } @@ -2673,6 +2912,7 @@ static void bfa_ioc_recover(struct bfa_ioc_s *ioc) { bfa_ioc_stats(ioc, ioc_hbfails); + ioc->stats.hb_count = ioc->hb_count; bfa_fsm_send_event(ioc, IOC_E_HBFAIL); } @@ -2681,6 +2921,10 @@ bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc) { if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL) return; + if (ioc->attr->nwwn == 0) + bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_NWWN); + if (ioc->attr->pwwn == 0) + bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_PWWN); } /* @@ -2703,6 +2947,34 @@ bfa_iocpf_sem_timeout(void *ioc_arg) bfa_ioc_hw_sem_get(ioc); } +static void +bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc) +{ + u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate); + + bfa_trc(ioc, fwstate); + + if (fwstate == BFI_IOC_DISABLED) { + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY); + return; + } + + if (ioc->iocpf.poll_time >= BFA_IOC_TOV) + bfa_iocpf_timeout(ioc); + else { + ioc->iocpf.poll_time += BFA_IOC_POLL_TOV; + bfa_iocpf_poll_timer_start(ioc); + } +} + +static void +bfa_iocpf_poll_timeout(void *ioc_arg) +{ + struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg; + + bfa_ioc_poll_fwinit(ioc); +} + /* * bfa timer function */ @@ -2770,3 +3042,2942 @@ bfa_timer_stop(struct bfa_timer_s *timer) list_del(&timer->qe); } + +/* + * ASIC block related + */ +static void +bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg) +{ + struct bfa_ablk_cfg_inst_s *cfg_inst; + int i, j; + u16 be16; + u32 be32; + + for (i = 0; i < BFA_ABLK_MAX; i++) { + cfg_inst = &cfg->inst[i]; + for (j = 0; j < 
BFA_ABLK_MAX_PFS; j++) { + be16 = cfg_inst->pf_cfg[j].pers; + cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16); + be16 = cfg_inst->pf_cfg[j].num_qpairs; + cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16); + be16 = cfg_inst->pf_cfg[j].num_vectors; + cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16); + be32 = cfg_inst->pf_cfg[j].bw; + cfg_inst->pf_cfg[j].bw = be16_to_cpu(be32); + } + } +} + +static void +bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg) +{ + struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg; + struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg; + bfa_ablk_cbfn_t cbfn; + + WARN_ON(msg->mh.msg_class != BFI_MC_ABLK); + bfa_trc(ablk->ioc, msg->mh.msg_id); + + switch (msg->mh.msg_id) { + case BFI_ABLK_I2H_QUERY: + if (rsp->status == BFA_STATUS_OK) { + memcpy(ablk->cfg, ablk->dma_addr.kva, + sizeof(struct bfa_ablk_cfg_s)); + bfa_ablk_config_swap(ablk->cfg); + ablk->cfg = NULL; + } + break; + + case BFI_ABLK_I2H_ADPT_CONFIG: + case BFI_ABLK_I2H_PORT_CONFIG: + /* update config port mode */ + ablk->ioc->port_mode_cfg = rsp->port_mode; + + case BFI_ABLK_I2H_PF_DELETE: + case BFI_ABLK_I2H_PF_UPDATE: + case BFI_ABLK_I2H_OPTROM_ENABLE: + case BFI_ABLK_I2H_OPTROM_DISABLE: + /* No-op */ + break; + + case BFI_ABLK_I2H_PF_CREATE: + *(ablk->pcifn) = rsp->pcifn; + ablk->pcifn = NULL; + break; + + default: + WARN_ON(1); + } + + ablk->busy = BFA_FALSE; + if (ablk->cbfn) { + cbfn = ablk->cbfn; + ablk->cbfn = NULL; + cbfn(ablk->cbarg, rsp->status); + } +} + +static void +bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event) +{ + struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg; + + bfa_trc(ablk->ioc, event); + + switch (event) { + case BFA_IOC_E_ENABLED: + WARN_ON(ablk->busy != BFA_FALSE); + break; + + case BFA_IOC_E_DISABLED: + case BFA_IOC_E_FAILED: + /* Fail any pending requests */ + ablk->pcifn = NULL; + if (ablk->busy) { + if (ablk->cbfn) + ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED); + ablk->cbfn = NULL; + ablk->busy = BFA_FALSE; + } + break; + + default: + WARN_ON(1); + break; + } +} + +u32 +bfa_ablk_meminfo(void) +{ + return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ); +} + +void +bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa) +{ + ablk->dma_addr.kva = dma_kva; + ablk->dma_addr.pa = dma_pa; +} + +void +bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc) +{ + ablk->ioc = ioc; + + bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk); + bfa_q_qe_init(&ablk->ioc_notify); + bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk); + list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q); +} + +bfa_status_t +bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg, + bfa_ablk_cbfn_t cbfn, void *cbarg) +{ + struct bfi_ablk_h2i_query_s *m; + + WARN_ON(!ablk_cfg); + + if (!bfa_ioc_is_operational(ablk->ioc)) { + bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); + return BFA_STATUS_IOC_FAILURE; + } + + if (ablk->busy) { + bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); + return BFA_STATUS_DEVBUSY; + } + + ablk->cfg = ablk_cfg; + ablk->cbfn = cbfn; + ablk->cbarg = cbarg; + ablk->busy = BFA_TRUE; + + m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg; + bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY, + bfa_ioc_portid(ablk->ioc)); + bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa); + bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); + + return BFA_STATUS_OK; +} + +bfa_status_t +bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn, + u8 port, enum bfi_pcifn_class personality, int bw, + bfa_ablk_cbfn_t cbfn, void 
*cbarg) +{ + struct bfi_ablk_h2i_pf_req_s *m; + + if (!bfa_ioc_is_operational(ablk->ioc)) { + bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); + return BFA_STATUS_IOC_FAILURE; + } + + if (ablk->busy) { + bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); + return BFA_STATUS_DEVBUSY; + } + + ablk->pcifn = pcifn; + ablk->cbfn = cbfn; + ablk->cbarg = cbarg; + ablk->busy = BFA_TRUE; + + m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg; + bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE, + bfa_ioc_portid(ablk->ioc)); + m->pers = cpu_to_be16((u16)personality); + m->bw = cpu_to_be32(bw); + m->port = port; + bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); + + return BFA_STATUS_OK; +} + +bfa_status_t +bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn, + bfa_ablk_cbfn_t cbfn, void *cbarg) +{ + struct bfi_ablk_h2i_pf_req_s *m; + + if (!bfa_ioc_is_operational(ablk->ioc)) { + bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); + return BFA_STATUS_IOC_FAILURE; + } + + if (ablk->busy) { + bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); + return BFA_STATUS_DEVBUSY; + } + + ablk->cbfn = cbfn; + ablk->cbarg = cbarg; + ablk->busy = BFA_TRUE; + + m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg; + bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE, + bfa_ioc_portid(ablk->ioc)); + m->pcifn = (u8)pcifn; + bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); + + return BFA_STATUS_OK; +} + +bfa_status_t +bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode, + int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg) +{ + struct bfi_ablk_h2i_cfg_req_s *m; + + if (!bfa_ioc_is_operational(ablk->ioc)) { + bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); + return BFA_STATUS_IOC_FAILURE; + } + + if (ablk->busy) { + bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); + return BFA_STATUS_DEVBUSY; + } + + ablk->cbfn = cbfn; + ablk->cbarg = cbarg; + ablk->busy = BFA_TRUE; + + m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg; + bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG, + bfa_ioc_portid(ablk->ioc)); + m->mode = (u8)mode; + m->max_pf = (u8)max_pf; + m->max_vf = (u8)max_vf; + bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); + + return BFA_STATUS_OK; +} + +bfa_status_t +bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode, + int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg) +{ + struct bfi_ablk_h2i_cfg_req_s *m; + + if (!bfa_ioc_is_operational(ablk->ioc)) { + bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); + return BFA_STATUS_IOC_FAILURE; + } + + if (ablk->busy) { + bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); + return BFA_STATUS_DEVBUSY; + } + + ablk->cbfn = cbfn; + ablk->cbarg = cbarg; + ablk->busy = BFA_TRUE; + + m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg; + bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG, + bfa_ioc_portid(ablk->ioc)); + m->port = (u8)port; + m->mode = (u8)mode; + m->max_pf = (u8)max_pf; + m->max_vf = (u8)max_vf; + bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); + + return BFA_STATUS_OK; +} + +bfa_status_t +bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw, + bfa_ablk_cbfn_t cbfn, void *cbarg) +{ + struct bfi_ablk_h2i_pf_req_s *m; + + if (!bfa_ioc_is_operational(ablk->ioc)) { + bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); + return BFA_STATUS_IOC_FAILURE; + } + + if (ablk->busy) { + bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); + return BFA_STATUS_DEVBUSY; + } + + ablk->cbfn = cbfn; + ablk->cbarg = cbarg; + ablk->busy = BFA_TRUE; + + m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg; + bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE, + bfa_ioc_portid(ablk->ioc)); + m->pcifn = (u8)pcifn; + 
m->bw = cpu_to_be32(bw); + bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); + + return BFA_STATUS_OK; +} + +bfa_status_t +bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg) +{ + struct bfi_ablk_h2i_optrom_s *m; + + if (!bfa_ioc_is_operational(ablk->ioc)) { + bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); + return BFA_STATUS_IOC_FAILURE; + } + + if (ablk->busy) { + bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); + return BFA_STATUS_DEVBUSY; + } + + ablk->cbfn = cbfn; + ablk->cbarg = cbarg; + ablk->busy = BFA_TRUE; + + m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg; + bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE, + bfa_ioc_portid(ablk->ioc)); + bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); + + return BFA_STATUS_OK; +} + +bfa_status_t +bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg) +{ + struct bfi_ablk_h2i_optrom_s *m; + + if (!bfa_ioc_is_operational(ablk->ioc)) { + bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); + return BFA_STATUS_IOC_FAILURE; + } + + if (ablk->busy) { + bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); + return BFA_STATUS_DEVBUSY; + } + + ablk->cbfn = cbfn; + ablk->cbarg = cbarg; + ablk->busy = BFA_TRUE; + + m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg; + bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE, + bfa_ioc_portid(ablk->ioc)); + bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); + + return BFA_STATUS_OK; +} + +/* + * SFP module specific + */ + +/* forward declarations */ +static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp); +static void bfa_sfp_media_get(struct bfa_sfp_s *sfp); +static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, + enum bfa_port_speed portspeed); + +static void +bfa_cb_sfp_show(struct bfa_sfp_s *sfp) +{ + bfa_trc(sfp, sfp->lock); + if (sfp->cbfn) + sfp->cbfn(sfp->cbarg, sfp->status); + sfp->lock = 0; + sfp->cbfn = NULL; +} + +static void +bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp) +{ + bfa_trc(sfp, sfp->portspeed); + if (sfp->media) { + bfa_sfp_media_get(sfp); + if (sfp->state_query_cbfn) + sfp->state_query_cbfn(sfp->state_query_cbarg, + sfp->status); + sfp->media = NULL; + } + + if (sfp->portspeed) { + sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed); + if (sfp->state_query_cbfn) + sfp->state_query_cbfn(sfp->state_query_cbarg, + sfp->status); + sfp->portspeed = BFA_PORT_SPEED_UNKNOWN; + } + + sfp->state_query_lock = 0; + sfp->state_query_cbfn = NULL; +} + +/* + * IOC event handler. 
+ */ +static void +bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event) +{ + struct bfa_sfp_s *sfp = sfp_arg; + + bfa_trc(sfp, event); + bfa_trc(sfp, sfp->lock); + bfa_trc(sfp, sfp->state_query_lock); + + switch (event) { + case BFA_IOC_E_DISABLED: + case BFA_IOC_E_FAILED: + if (sfp->lock) { + sfp->status = BFA_STATUS_IOC_FAILURE; + bfa_cb_sfp_show(sfp); + } + + if (sfp->state_query_lock) { + sfp->status = BFA_STATUS_IOC_FAILURE; + bfa_cb_sfp_state_query(sfp); + } + break; + + default: + break; + } +} + +/* + * SFP's State Change Notification post to AEN + */ +static void +bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp) +{ + struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad; + struct bfa_aen_entry_s *aen_entry; + enum bfa_port_aen_event aen_evt = 0; + + bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) | + ((u64)rsp->event)); + + bfad_get_aen_entry(bfad, aen_entry); + if (!aen_entry) + return; + + aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc); + aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn; + aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc); + + switch (rsp->event) { + case BFA_SFP_SCN_INSERTED: + aen_evt = BFA_PORT_AEN_SFP_INSERT; + break; + case BFA_SFP_SCN_REMOVED: + aen_evt = BFA_PORT_AEN_SFP_REMOVE; + break; + case BFA_SFP_SCN_FAILED: + aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR; + break; + case BFA_SFP_SCN_UNSUPPORT: + aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT; + break; + case BFA_SFP_SCN_POM: + aen_evt = BFA_PORT_AEN_SFP_POM; + aen_entry->aen_data.port.level = rsp->pomlvl; + break; + default: + bfa_trc(sfp, rsp->event); + WARN_ON(1); + } + + /* Send the AEN notification */ + bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq, + BFA_AEN_CAT_PORT, aen_evt); +} + +/* + * SFP get data send + */ +static void +bfa_sfp_getdata_send(struct bfa_sfp_s *sfp) +{ + struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg; + + bfa_trc(sfp, req->memtype); + + /* build host command */ + bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW, + bfa_ioc_portid(sfp->ioc)); + + /* send mbox cmd */ + bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd); +} + +/* + * SFP is valid, read sfp data + */ +static void +bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype) +{ + struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg; + + WARN_ON(sfp->lock != 0); + bfa_trc(sfp, sfp->state); + + sfp->lock = 1; + sfp->memtype = memtype; + req->memtype = memtype; + + /* Setup SG list */ + bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa); + + bfa_sfp_getdata_send(sfp); +} + +/* + * SFP scn handler + */ +static void +bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg) +{ + struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg; + + switch (rsp->event) { + case BFA_SFP_SCN_INSERTED: + sfp->state = BFA_SFP_STATE_INSERTED; + sfp->data_valid = 0; + bfa_sfp_scn_aen_post(sfp, rsp); + break; + case BFA_SFP_SCN_REMOVED: + sfp->state = BFA_SFP_STATE_REMOVED; + sfp->data_valid = 0; + bfa_sfp_scn_aen_post(sfp, rsp); + break; + case BFA_SFP_SCN_FAILED: + sfp->state = BFA_SFP_STATE_FAILED; + sfp->data_valid = 0; + bfa_sfp_scn_aen_post(sfp, rsp); + break; + case BFA_SFP_SCN_UNSUPPORT: + sfp->state = BFA_SFP_STATE_UNSUPPORT; + bfa_sfp_scn_aen_post(sfp, rsp); + if (!sfp->lock) + bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL); + break; + case BFA_SFP_SCN_POM: + bfa_sfp_scn_aen_post(sfp, rsp); + break; + case BFA_SFP_SCN_VALID: + sfp->state = BFA_SFP_STATE_VALID; + if (!sfp->lock) + bfa_sfp_getdata(sfp, 
BFI_SFP_MEM_ALL); + break; + default: + bfa_trc(sfp, rsp->event); + WARN_ON(1); + } +} + +/* + * SFP show complete + */ +static void +bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg) +{ + struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg; + + if (!sfp->lock) { + /* + * receiving response after ioc failure + */ + bfa_trc(sfp, sfp->lock); + return; + } + + bfa_trc(sfp, rsp->status); + if (rsp->status == BFA_STATUS_OK) { + sfp->data_valid = 1; + if (sfp->state == BFA_SFP_STATE_VALID) + sfp->status = BFA_STATUS_OK; + else if (sfp->state == BFA_SFP_STATE_UNSUPPORT) + sfp->status = BFA_STATUS_SFP_UNSUPP; + else + bfa_trc(sfp, sfp->state); + } else { + sfp->data_valid = 0; + sfp->status = rsp->status; + /* sfpshow shouldn't change sfp state */ + } + + bfa_trc(sfp, sfp->memtype); + if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) { + bfa_trc(sfp, sfp->data_valid); + if (sfp->data_valid) { + u32 size = sizeof(struct sfp_mem_s); + u8 *des = (u8 *) &(sfp->sfpmem->srlid_base); + memcpy(des, sfp->dbuf_kva, size); + } + /* + * Queue completion callback. + */ + bfa_cb_sfp_show(sfp); + } else + sfp->lock = 0; + + bfa_trc(sfp, sfp->state_query_lock); + if (sfp->state_query_lock) { + sfp->state = rsp->state; + /* Complete callback */ + bfa_cb_sfp_state_query(sfp); + } +} + +/* + * SFP query fw sfp state + */ +static void +bfa_sfp_state_query(struct bfa_sfp_s *sfp) +{ + struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg; + + /* Should not be doing query if not in _INIT state */ + WARN_ON(sfp->state != BFA_SFP_STATE_INIT); + WARN_ON(sfp->state_query_lock != 0); + bfa_trc(sfp, sfp->state); + + sfp->state_query_lock = 1; + req->memtype = 0; + + if (!sfp->lock) + bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL); +} + +static void +bfa_sfp_media_get(struct bfa_sfp_s *sfp) +{ + enum bfa_defs_sfp_media_e *media = sfp->media; + + *media = BFA_SFP_MEDIA_UNKNOWN; + + if (sfp->state == BFA_SFP_STATE_UNSUPPORT) + *media = BFA_SFP_MEDIA_UNSUPPORT; + else if (sfp->state == BFA_SFP_STATE_VALID) { + union sfp_xcvr_e10g_code_u e10g; + struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva; + u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 | + (sfpmem->srlid_base.xcvr[5] >> 1); + + e10g.b = sfpmem->srlid_base.xcvr[0]; + bfa_trc(sfp, e10g.b); + bfa_trc(sfp, xmtr_tech); + /* check fc transmitter tech */ + if ((xmtr_tech & SFP_XMTR_TECH_CU) || + (xmtr_tech & SFP_XMTR_TECH_CP) || + (xmtr_tech & SFP_XMTR_TECH_CA)) + *media = BFA_SFP_MEDIA_CU; + else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) || + (xmtr_tech & SFP_XMTR_TECH_EL_INTER)) + *media = BFA_SFP_MEDIA_EL; + else if ((xmtr_tech & SFP_XMTR_TECH_LL) || + (xmtr_tech & SFP_XMTR_TECH_LC)) + *media = BFA_SFP_MEDIA_LW; + else if ((xmtr_tech & SFP_XMTR_TECH_SL) || + (xmtr_tech & SFP_XMTR_TECH_SN) || + (xmtr_tech & SFP_XMTR_TECH_SA)) + *media = BFA_SFP_MEDIA_SW; + /* Check 10G Ethernet Compilance code */ + else if (e10g.b & 0x10) + *media = BFA_SFP_MEDIA_SW; + else if (e10g.b & 0x60) + *media = BFA_SFP_MEDIA_LW; + else if (e10g.r.e10g_unall & 0x80) + *media = BFA_SFP_MEDIA_UNKNOWN; + else + bfa_trc(sfp, 0); + } else + bfa_trc(sfp, sfp->state); +} + +static bfa_status_t +bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed) +{ + struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva; + struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr; + union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3; + union sfp_xcvr_e10g_code_u e10g = xcvr->e10g; + + if (portspeed == BFA_PORT_SPEED_10GBPS) { + if (e10g.r.e10g_sr || 
e10g.r.e10g_lr) + return BFA_STATUS_OK; + else { + bfa_trc(sfp, e10g.b); + return BFA_STATUS_UNSUPP_SPEED; + } + } + if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) || + ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) || + ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) || + ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) || + ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100)) + return BFA_STATUS_OK; + else { + bfa_trc(sfp, portspeed); + bfa_trc(sfp, fc3.b); + bfa_trc(sfp, e10g.b); + return BFA_STATUS_UNSUPP_SPEED; + } +} + +/* + * SFP hmbox handler + */ +void +bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg) +{ + struct bfa_sfp_s *sfp = sfparg; + + switch (msg->mh.msg_id) { + case BFI_SFP_I2H_SHOW: + bfa_sfp_show_comp(sfp, msg); + break; + + case BFI_SFP_I2H_SCN: + bfa_sfp_scn(sfp, msg); + break; + + default: + bfa_trc(sfp, msg->mh.msg_id); + WARN_ON(1); + } +} + +/* + * Return DMA memory needed by sfp module. + */ +u32 +bfa_sfp_meminfo(void) +{ + return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ); +} + +/* + * Attach virtual and physical memory for SFP. + */ +void +bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev, + struct bfa_trc_mod_s *trcmod) +{ + sfp->dev = dev; + sfp->ioc = ioc; + sfp->trcmod = trcmod; + + sfp->cbfn = NULL; + sfp->cbarg = NULL; + sfp->sfpmem = NULL; + sfp->lock = 0; + sfp->data_valid = 0; + sfp->state = BFA_SFP_STATE_INIT; + sfp->state_query_lock = 0; + sfp->state_query_cbfn = NULL; + sfp->state_query_cbarg = NULL; + sfp->media = NULL; + sfp->portspeed = BFA_PORT_SPEED_UNKNOWN; + sfp->is_elb = BFA_FALSE; + + bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp); + bfa_q_qe_init(&sfp->ioc_notify); + bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp); + list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q); +} + +/* + * Claim Memory for SFP + */ +void +bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa) +{ + sfp->dbuf_kva = dm_kva; + sfp->dbuf_pa = dm_pa; + memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s)); + + dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ); + dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ); +} + +/* + * Show SFP eeprom content + * + * @param[in] sfp - bfa sfp module + * + * @param[out] sfpmem - sfp eeprom data + * + */ +bfa_status_t +bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem, + bfa_cb_sfp_t cbfn, void *cbarg) +{ + + if (!bfa_ioc_is_operational(sfp->ioc)) { + bfa_trc(sfp, 0); + return BFA_STATUS_IOC_NON_OP; + } + + if (sfp->lock) { + bfa_trc(sfp, 0); + return BFA_STATUS_DEVBUSY; + } + + sfp->cbfn = cbfn; + sfp->cbarg = cbarg; + sfp->sfpmem = sfpmem; + + bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT); + return BFA_STATUS_OK; +} + +/* + * Return SFP Media type + * + * @param[in] sfp - bfa sfp module + * + * @param[out] media - port speed from user + * + */ +bfa_status_t +bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media, + bfa_cb_sfp_t cbfn, void *cbarg) +{ + if (!bfa_ioc_is_operational(sfp->ioc)) { + bfa_trc(sfp, 0); + return BFA_STATUS_IOC_NON_OP; + } + + sfp->media = media; + if (sfp->state == BFA_SFP_STATE_INIT) { + if (sfp->state_query_lock) { + bfa_trc(sfp, 0); + return BFA_STATUS_DEVBUSY; + } else { + sfp->state_query_cbfn = cbfn; + sfp->state_query_cbarg = cbarg; + bfa_sfp_state_query(sfp); + return BFA_STATUS_SFP_NOT_READY; + } + } + + bfa_sfp_media_get(sfp); + return BFA_STATUS_OK; +} + +/* + * Check if user set port speed is allowed by the SFP + * + * @param[in] sfp - bfa sfp module + 
* @param[in] portspeed - port speed from user + * + */ +bfa_status_t +bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed, + bfa_cb_sfp_t cbfn, void *cbarg) +{ + WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN); + + if (!bfa_ioc_is_operational(sfp->ioc)) + return BFA_STATUS_IOC_NON_OP; + + /* For Mezz card, all speed is allowed */ + if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type)) + return BFA_STATUS_OK; + + /* Check SFP state */ + sfp->portspeed = portspeed; + if (sfp->state == BFA_SFP_STATE_INIT) { + if (sfp->state_query_lock) { + bfa_trc(sfp, 0); + return BFA_STATUS_DEVBUSY; + } else { + sfp->state_query_cbfn = cbfn; + sfp->state_query_cbarg = cbarg; + bfa_sfp_state_query(sfp); + return BFA_STATUS_SFP_NOT_READY; + } + } + + if (sfp->state == BFA_SFP_STATE_REMOVED || + sfp->state == BFA_SFP_STATE_FAILED) { + bfa_trc(sfp, sfp->state); + return BFA_STATUS_NO_SFP_DEV; + } + + if (sfp->state == BFA_SFP_STATE_INSERTED) { + bfa_trc(sfp, sfp->state); + return BFA_STATUS_DEVBUSY; /* sfp is reading data */ + } + + /* For eloopback, all speed is allowed */ + if (sfp->is_elb) + return BFA_STATUS_OK; + + return bfa_sfp_speed_valid(sfp, portspeed); +} + +/* + * Flash module specific + */ + +/* + * FLASH DMA buffer should be big enough to hold both MFG block and + * asic block(64k) at the same time and also should be 2k aligned to + * avoid write segement to cross sector boundary. + */ +#define BFA_FLASH_SEG_SZ 2048 +#define BFA_FLASH_DMA_BUF_SZ \ + BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ) + +static void +bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event, + int inst, int type) +{ + struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; + struct bfa_aen_entry_s *aen_entry; + + bfad_get_aen_entry(bfad, aen_entry); + if (!aen_entry) + return; + + aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn; + aen_entry->aen_data.audit.partition_inst = inst; + aen_entry->aen_data.audit.partition_type = type; + + /* Send the AEN notification */ + bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq, + BFA_AEN_CAT_AUDIT, event); +} + +static void +bfa_flash_cb(struct bfa_flash_s *flash) +{ + flash->op_busy = 0; + if (flash->cbfn) + flash->cbfn(flash->cbarg, flash->status); +} + +static void +bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event) +{ + struct bfa_flash_s *flash = cbarg; + + bfa_trc(flash, event); + switch (event) { + case BFA_IOC_E_DISABLED: + case BFA_IOC_E_FAILED: + if (flash->op_busy) { + flash->status = BFA_STATUS_IOC_FAILURE; + flash->cbfn(flash->cbarg, flash->status); + flash->op_busy = 0; + } + break; + + default: + break; + } +} + +/* + * Send flash attribute query request. + * + * @param[in] cbarg - callback argument + */ +static void +bfa_flash_query_send(void *cbarg) +{ + struct bfa_flash_s *flash = cbarg; + struct bfi_flash_query_req_s *msg = + (struct bfi_flash_query_req_s *) flash->mb.msg; + + bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ, + bfa_ioc_portid(flash->ioc)); + bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s), + flash->dbuf_pa); + bfa_ioc_mbox_queue(flash->ioc, &flash->mb); +} + +/* + * Send flash write request. 
+ * + * @param[in] cbarg - callback argument + */ +static void +bfa_flash_write_send(struct bfa_flash_s *flash) +{ + struct bfi_flash_write_req_s *msg = + (struct bfi_flash_write_req_s *) flash->mb.msg; + u32 len; + + msg->type = be32_to_cpu(flash->type); + msg->instance = flash->instance; + msg->offset = be32_to_cpu(flash->addr_off + flash->offset); + len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ? + flash->residue : BFA_FLASH_DMA_BUF_SZ; + msg->length = be32_to_cpu(len); + + /* indicate if it's the last msg of the whole write operation */ + msg->last = (len == flash->residue) ? 1 : 0; + + bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ, + bfa_ioc_portid(flash->ioc)); + bfa_alen_set(&msg->alen, len, flash->dbuf_pa); + memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len); + bfa_ioc_mbox_queue(flash->ioc, &flash->mb); + + flash->residue -= len; + flash->offset += len; +} + +/* + * Send flash read request. + * + * @param[in] cbarg - callback argument + */ +static void +bfa_flash_read_send(void *cbarg) +{ + struct bfa_flash_s *flash = cbarg; + struct bfi_flash_read_req_s *msg = + (struct bfi_flash_read_req_s *) flash->mb.msg; + u32 len; + + msg->type = be32_to_cpu(flash->type); + msg->instance = flash->instance; + msg->offset = be32_to_cpu(flash->addr_off + flash->offset); + len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ? + flash->residue : BFA_FLASH_DMA_BUF_SZ; + msg->length = be32_to_cpu(len); + bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ, + bfa_ioc_portid(flash->ioc)); + bfa_alen_set(&msg->alen, len, flash->dbuf_pa); + bfa_ioc_mbox_queue(flash->ioc, &flash->mb); +} + +/* + * Send flash erase request. + * + * @param[in] cbarg - callback argument + */ +static void +bfa_flash_erase_send(void *cbarg) +{ + struct bfa_flash_s *flash = cbarg; + struct bfi_flash_erase_req_s *msg = + (struct bfi_flash_erase_req_s *) flash->mb.msg; + + msg->type = be32_to_cpu(flash->type); + msg->instance = flash->instance; + bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ, + bfa_ioc_portid(flash->ioc)); + bfa_ioc_mbox_queue(flash->ioc, &flash->mb); +} + +/* + * Process flash response messages upon receiving interrupts. 
+ * + * @param[in] flasharg - flash structure + * @param[in] msg - message structure + */ +static void +bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg) +{ + struct bfa_flash_s *flash = flasharg; + u32 status; + + union { + struct bfi_flash_query_rsp_s *query; + struct bfi_flash_erase_rsp_s *erase; + struct bfi_flash_write_rsp_s *write; + struct bfi_flash_read_rsp_s *read; + struct bfi_flash_event_s *event; + struct bfi_mbmsg_s *msg; + } m; + + m.msg = msg; + bfa_trc(flash, msg->mh.msg_id); + + if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) { + /* receiving response after ioc failure */ + bfa_trc(flash, 0x9999); + return; + } + + switch (msg->mh.msg_id) { + case BFI_FLASH_I2H_QUERY_RSP: + status = be32_to_cpu(m.query->status); + bfa_trc(flash, status); + if (status == BFA_STATUS_OK) { + u32 i; + struct bfa_flash_attr_s *attr, *f; + + attr = (struct bfa_flash_attr_s *) flash->ubuf; + f = (struct bfa_flash_attr_s *) flash->dbuf_kva; + attr->status = be32_to_cpu(f->status); + attr->npart = be32_to_cpu(f->npart); + bfa_trc(flash, attr->status); + bfa_trc(flash, attr->npart); + for (i = 0; i < attr->npart; i++) { + attr->part[i].part_type = + be32_to_cpu(f->part[i].part_type); + attr->part[i].part_instance = + be32_to_cpu(f->part[i].part_instance); + attr->part[i].part_off = + be32_to_cpu(f->part[i].part_off); + attr->part[i].part_size = + be32_to_cpu(f->part[i].part_size); + attr->part[i].part_len = + be32_to_cpu(f->part[i].part_len); + attr->part[i].part_status = + be32_to_cpu(f->part[i].part_status); + } + } + flash->status = status; + bfa_flash_cb(flash); + break; + case BFI_FLASH_I2H_ERASE_RSP: + status = be32_to_cpu(m.erase->status); + bfa_trc(flash, status); + flash->status = status; + bfa_flash_cb(flash); + break; + case BFI_FLASH_I2H_WRITE_RSP: + status = be32_to_cpu(m.write->status); + bfa_trc(flash, status); + if (status != BFA_STATUS_OK || flash->residue == 0) { + flash->status = status; + bfa_flash_cb(flash); + } else { + bfa_trc(flash, flash->offset); + bfa_flash_write_send(flash); + } + break; + case BFI_FLASH_I2H_READ_RSP: + status = be32_to_cpu(m.read->status); + bfa_trc(flash, status); + if (status != BFA_STATUS_OK) { + flash->status = status; + bfa_flash_cb(flash); + } else { + u32 len = be32_to_cpu(m.read->length); + bfa_trc(flash, flash->offset); + bfa_trc(flash, len); + memcpy(flash->ubuf + flash->offset, + flash->dbuf_kva, len); + flash->residue -= len; + flash->offset += len; + if (flash->residue == 0) { + flash->status = status; + bfa_flash_cb(flash); + } else + bfa_flash_read_send(flash); + } + break; + case BFI_FLASH_I2H_BOOT_VER_RSP: + break; + case BFI_FLASH_I2H_EVENT: + status = be32_to_cpu(m.event->status); + bfa_trc(flash, status); + if (status == BFA_STATUS_BAD_FWCFG) + bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR); + else if (status == BFA_STATUS_INVALID_VENDOR) { + u32 param; + param = be32_to_cpu(m.event->param); + bfa_trc(flash, param); + bfa_ioc_aen_post(flash->ioc, + BFA_IOC_AEN_INVALID_VENDOR); + } + break; + + default: + WARN_ON(1); + } +} + +/* + * Flash memory info API. + * + * @param[in] mincfg - minimal cfg variable + */ +u32 +bfa_flash_meminfo(bfa_boolean_t mincfg) +{ + /* min driver doesn't need flash */ + if (mincfg) + return 0; + return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); +} + +/* + * Flash attach API. 
+ * + * @param[in] flash - flash structure + * @param[in] ioc - ioc structure + * @param[in] dev - device structure + * @param[in] trcmod - trace module + * @param[in] logmod - log module + */ +void +bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev, + struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg) +{ + flash->ioc = ioc; + flash->trcmod = trcmod; + flash->cbfn = NULL; + flash->cbarg = NULL; + flash->op_busy = 0; + + bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash); + bfa_q_qe_init(&flash->ioc_notify); + bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash); + list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q); + + /* min driver doesn't need flash */ + if (mincfg) { + flash->dbuf_kva = NULL; + flash->dbuf_pa = 0; + } +} + +/* + * Claim memory for flash + * + * @param[in] flash - flash structure + * @param[in] dm_kva - pointer to virtual memory address + * @param[in] dm_pa - physical memory address + * @param[in] mincfg - minimal cfg variable + */ +void +bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa, + bfa_boolean_t mincfg) +{ + if (mincfg) + return; + + flash->dbuf_kva = dm_kva; + flash->dbuf_pa = dm_pa; + memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ); + dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); + dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); +} + +/* + * Get flash attribute. + * + * @param[in] flash - flash structure + * @param[in] attr - flash attribute structure + * @param[in] cbfn - callback function + * @param[in] cbarg - callback argument + * + * Return status. + */ +bfa_status_t +bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr, + bfa_cb_flash_t cbfn, void *cbarg) +{ + bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ); + + if (!bfa_ioc_is_operational(flash->ioc)) + return BFA_STATUS_IOC_NON_OP; + + if (flash->op_busy) { + bfa_trc(flash, flash->op_busy); + return BFA_STATUS_DEVBUSY; + } + + flash->op_busy = 1; + flash->cbfn = cbfn; + flash->cbarg = cbarg; + flash->ubuf = (u8 *) attr; + bfa_flash_query_send(flash); + + return BFA_STATUS_OK; +} + +/* + * Erase flash partition. + * + * @param[in] flash - flash structure + * @param[in] type - flash partition type + * @param[in] instance - flash partition instance + * @param[in] cbfn - callback function + * @param[in] cbarg - callback argument + * + * Return status. + */ +bfa_status_t +bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type, + u8 instance, bfa_cb_flash_t cbfn, void *cbarg) +{ + bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ); + bfa_trc(flash, type); + bfa_trc(flash, instance); + + if (!bfa_ioc_is_operational(flash->ioc)) + return BFA_STATUS_IOC_NON_OP; + + if (flash->op_busy) { + bfa_trc(flash, flash->op_busy); + return BFA_STATUS_DEVBUSY; + } + + flash->op_busy = 1; + flash->cbfn = cbfn; + flash->cbarg = cbarg; + flash->type = type; + flash->instance = instance; + + bfa_flash_erase_send(flash); + bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE, + instance, type); + return BFA_STATUS_OK; +} + +/* + * Update flash partition. + * + * @param[in] flash - flash structure + * @param[in] type - flash partition type + * @param[in] instance - flash partition instance + * @param[in] buf - update data buffer + * @param[in] len - data buffer length + * @param[in] offset - offset relative to the partition starting address + * @param[in] cbfn - callback function + * @param[in] cbarg - callback argument + * + * Return status. 
+ */ +bfa_status_t +bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type, + u8 instance, void *buf, u32 len, u32 offset, + bfa_cb_flash_t cbfn, void *cbarg) +{ + bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ); + bfa_trc(flash, type); + bfa_trc(flash, instance); + bfa_trc(flash, len); + bfa_trc(flash, offset); + + if (!bfa_ioc_is_operational(flash->ioc)) + return BFA_STATUS_IOC_NON_OP; + + /* + * 'len' must be in word (4-byte) boundary + * 'offset' must be in sector (16kb) boundary + */ + if (!len || (len & 0x03) || (offset & 0x00003FFF)) + return BFA_STATUS_FLASH_BAD_LEN; + + if (type == BFA_FLASH_PART_MFG) + return BFA_STATUS_EINVAL; + + if (flash->op_busy) { + bfa_trc(flash, flash->op_busy); + return BFA_STATUS_DEVBUSY; + } + + flash->op_busy = 1; + flash->cbfn = cbfn; + flash->cbarg = cbarg; + flash->type = type; + flash->instance = instance; + flash->residue = len; + flash->offset = 0; + flash->addr_off = offset; + flash->ubuf = buf; + + bfa_flash_write_send(flash); + return BFA_STATUS_OK; +} + +/* + * Read flash partition. + * + * @param[in] flash - flash structure + * @param[in] type - flash partition type + * @param[in] instance - flash partition instance + * @param[in] buf - read data buffer + * @param[in] len - data buffer length + * @param[in] offset - offset relative to the partition starting address + * @param[in] cbfn - callback function + * @param[in] cbarg - callback argument + * + * Return status. + */ +bfa_status_t +bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type, + u8 instance, void *buf, u32 len, u32 offset, + bfa_cb_flash_t cbfn, void *cbarg) +{ + bfa_trc(flash, BFI_FLASH_H2I_READ_REQ); + bfa_trc(flash, type); + bfa_trc(flash, instance); + bfa_trc(flash, len); + bfa_trc(flash, offset); + + if (!bfa_ioc_is_operational(flash->ioc)) + return BFA_STATUS_IOC_NON_OP; + + /* + * 'len' must be in word (4-byte) boundary + * 'offset' must be in sector (16kb) boundary + */ + if (!len || (len & 0x03) || (offset & 0x00003FFF)) + return BFA_STATUS_FLASH_BAD_LEN; + + if (flash->op_busy) { + bfa_trc(flash, flash->op_busy); + return BFA_STATUS_DEVBUSY; + } + + flash->op_busy = 1; + flash->cbfn = cbfn; + flash->cbarg = cbarg; + flash->type = type; + flash->instance = instance; + flash->residue = len; + flash->offset = 0; + flash->addr_off = offset; + flash->ubuf = buf; + bfa_flash_read_send(flash); + + return BFA_STATUS_OK; +} + +/* + * DIAG module specific + */ + +#define BFA_DIAG_MEMTEST_TOV 50000 /* memtest timeout in msec */ +#define BFA_DIAG_FWPING_TOV 1000 /* msec */ + +/* IOC event handler */ +static void +bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event) +{ + struct bfa_diag_s *diag = diag_arg; + + bfa_trc(diag, event); + bfa_trc(diag, diag->block); + bfa_trc(diag, diag->fwping.lock); + bfa_trc(diag, diag->tsensor.lock); + + switch (event) { + case BFA_IOC_E_DISABLED: + case BFA_IOC_E_FAILED: + if (diag->fwping.lock) { + diag->fwping.status = BFA_STATUS_IOC_FAILURE; + diag->fwping.cbfn(diag->fwping.cbarg, + diag->fwping.status); + diag->fwping.lock = 0; + } + + if (diag->tsensor.lock) { + diag->tsensor.status = BFA_STATUS_IOC_FAILURE; + diag->tsensor.cbfn(diag->tsensor.cbarg, + diag->tsensor.status); + diag->tsensor.lock = 0; + } + + if (diag->block) { + if (diag->timer_active) { + bfa_timer_stop(&diag->timer); + diag->timer_active = 0; + } + + diag->status = BFA_STATUS_IOC_FAILURE; + diag->cbfn(diag->cbarg, diag->status); + diag->block = 0; + } + break; + + default: + break; + } +} + +static void 
+bfa_diag_memtest_done(void *cbarg) +{ + struct bfa_diag_s *diag = cbarg; + struct bfa_ioc_s *ioc = diag->ioc; + struct bfa_diag_memtest_result *res = diag->result; + u32 loff = BFI_BOOT_MEMTEST_RES_ADDR; + u32 pgnum, pgoff, i; + + pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff); + pgoff = PSS_SMEM_PGOFF(loff); + + writel(pgnum, ioc->ioc_regs.host_page_num_fn); + + for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) / + sizeof(u32)); i++) { + /* read test result from smem */ + *((u32 *) res + i) = + bfa_mem_read(ioc->ioc_regs.smem_page_start, loff); + loff += sizeof(u32); + } + + /* Reset IOC fwstates to BFI_IOC_UNINIT */ + bfa_ioc_reset_fwstate(ioc); + + res->status = swab32(res->status); + bfa_trc(diag, res->status); + + if (res->status == BFI_BOOT_MEMTEST_RES_SIG) + diag->status = BFA_STATUS_OK; + else { + diag->status = BFA_STATUS_MEMTEST_FAILED; + res->addr = swab32(res->addr); + res->exp = swab32(res->exp); + res->act = swab32(res->act); + res->err_status = swab32(res->err_status); + res->err_status1 = swab32(res->err_status1); + res->err_addr = swab32(res->err_addr); + bfa_trc(diag, res->addr); + bfa_trc(diag, res->exp); + bfa_trc(diag, res->act); + bfa_trc(diag, res->err_status); + bfa_trc(diag, res->err_status1); + bfa_trc(diag, res->err_addr); + } + diag->timer_active = 0; + diag->cbfn(diag->cbarg, diag->status); + diag->block = 0; +} + +/* + * Firmware ping + */ + +/* + * Perform DMA test directly + */ +static void +diag_fwping_send(struct bfa_diag_s *diag) +{ + struct bfi_diag_fwping_req_s *fwping_req; + u32 i; + + bfa_trc(diag, diag->fwping.dbuf_pa); + + /* fill DMA area with pattern */ + for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) + *((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data; + + /* Fill mbox msg */ + fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg; + + /* Setup SG list */ + bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ, + diag->fwping.dbuf_pa); + /* Set up dma count */ + fwping_req->count = cpu_to_be32(diag->fwping.count); + /* Set up data pattern */ + fwping_req->data = diag->fwping.data; + + /* build host command */ + bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING, + bfa_ioc_portid(diag->ioc)); + + /* send mbox cmd */ + bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd); +} + +static void +diag_fwping_comp(struct bfa_diag_s *diag, + struct bfi_diag_fwping_rsp_s *diag_rsp) +{ + u32 rsp_data = diag_rsp->data; + u8 rsp_dma_status = diag_rsp->dma_status; + + bfa_trc(diag, rsp_data); + bfa_trc(diag, rsp_dma_status); + + if (rsp_dma_status == BFA_STATUS_OK) { + u32 i, pat; + pat = (diag->fwping.count & 0x1) ? 
~(diag->fwping.data) : + diag->fwping.data; + /* Check mbox data */ + if (diag->fwping.data != rsp_data) { + bfa_trc(diag, rsp_data); + diag->fwping.result->dmastatus = + BFA_STATUS_DATACORRUPTED; + diag->fwping.status = BFA_STATUS_DATACORRUPTED; + diag->fwping.cbfn(diag->fwping.cbarg, + diag->fwping.status); + diag->fwping.lock = 0; + return; + } + /* Check dma pattern */ + for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) { + if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) { + bfa_trc(diag, i); + bfa_trc(diag, pat); + bfa_trc(diag, + *((u32 *)diag->fwping.dbuf_kva + i)); + diag->fwping.result->dmastatus = + BFA_STATUS_DATACORRUPTED; + diag->fwping.status = BFA_STATUS_DATACORRUPTED; + diag->fwping.cbfn(diag->fwping.cbarg, + diag->fwping.status); + diag->fwping.lock = 0; + return; + } + } + diag->fwping.result->dmastatus = BFA_STATUS_OK; + diag->fwping.status = BFA_STATUS_OK; + diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status); + diag->fwping.lock = 0; + } else { + diag->fwping.status = BFA_STATUS_HDMA_FAILED; + diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status); + diag->fwping.lock = 0; + } +} + +/* + * Temperature Sensor + */ + +static void +diag_tempsensor_send(struct bfa_diag_s *diag) +{ + struct bfi_diag_ts_req_s *msg; + + msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg; + bfa_trc(diag, msg->temp); + /* build host command */ + bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR, + bfa_ioc_portid(diag->ioc)); + /* send mbox cmd */ + bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd); +} + +static void +diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp) +{ + if (!diag->tsensor.lock) { + /* receiving response after ioc failure */ + bfa_trc(diag, diag->tsensor.lock); + return; + } + + /* + * ASIC junction tempsensor is a reg read operation + * it will always return OK + */ + diag->tsensor.temp->temp = be16_to_cpu(rsp->temp); + diag->tsensor.temp->ts_junc = rsp->ts_junc; + diag->tsensor.temp->ts_brd = rsp->ts_brd; + diag->tsensor.temp->status = BFA_STATUS_OK; + + if (rsp->ts_brd) { + if (rsp->status == BFA_STATUS_OK) { + diag->tsensor.temp->brd_temp = + be16_to_cpu(rsp->brd_temp); + } else { + bfa_trc(diag, rsp->status); + diag->tsensor.temp->brd_temp = 0; + diag->tsensor.temp->status = BFA_STATUS_DEVBUSY; + } + } + bfa_trc(diag, rsp->ts_junc); + bfa_trc(diag, rsp->temp); + bfa_trc(diag, rsp->ts_brd); + bfa_trc(diag, rsp->brd_temp); + diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status); + diag->tsensor.lock = 0; +} + +/* + * LED Test command + */ +static void +diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest) +{ + struct bfi_diag_ledtest_req_s *msg; + + msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg; + /* build host command */ + bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST, + bfa_ioc_portid(diag->ioc)); + + /* + * convert the freq from N blinks per 10 sec to + * crossbow ontime value. 
We do it here because division is need + */ + if (ledtest->freq) + ledtest->freq = 500 / ledtest->freq; + + if (ledtest->freq == 0) + ledtest->freq = 1; + + bfa_trc(diag, ledtest->freq); + /* mcpy(&ledtest_req->req, ledtest, sizeof(bfa_diag_ledtest_t)); */ + msg->cmd = (u8) ledtest->cmd; + msg->color = (u8) ledtest->color; + msg->portid = bfa_ioc_portid(diag->ioc); + msg->led = ledtest->led; + msg->freq = cpu_to_be16(ledtest->freq); + + /* send mbox cmd */ + bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd); +} + +static void +diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s * msg) +{ + bfa_trc(diag, diag->ledtest.lock); + diag->ledtest.lock = BFA_FALSE; + /* no bfa_cb_queue is needed because driver is not waiting */ +} + +/* + * Port beaconing + */ +static void +diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec) +{ + struct bfi_diag_portbeacon_req_s *msg; + + msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg; + /* build host command */ + bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON, + bfa_ioc_portid(diag->ioc)); + msg->beacon = beacon; + msg->period = cpu_to_be32(sec); + /* send mbox cmd */ + bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd); +} + +static void +diag_portbeacon_comp(struct bfa_diag_s *diag) +{ + bfa_trc(diag, diag->beacon.state); + diag->beacon.state = BFA_FALSE; + if (diag->cbfn_beacon) + diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e); +} + +/* + * Diag hmbox handler + */ +void +bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg) +{ + struct bfa_diag_s *diag = diagarg; + + switch (msg->mh.msg_id) { + case BFI_DIAG_I2H_PORTBEACON: + diag_portbeacon_comp(diag); + break; + case BFI_DIAG_I2H_FWPING: + diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg); + break; + case BFI_DIAG_I2H_TEMPSENSOR: + diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg); + break; + case BFI_DIAG_I2H_LEDTEST: + diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg); + break; + default: + bfa_trc(diag, msg->mh.msg_id); + WARN_ON(1); + } +} + +/* + * Gen RAM Test + * + * @param[in] *diag - diag data struct + * @param[in] *memtest - mem test params input from upper layer, + * @param[in] pattern - mem test pattern + * @param[in] *result - mem test result + * @param[in] cbfn - mem test callback functioin + * @param[in] cbarg - callback functioin arg + * + * @param[out] + */ +bfa_status_t +bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest, + u32 pattern, struct bfa_diag_memtest_result *result, + bfa_cb_diag_t cbfn, void *cbarg) +{ + bfa_trc(diag, pattern); + + if (!bfa_ioc_adapter_is_disabled(diag->ioc)) + return BFA_STATUS_ADAPTER_ENABLED; + + /* check to see if there is another destructive diag cmd running */ + if (diag->block) { + bfa_trc(diag, diag->block); + return BFA_STATUS_DEVBUSY; + } else + diag->block = 1; + + diag->result = result; + diag->cbfn = cbfn; + diag->cbarg = cbarg; + + /* download memtest code and take LPU0 out of reset */ + bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS); + + bfa_timer_begin(diag->ioc->timer_mod, &diag->timer, + bfa_diag_memtest_done, diag, BFA_DIAG_MEMTEST_TOV); + diag->timer_active = 1; + return BFA_STATUS_OK; +} + +/* + * DIAG firmware ping command + * + * @param[in] *diag - diag data struct + * @param[in] cnt - dma loop count for testing PCIE + * @param[in] data - data pattern to pass in fw + * @param[in] *result - pt to bfa_diag_fwping_result_t data struct + * @param[in] cbfn - callback function + * 
@param[in] *cbarg - callback functioin arg + * + * @param[out] + */ +bfa_status_t +bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data, + struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn, + void *cbarg) +{ + bfa_trc(diag, cnt); + bfa_trc(diag, data); + + if (!bfa_ioc_is_operational(diag->ioc)) + return BFA_STATUS_IOC_NON_OP; + + if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) && + ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH)) + return BFA_STATUS_CMD_NOTSUPP; + + /* check to see if there is another destructive diag cmd running */ + if (diag->block || diag->fwping.lock) { + bfa_trc(diag, diag->block); + bfa_trc(diag, diag->fwping.lock); + return BFA_STATUS_DEVBUSY; + } + + /* Initialization */ + diag->fwping.lock = 1; + diag->fwping.cbfn = cbfn; + diag->fwping.cbarg = cbarg; + diag->fwping.result = result; + diag->fwping.data = data; + diag->fwping.count = cnt; + + /* Init test results */ + diag->fwping.result->data = 0; + diag->fwping.result->status = BFA_STATUS_OK; + + /* kick off the first ping */ + diag_fwping_send(diag); + return BFA_STATUS_OK; +} + +/* + * Read Temperature Sensor + * + * @param[in] *diag - diag data struct + * @param[in] *result - pt to bfa_diag_temp_t data struct + * @param[in] cbfn - callback function + * @param[in] *cbarg - callback functioin arg + * + * @param[out] + */ +bfa_status_t +bfa_diag_tsensor_query(struct bfa_diag_s *diag, + struct bfa_diag_results_tempsensor_s *result, + bfa_cb_diag_t cbfn, void *cbarg) +{ + /* check to see if there is a destructive diag cmd running */ + if (diag->block || diag->tsensor.lock) { + bfa_trc(diag, diag->block); + bfa_trc(diag, diag->tsensor.lock); + return BFA_STATUS_DEVBUSY; + } + + if (!bfa_ioc_is_operational(diag->ioc)) + return BFA_STATUS_IOC_NON_OP; + + /* Init diag mod params */ + diag->tsensor.lock = 1; + diag->tsensor.temp = result; + diag->tsensor.cbfn = cbfn; + diag->tsensor.cbarg = cbarg; + + /* Send msg to fw */ + diag_tempsensor_send(diag); + + return BFA_STATUS_OK; +} + +/* + * LED Test command + * + * @param[in] *diag - diag data struct + * @param[in] *ledtest - pt to ledtest data structure + * + * @param[out] + */ +bfa_status_t +bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest) +{ + bfa_trc(diag, ledtest->cmd); + + if (!bfa_ioc_is_operational(diag->ioc)) + return BFA_STATUS_IOC_NON_OP; + + if (diag->beacon.state) + return BFA_STATUS_BEACON_ON; + + if (diag->ledtest.lock) + return BFA_STATUS_LEDTEST_OP; + + /* Send msg to fw */ + diag->ledtest.lock = BFA_TRUE; + diag_ledtest_send(diag, ledtest); + + return BFA_STATUS_OK; +} + +/* + * Port beaconing command + * + * @param[in] *diag - diag data struct + * @param[in] beacon - port beaconing 1:ON 0:OFF + * @param[in] link_e2e_beacon - link beaconing 1:ON 0:OFF + * @param[in] sec - beaconing duration in seconds + * + * @param[out] + */ +bfa_status_t +bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon, + bfa_boolean_t link_e2e_beacon, uint32_t sec) +{ + bfa_trc(diag, beacon); + bfa_trc(diag, link_e2e_beacon); + bfa_trc(diag, sec); + + if (!bfa_ioc_is_operational(diag->ioc)) + return BFA_STATUS_IOC_NON_OP; + + if (diag->ledtest.lock) + return BFA_STATUS_LEDTEST_OP; + + if (diag->beacon.state && beacon) /* beacon alread on */ + return BFA_STATUS_BEACON_ON; + + diag->beacon.state = beacon; + diag->beacon.link_e2e = link_e2e_beacon; + if (diag->cbfn_beacon) + diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon); + + /* Send msg to fw */ + diag_portbeacon_send(diag, beacon, sec); + + return BFA_STATUS_OK; +} + 
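
All of the diag entry points above follow the same asynchronous contract: the caller supplies a completion callback and argument, the routine returns BFA_STATUS_OK once the mailbox request has been queued (or BFA_STATUS_DEVBUSY / BFA_STATUS_IOC_NON_OP without queuing anything), and the callback is later invoked from bfa_diag_intr() with the final status. The sketch below shows one plausible caller of bfa_diag_fwping(); it is illustrative only and not part of this patch. It assumes the bfa driver headers are in scope, the context structure and wrapper names (my_fwping_ctx, my_fwping_done, my_run_fwping) are hypothetical, and the callback prototype is inferred from how cbfn/cbarg are invoked in the completion handlers above.

#include <linux/completion.h>
/* bfa_defs.h / bfa_ioc.h assumed available for bfa_status_t, bfa_diag_s, etc. */

struct my_fwping_ctx {				/* hypothetical caller context */
	struct bfa_diag_results_fwping	result;
	struct completion		done;
	bfa_status_t			status;
};

/* Completion callback; prototype inferred from cbfn(cbarg, status) usage above */
static void my_fwping_done(void *cbarg, bfa_status_t status)
{
	struct my_fwping_ctx *ctx = cbarg;

	ctx->status = status;			/* OK, DATACORRUPTED, HDMA_FAILED, ... */
	complete(&ctx->done);
}

static bfa_status_t my_run_fwping(struct bfa_diag_s *diag)
{
	struct my_fwping_ctx ctx;
	bfa_status_t rc;

	init_completion(&ctx.done);

	/* 8 DMA loops with pattern 0xA5A5A5A5; completion is asynchronous */
	rc = bfa_diag_fwping(diag, 8, 0xA5A5A5A5, &ctx.result,
			     my_fwping_done, &ctx);
	if (rc != BFA_STATUS_OK)		/* e.g. BFA_STATUS_DEVBUSY */
		return rc;

	wait_for_completion(&ctx.done);		/* my_fwping_done() fires from bfa_diag_intr() */
	return ctx.status;
}

The same pattern (issue request, check the synchronous return code, then wait for the callback) applies to bfa_diag_tsensor_query() and to the flash, phy and sfp request APIs in this patch.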
+/* + * Return DMA memory needed by diag module. + */ +u32 +bfa_diag_meminfo(void) +{ + return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); +} + +/* + * Attach virtual and physical memory for Diag. + */ +void +bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev, + bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod) +{ + diag->dev = dev; + diag->ioc = ioc; + diag->trcmod = trcmod; + + diag->block = 0; + diag->cbfn = NULL; + diag->cbarg = NULL; + diag->result = NULL; + diag->cbfn_beacon = cbfn_beacon; + + bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag); + bfa_q_qe_init(&diag->ioc_notify); + bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag); + list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q); +} + +void +bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa) +{ + diag->fwping.dbuf_kva = dm_kva; + diag->fwping.dbuf_pa = dm_pa; + memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ); +} + +/* + * PHY module specific + */ +#define BFA_PHY_DMA_BUF_SZ 0x02000 /* 8k dma buffer */ +#define BFA_PHY_LOCK_STATUS 0x018878 /* phy semaphore status reg */ + +static void +bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz) +{ + int i, m = sz >> 2; + + for (i = 0; i < m; i++) + obuf[i] = be32_to_cpu(ibuf[i]); +} + +static bfa_boolean_t +bfa_phy_present(struct bfa_phy_s *phy) +{ + return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING); +} + +static void +bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event) +{ + struct bfa_phy_s *phy = cbarg; + + bfa_trc(phy, event); + + switch (event) { + case BFA_IOC_E_DISABLED: + case BFA_IOC_E_FAILED: + if (phy->op_busy) { + phy->status = BFA_STATUS_IOC_FAILURE; + phy->cbfn(phy->cbarg, phy->status); + phy->op_busy = 0; + } + break; + + default: + break; + } +} + +/* + * Send phy attribute query request. + * + * @param[in] cbarg - callback argument + */ +static void +bfa_phy_query_send(void *cbarg) +{ + struct bfa_phy_s *phy = cbarg; + struct bfi_phy_query_req_s *msg = + (struct bfi_phy_query_req_s *) phy->mb.msg; + + msg->instance = phy->instance; + bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ, + bfa_ioc_portid(phy->ioc)); + bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa); + bfa_ioc_mbox_queue(phy->ioc, &phy->mb); +} + +/* + * Send phy write request. + * + * @param[in] cbarg - callback argument + */ +static void +bfa_phy_write_send(void *cbarg) +{ + struct bfa_phy_s *phy = cbarg; + struct bfi_phy_write_req_s *msg = + (struct bfi_phy_write_req_s *) phy->mb.msg; + u32 len; + u16 *buf, *dbuf; + int i, sz; + + msg->instance = phy->instance; + msg->offset = cpu_to_be32(phy->addr_off + phy->offset); + len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ? + phy->residue : BFA_PHY_DMA_BUF_SZ; + msg->length = cpu_to_be32(len); + + /* indicate if it's the last msg of the whole write operation */ + msg->last = (len == phy->residue) ? 1 : 0; + + bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ, + bfa_ioc_portid(phy->ioc)); + bfa_alen_set(&msg->alen, len, phy->dbuf_pa); + + buf = (u16 *) (phy->ubuf + phy->offset); + dbuf = (u16 *)phy->dbuf_kva; + sz = len >> 1; + for (i = 0; i < sz; i++) + buf[i] = cpu_to_be16(dbuf[i]); + + bfa_ioc_mbox_queue(phy->ioc, &phy->mb); + + phy->residue -= len; + phy->offset += len; +} + +/* + * Send phy read request. 
+ * + * @param[in] cbarg - callback argument + */ +static void +bfa_phy_read_send(void *cbarg) +{ + struct bfa_phy_s *phy = cbarg; + struct bfi_phy_read_req_s *msg = + (struct bfi_phy_read_req_s *) phy->mb.msg; + u32 len; + + msg->instance = phy->instance; + msg->offset = cpu_to_be32(phy->addr_off + phy->offset); + len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ? + phy->residue : BFA_PHY_DMA_BUF_SZ; + msg->length = cpu_to_be32(len); + bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ, + bfa_ioc_portid(phy->ioc)); + bfa_alen_set(&msg->alen, len, phy->dbuf_pa); + bfa_ioc_mbox_queue(phy->ioc, &phy->mb); +} + +/* + * Send phy stats request. + * + * @param[in] cbarg - callback argument + */ +static void +bfa_phy_stats_send(void *cbarg) +{ + struct bfa_phy_s *phy = cbarg; + struct bfi_phy_stats_req_s *msg = + (struct bfi_phy_stats_req_s *) phy->mb.msg; + + msg->instance = phy->instance; + bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ, + bfa_ioc_portid(phy->ioc)); + bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa); + bfa_ioc_mbox_queue(phy->ioc, &phy->mb); +} + +/* + * Flash memory info API. + * + * @param[in] mincfg - minimal cfg variable + */ +u32 +bfa_phy_meminfo(bfa_boolean_t mincfg) +{ + /* min driver doesn't need phy */ + if (mincfg) + return 0; + + return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); +} + +/* + * Flash attach API. + * + * @param[in] phy - phy structure + * @param[in] ioc - ioc structure + * @param[in] dev - device structure + * @param[in] trcmod - trace module + * @param[in] logmod - log module + */ +void +bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev, + struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg) +{ + phy->ioc = ioc; + phy->trcmod = trcmod; + phy->cbfn = NULL; + phy->cbarg = NULL; + phy->op_busy = 0; + + bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy); + bfa_q_qe_init(&phy->ioc_notify); + bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy); + list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q); + + /* min driver doesn't need phy */ + if (mincfg) { + phy->dbuf_kva = NULL; + phy->dbuf_pa = 0; + } +} + +/* + * Claim memory for phy + * + * @param[in] phy - phy structure + * @param[in] dm_kva - pointer to virtual memory address + * @param[in] dm_pa - physical memory address + * @param[in] mincfg - minimal cfg variable + */ +void +bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa, + bfa_boolean_t mincfg) +{ + if (mincfg) + return; + + phy->dbuf_kva = dm_kva; + phy->dbuf_pa = dm_pa; + memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ); + dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); + dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); +} + +bfa_boolean_t +bfa_phy_busy(struct bfa_ioc_s *ioc) +{ + void __iomem *rb; + + rb = bfa_ioc_bar0(ioc); + return readl(rb + BFA_PHY_LOCK_STATUS); +} + +/* + * Get phy attribute. + * + * @param[in] phy - phy structure + * @param[in] attr - phy attribute structure + * @param[in] cbfn - callback function + * @param[in] cbarg - callback argument + * + * Return status. 
+ */ +bfa_status_t +bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance, + struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg) +{ + bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ); + bfa_trc(phy, instance); + + if (!bfa_phy_present(phy)) + return BFA_STATUS_PHY_NOT_PRESENT; + + if (!bfa_ioc_is_operational(phy->ioc)) + return BFA_STATUS_IOC_NON_OP; + + if (phy->op_busy || bfa_phy_busy(phy->ioc)) { + bfa_trc(phy, phy->op_busy); + return BFA_STATUS_DEVBUSY; + } + + phy->op_busy = 1; + phy->cbfn = cbfn; + phy->cbarg = cbarg; + phy->instance = instance; + phy->ubuf = (uint8_t *) attr; + bfa_phy_query_send(phy); + + return BFA_STATUS_OK; +} + +/* + * Get phy stats. + * + * @param[in] phy - phy structure + * @param[in] instance - phy image instance + * @param[in] stats - pointer to phy stats + * @param[in] cbfn - callback function + * @param[in] cbarg - callback argument + * + * Return status. + */ +bfa_status_t +bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance, + struct bfa_phy_stats_s *stats, + bfa_cb_phy_t cbfn, void *cbarg) +{ + bfa_trc(phy, BFI_PHY_H2I_STATS_REQ); + bfa_trc(phy, instance); + + if (!bfa_phy_present(phy)) + return BFA_STATUS_PHY_NOT_PRESENT; + + if (!bfa_ioc_is_operational(phy->ioc)) + return BFA_STATUS_IOC_NON_OP; + + if (phy->op_busy || bfa_phy_busy(phy->ioc)) { + bfa_trc(phy, phy->op_busy); + return BFA_STATUS_DEVBUSY; + } + + phy->op_busy = 1; + phy->cbfn = cbfn; + phy->cbarg = cbarg; + phy->instance = instance; + phy->ubuf = (u8 *) stats; + bfa_phy_stats_send(phy); + + return BFA_STATUS_OK; +} + +/* + * Update phy image. + * + * @param[in] phy - phy structure + * @param[in] instance - phy image instance + * @param[in] buf - update data buffer + * @param[in] len - data buffer length + * @param[in] offset - offset relative to starting address + * @param[in] cbfn - callback function + * @param[in] cbarg - callback argument + * + * Return status. + */ +bfa_status_t +bfa_phy_update(struct bfa_phy_s *phy, u8 instance, + void *buf, u32 len, u32 offset, + bfa_cb_phy_t cbfn, void *cbarg) +{ + bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ); + bfa_trc(phy, instance); + bfa_trc(phy, len); + bfa_trc(phy, offset); + + if (!bfa_phy_present(phy)) + return BFA_STATUS_PHY_NOT_PRESENT; + + if (!bfa_ioc_is_operational(phy->ioc)) + return BFA_STATUS_IOC_NON_OP; + + /* 'len' must be in word (4-byte) boundary */ + if (!len || (len & 0x03)) + return BFA_STATUS_FAILED; + + if (phy->op_busy || bfa_phy_busy(phy->ioc)) { + bfa_trc(phy, phy->op_busy); + return BFA_STATUS_DEVBUSY; + } + + phy->op_busy = 1; + phy->cbfn = cbfn; + phy->cbarg = cbarg; + phy->instance = instance; + phy->residue = len; + phy->offset = 0; + phy->addr_off = offset; + phy->ubuf = buf; + + bfa_phy_write_send(phy); + return BFA_STATUS_OK; +} + +/* + * Read phy image. + * + * @param[in] phy - phy structure + * @param[in] instance - phy image instance + * @param[in] buf - read data buffer + * @param[in] len - data buffer length + * @param[in] offset - offset relative to starting address + * @param[in] cbfn - callback function + * @param[in] cbarg - callback argument + * + * Return status. 
+ */ +bfa_status_t +bfa_phy_read(struct bfa_phy_s *phy, u8 instance, + void *buf, u32 len, u32 offset, + bfa_cb_phy_t cbfn, void *cbarg) +{ + bfa_trc(phy, BFI_PHY_H2I_READ_REQ); + bfa_trc(phy, instance); + bfa_trc(phy, len); + bfa_trc(phy, offset); + + if (!bfa_phy_present(phy)) + return BFA_STATUS_PHY_NOT_PRESENT; + + if (!bfa_ioc_is_operational(phy->ioc)) + return BFA_STATUS_IOC_NON_OP; + + /* 'len' must be in word (4-byte) boundary */ + if (!len || (len & 0x03)) + return BFA_STATUS_FAILED; + + if (phy->op_busy || bfa_phy_busy(phy->ioc)) { + bfa_trc(phy, phy->op_busy); + return BFA_STATUS_DEVBUSY; + } + + phy->op_busy = 1; + phy->cbfn = cbfn; + phy->cbarg = cbarg; + phy->instance = instance; + phy->residue = len; + phy->offset = 0; + phy->addr_off = offset; + phy->ubuf = buf; + bfa_phy_read_send(phy); + + return BFA_STATUS_OK; +} + +/* + * Process phy response messages upon receiving interrupts. + * + * @param[in] phyarg - phy structure + * @param[in] msg - message structure + */ +void +bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg) +{ + struct bfa_phy_s *phy = phyarg; + u32 status; + + union { + struct bfi_phy_query_rsp_s *query; + struct bfi_phy_stats_rsp_s *stats; + struct bfi_phy_write_rsp_s *write; + struct bfi_phy_read_rsp_s *read; + struct bfi_mbmsg_s *msg; + } m; + + m.msg = msg; + bfa_trc(phy, msg->mh.msg_id); + + if (!phy->op_busy) { + /* receiving response after ioc failure */ + bfa_trc(phy, 0x9999); + return; + } + + switch (msg->mh.msg_id) { + case BFI_PHY_I2H_QUERY_RSP: + status = be32_to_cpu(m.query->status); + bfa_trc(phy, status); + + if (status == BFA_STATUS_OK) { + struct bfa_phy_attr_s *attr = + (struct bfa_phy_attr_s *) phy->ubuf; + bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva, + sizeof(struct bfa_phy_attr_s)); + bfa_trc(phy, attr->status); + bfa_trc(phy, attr->length); + } + + phy->status = status; + phy->op_busy = 0; + if (phy->cbfn) + phy->cbfn(phy->cbarg, phy->status); + break; + case BFI_PHY_I2H_STATS_RSP: + status = be32_to_cpu(m.stats->status); + bfa_trc(phy, status); + + if (status == BFA_STATUS_OK) { + struct bfa_phy_stats_s *stats = + (struct bfa_phy_stats_s *) phy->ubuf; + bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva, + sizeof(struct bfa_phy_stats_s)); + bfa_trc(phy, stats->status); + } + + phy->status = status; + phy->op_busy = 0; + if (phy->cbfn) + phy->cbfn(phy->cbarg, phy->status); + break; + case BFI_PHY_I2H_WRITE_RSP: + status = be32_to_cpu(m.write->status); + bfa_trc(phy, status); + + if (status != BFA_STATUS_OK || phy->residue == 0) { + phy->status = status; + phy->op_busy = 0; + if (phy->cbfn) + phy->cbfn(phy->cbarg, phy->status); + } else { + bfa_trc(phy, phy->offset); + bfa_phy_write_send(phy); + } + break; + case BFI_PHY_I2H_READ_RSP: + status = be32_to_cpu(m.read->status); + bfa_trc(phy, status); + + if (status != BFA_STATUS_OK) { + phy->status = status; + phy->op_busy = 0; + if (phy->cbfn) + phy->cbfn(phy->cbarg, phy->status); + } else { + u32 len = be32_to_cpu(m.read->length); + u16 *buf = (u16 *)(phy->ubuf + phy->offset); + u16 *dbuf = (u16 *)phy->dbuf_kva; + int i, sz = len >> 1; + + bfa_trc(phy, phy->offset); + bfa_trc(phy, len); + + for (i = 0; i < sz; i++) + buf[i] = be16_to_cpu(dbuf[i]); + + phy->residue -= len; + phy->offset += len; + + if (phy->residue == 0) { + phy->status = status; + phy->op_busy = 0; + if (phy->cbfn) + phy->cbfn(phy->cbarg, phy->status); + } else + bfa_phy_read_send(phy); + } + break; + default: + WARN_ON(1); + } +} + +/* + * DCONF module specific + */ + +BFA_MODULE(dconf); + +/* + * DCONF state 
machine events + */ +enum bfa_dconf_event { + BFA_DCONF_SM_INIT = 1, /* dconf Init */ + BFA_DCONF_SM_FLASH_COMP = 2, /* read/write to flash */ + BFA_DCONF_SM_WR = 3, /* binding change, map */ + BFA_DCONF_SM_TIMEOUT = 4, /* Start timer */ + BFA_DCONF_SM_EXIT = 5, /* exit dconf module */ + BFA_DCONF_SM_IOCDISABLE = 6, /* IOC disable event */ +}; + +/* forward declaration of DCONF state machine */ +static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, + enum bfa_dconf_event event); +static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf, + enum bfa_dconf_event event); +static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, + enum bfa_dconf_event event); +static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, + enum bfa_dconf_event event); +static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, + enum bfa_dconf_event event); +static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf, + enum bfa_dconf_event event); +static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf, + enum bfa_dconf_event event); + +static void bfa_dconf_cbfn(void *dconf, bfa_status_t status); +static void bfa_dconf_timer(void *cbarg); +static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf); +static void bfa_dconf_init_cb(void *arg, bfa_status_t status); + +/* + * Begining state of dconf module. Waiting for an event to start. + */ +static void +bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event) +{ + bfa_status_t bfa_status; + bfa_trc(dconf->bfa, event); + + switch (event) { + case BFA_DCONF_SM_INIT: + if (dconf->min_cfg) { + bfa_trc(dconf->bfa, dconf->min_cfg); + return; + } + bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read); + dconf->flashdone = BFA_FALSE; + bfa_trc(dconf->bfa, dconf->flashdone); + bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa), + BFA_FLASH_PART_DRV, dconf->instance, + dconf->dconf, + sizeof(struct bfa_dconf_s), 0, + bfa_dconf_init_cb, dconf->bfa); + if (bfa_status != BFA_STATUS_OK) { + bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED); + bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); + return; + } + break; + case BFA_DCONF_SM_EXIT: + dconf->flashdone = BFA_TRUE; + case BFA_DCONF_SM_IOCDISABLE: + case BFA_DCONF_SM_WR: + case BFA_DCONF_SM_FLASH_COMP: + break; + default: + bfa_sm_fault(dconf->bfa, event); + } +} + +/* + * Read flash for dconf entries and make a call back to the driver once done. + */ +static void +bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf, + enum bfa_dconf_event event) +{ + bfa_trc(dconf->bfa, event); + + switch (event) { + case BFA_DCONF_SM_FLASH_COMP: + bfa_sm_set_state(dconf, bfa_dconf_sm_ready); + break; + case BFA_DCONF_SM_TIMEOUT: + bfa_sm_set_state(dconf, bfa_dconf_sm_ready); + break; + case BFA_DCONF_SM_EXIT: + dconf->flashdone = BFA_TRUE; + bfa_trc(dconf->bfa, dconf->flashdone); + case BFA_DCONF_SM_IOCDISABLE: + bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); + break; + default: + bfa_sm_fault(dconf->bfa, event); + } +} + +/* + * DCONF Module is in ready state. Has completed the initialization. 
+ */ +static void +bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event) +{ + bfa_trc(dconf->bfa, event); + + switch (event) { + case BFA_DCONF_SM_WR: + bfa_timer_start(dconf->bfa, &dconf->timer, + bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV); + bfa_sm_set_state(dconf, bfa_dconf_sm_dirty); + break; + case BFA_DCONF_SM_EXIT: + dconf->flashdone = BFA_TRUE; + bfa_trc(dconf->bfa, dconf->flashdone); + bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); + break; + case BFA_DCONF_SM_INIT: + case BFA_DCONF_SM_IOCDISABLE: + break; + default: + bfa_sm_fault(dconf->bfa, event); + } +} + +/* + * entries are dirty, write back to the flash. + */ + +static void +bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event) +{ + bfa_trc(dconf->bfa, event); + + switch (event) { + case BFA_DCONF_SM_TIMEOUT: + bfa_sm_set_state(dconf, bfa_dconf_sm_sync); + bfa_dconf_flash_write(dconf); + break; + case BFA_DCONF_SM_WR: + bfa_timer_stop(&dconf->timer); + bfa_timer_start(dconf->bfa, &dconf->timer, + bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV); + break; + case BFA_DCONF_SM_EXIT: + bfa_timer_stop(&dconf->timer); + bfa_timer_start(dconf->bfa, &dconf->timer, + bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV); + bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync); + bfa_dconf_flash_write(dconf); + break; + case BFA_DCONF_SM_FLASH_COMP: + break; + case BFA_DCONF_SM_IOCDISABLE: + bfa_timer_stop(&dconf->timer); + bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty); + break; + default: + bfa_sm_fault(dconf->bfa, event); + } +} + +/* + * Sync the dconf entries to the flash. + */ +static void +bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf, + enum bfa_dconf_event event) +{ + bfa_trc(dconf->bfa, event); + + switch (event) { + case BFA_DCONF_SM_IOCDISABLE: + case BFA_DCONF_SM_FLASH_COMP: + bfa_timer_stop(&dconf->timer); + case BFA_DCONF_SM_TIMEOUT: + bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); + dconf->flashdone = BFA_TRUE; + bfa_trc(dconf->bfa, dconf->flashdone); + bfa_ioc_disable(&dconf->bfa->ioc); + break; + default: + bfa_sm_fault(dconf->bfa, event); + } +} + +static void +bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event) +{ + bfa_trc(dconf->bfa, event); + + switch (event) { + case BFA_DCONF_SM_FLASH_COMP: + bfa_sm_set_state(dconf, bfa_dconf_sm_ready); + break; + case BFA_DCONF_SM_WR: + bfa_timer_start(dconf->bfa, &dconf->timer, + bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV); + bfa_sm_set_state(dconf, bfa_dconf_sm_dirty); + break; + case BFA_DCONF_SM_EXIT: + bfa_timer_start(dconf->bfa, &dconf->timer, + bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV); + bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync); + break; + case BFA_DCONF_SM_IOCDISABLE: + bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty); + break; + default: + bfa_sm_fault(dconf->bfa, event); + } +} + +static void +bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf, + enum bfa_dconf_event event) +{ + bfa_trc(dconf->bfa, event); + + switch (event) { + case BFA_DCONF_SM_INIT: + bfa_timer_start(dconf->bfa, &dconf->timer, + bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV); + bfa_sm_set_state(dconf, bfa_dconf_sm_dirty); + break; + case BFA_DCONF_SM_EXIT: + dconf->flashdone = BFA_TRUE; + bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); + break; + case BFA_DCONF_SM_IOCDISABLE: + break; + default: + bfa_sm_fault(dconf->bfa, event); + } +} + +/* + * Compute and return memory needed by DRV_CFG module. 
+ */ +static void +bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo, + struct bfa_s *bfa) +{ + struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa); + + if (cfg->drvcfg.min_cfg) + bfa_mem_kva_setup(meminfo, dconf_kva, + sizeof(struct bfa_dconf_hdr_s)); + else + bfa_mem_kva_setup(meminfo, dconf_kva, + sizeof(struct bfa_dconf_s)); +} + +static void +bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, + struct bfa_pcidev_s *pcidev) +{ + struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); + + dconf->bfad = bfad; + dconf->bfa = bfa; + dconf->instance = bfa->ioc.port_id; + bfa_trc(bfa, dconf->instance); + + dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf); + if (cfg->drvcfg.min_cfg) { + bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s); + dconf->min_cfg = BFA_TRUE; + /* + * Set the flashdone flag to TRUE explicitly as no flash + * write will happen in min_cfg mode. + */ + dconf->flashdone = BFA_TRUE; + } else { + dconf->min_cfg = BFA_FALSE; + bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s); + } + + bfa_dconf_read_data_valid(bfa) = BFA_FALSE; + bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); +} + +static void +bfa_dconf_init_cb(void *arg, bfa_status_t status) +{ + struct bfa_s *bfa = arg; + struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); + + dconf->flashdone = BFA_TRUE; + bfa_trc(bfa, dconf->flashdone); + bfa_iocfc_cb_dconf_modinit(bfa, status); + if (status == BFA_STATUS_OK) { + bfa_dconf_read_data_valid(bfa) = BFA_TRUE; + if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE) + dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE; + if (dconf->dconf->hdr.version != BFI_DCONF_VERSION) + dconf->dconf->hdr.version = BFI_DCONF_VERSION; + } + bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP); +} + +void +bfa_dconf_modinit(struct bfa_s *bfa) +{ + struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); + bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT); +} +static void +bfa_dconf_start(struct bfa_s *bfa) +{ +} + +static void +bfa_dconf_stop(struct bfa_s *bfa) +{ +} + +static void bfa_dconf_timer(void *cbarg) +{ + struct bfa_dconf_mod_s *dconf = cbarg; + bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT); +} +static void +bfa_dconf_iocdisable(struct bfa_s *bfa) +{ + struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); + bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE); +} + +static void +bfa_dconf_detach(struct bfa_s *bfa) +{ +} + +static bfa_status_t +bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf) +{ + bfa_status_t bfa_status; + bfa_trc(dconf->bfa, 0); + + bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa), + BFA_FLASH_PART_DRV, dconf->instance, + dconf->dconf, sizeof(struct bfa_dconf_s), 0, + bfa_dconf_cbfn, dconf); + if (bfa_status != BFA_STATUS_OK) + WARN_ON(bfa_status); + bfa_trc(dconf->bfa, bfa_status); + + return bfa_status; +} + +bfa_status_t +bfa_dconf_update(struct bfa_s *bfa) +{ + struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); + bfa_trc(dconf->bfa, 0); + if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty)) + return BFA_STATUS_FAILED; + + if (dconf->min_cfg) { + bfa_trc(dconf->bfa, dconf->min_cfg); + return BFA_STATUS_FAILED; + } + + bfa_sm_send_event(dconf, BFA_DCONF_SM_WR); + return BFA_STATUS_OK; +} + +static void +bfa_dconf_cbfn(void *arg, bfa_status_t status) +{ + struct bfa_dconf_mod_s *dconf = arg; + WARN_ON(status); + bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP); +} + +void +bfa_dconf_modexit(struct bfa_s *bfa) +{ + struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); + 
BFA_DCONF_MOD(bfa)->flashdone = BFA_FALSE; + bfa_trc(bfa, BFA_DCONF_MOD(bfa)->flashdone); + bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT); +} diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h index c85182a704f..546d46b3710 100644 --- a/drivers/scsi/bfa/bfa_ioc.h +++ b/drivers/scsi/bfa/bfa_ioc.h @@ -85,12 +85,75 @@ struct bfa_sge_s { #endif /* + * BFA memory resources + */ +struct bfa_mem_dma_s { + struct list_head qe; /* Queue of DMA elements */ + u32 mem_len; /* Total Length in Bytes */ + u8 *kva; /* kernel virtual address */ + u64 dma; /* dma address if DMA memory */ + u8 *kva_curp; /* kva allocation cursor */ + u64 dma_curp; /* dma allocation cursor */ +}; +#define bfa_mem_dma_t struct bfa_mem_dma_s + +struct bfa_mem_kva_s { + struct list_head qe; /* Queue of KVA elements */ + u32 mem_len; /* Total Length in Bytes */ + u8 *kva; /* kernel virtual address */ + u8 *kva_curp; /* kva allocation cursor */ +}; +#define bfa_mem_kva_t struct bfa_mem_kva_s + +struct bfa_meminfo_s { + struct bfa_mem_dma_s dma_info; + struct bfa_mem_kva_s kva_info; +}; + +/* BFA memory segment setup macros */ +#define bfa_mem_dma_setup(_meminfo, _dm_ptr, _seg_sz) do { \ + ((bfa_mem_dma_t *)(_dm_ptr))->mem_len = (_seg_sz); \ + if (_seg_sz) \ + list_add_tail(&((bfa_mem_dma_t *)_dm_ptr)->qe, \ + &(_meminfo)->dma_info.qe); \ +} while (0) + +#define bfa_mem_kva_setup(_meminfo, _kva_ptr, _seg_sz) do { \ + ((bfa_mem_kva_t *)(_kva_ptr))->mem_len = (_seg_sz); \ + if (_seg_sz) \ + list_add_tail(&((bfa_mem_kva_t *)_kva_ptr)->qe, \ + &(_meminfo)->kva_info.qe); \ +} while (0) + +/* BFA dma memory segments iterator */ +#define bfa_mem_dma_sptr(_mod, _i) (&(_mod)->dma_seg[(_i)]) +#define bfa_mem_dma_seg_iter(_mod, _sptr, _nr, _i) \ + for (_i = 0, _sptr = bfa_mem_dma_sptr(_mod, _i); _i < (_nr); \ + _i++, _sptr = bfa_mem_dma_sptr(_mod, _i)) + +#define bfa_mem_kva_curp(_mod) ((_mod)->kva_seg.kva_curp) +#define bfa_mem_dma_virt(_sptr) ((_sptr)->kva_curp) +#define bfa_mem_dma_phys(_sptr) ((_sptr)->dma_curp) +#define bfa_mem_dma_len(_sptr) ((_sptr)->mem_len) + +/* Get the corresponding dma buf kva for a req - from the tag */ +#define bfa_mem_get_dmabuf_kva(_mod, _tag, _rqsz) \ + (((u8 *)(_mod)->dma_seg[BFI_MEM_SEG_FROM_TAG(_tag, _rqsz)].kva_curp) +\ + BFI_MEM_SEG_REQ_OFFSET(_tag, _rqsz) * (_rqsz)) + +/* Get the corresponding dma buf pa for a req - from the tag */ +#define bfa_mem_get_dmabuf_pa(_mod, _tag, _rqsz) \ + ((_mod)->dma_seg[BFI_MEM_SEG_FROM_TAG(_tag, _rqsz)].dma_curp + \ + BFI_MEM_SEG_REQ_OFFSET(_tag, _rqsz) * (_rqsz)) + +/* * PCI device information required by IOC */ struct bfa_pcidev_s { int pci_slot; u8 pci_func; u16 device_id; + u16 ssid; void __iomem *pci_bar_kva; }; @@ -112,18 +175,6 @@ struct bfa_dma_s { #define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */ #define BFI_SMEM_CT_SIZE 0x280000U /* ! 
2.5MB for catapult */ - -#define bfa_dma_addr_set(dma_addr, pa) \ - __bfa_dma_addr_set(&dma_addr, (u64)pa) - -static inline void -__bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa) -{ - dma_addr->a32.addr_lo = (__be32) pa; - dma_addr->a32.addr_hi = (__be32) (pa >> 32); -} - - #define bfa_dma_be_addr_set(dma_addr, pa) \ __bfa_dma_be_addr_set(&dma_addr, (u64)pa) static inline void @@ -133,11 +184,22 @@ __bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa) dma_addr->a32.addr_hi = cpu_to_be32(pa >> 32); } +#define bfa_alen_set(__alen, __len, __pa) \ + __bfa_alen_set(__alen, __len, (u64)__pa) + +static inline void +__bfa_alen_set(struct bfi_alen_s *alen, u32 len, u64 pa) +{ + alen->al_len = cpu_to_be32(len); + bfa_dma_be_addr_set(alen->al_addr, pa); +} + struct bfa_ioc_regs_s { void __iomem *hfn_mbox_cmd; void __iomem *hfn_mbox; void __iomem *lpu_mbox_cmd; void __iomem *lpu_mbox; + void __iomem *lpu_read_stat; void __iomem *pss_ctl_reg; void __iomem *pss_err_status_reg; void __iomem *app_pll_fast_ctl_reg; @@ -199,18 +261,26 @@ struct bfa_ioc_cbfn_s { }; /* - * Heartbeat failure notification queue element. + * IOC event notification mechanism. */ -struct bfa_ioc_hbfail_notify_s { +enum bfa_ioc_event_e { + BFA_IOC_E_ENABLED = 1, + BFA_IOC_E_DISABLED = 2, + BFA_IOC_E_FAILED = 3, +}; + +typedef void (*bfa_ioc_notify_cbfn_t)(void *, enum bfa_ioc_event_e); + +struct bfa_ioc_notify_s { struct list_head qe; - bfa_ioc_hbfail_cbfn_t cbfn; + bfa_ioc_notify_cbfn_t cbfn; void *cbarg; }; /* - * Initialize a heartbeat failure notification structure + * Initialize a IOC event notification structure */ -#define bfa_ioc_hbfail_init(__notify, __cbfn, __cbarg) do { \ +#define bfa_ioc_notify_init(__notify, __cbfn, __cbarg) do { \ (__notify)->cbfn = (__cbfn); \ (__notify)->cbarg = (__cbarg); \ } while (0) @@ -218,8 +288,9 @@ struct bfa_ioc_hbfail_notify_s { struct bfa_iocpf_s { bfa_fsm_t fsm; struct bfa_ioc_s *ioc; - u32 retry_count; + bfa_boolean_t fw_mismatch_notified; bfa_boolean_t auto_recover; + u32 poll_time; }; struct bfa_ioc_s { @@ -231,17 +302,15 @@ struct bfa_ioc_s { struct bfa_timer_s sem_timer; struct bfa_timer_s hb_timer; u32 hb_count; - struct list_head hb_notify_q; + struct list_head notify_q; void *dbg_fwsave; int dbg_fwsave_len; bfa_boolean_t dbg_fwsave_once; - enum bfi_mclass ioc_mc; + enum bfi_pcifn_class clscode; struct bfa_ioc_regs_s ioc_regs; struct bfa_trc_mod_s *trcmod; struct bfa_ioc_drv_stats_s stats; bfa_boolean_t fcmode; - bfa_boolean_t ctdev; - bfa_boolean_t cna; bfa_boolean_t pllinit; bfa_boolean_t stats_busy; /* outstanding stats */ u8 port_id; @@ -251,10 +320,18 @@ struct bfa_ioc_s { struct bfa_ioc_mbox_mod_s mbox_mod; struct bfa_ioc_hwif_s *ioc_hwif; struct bfa_iocpf_s iocpf; + enum bfi_asic_gen asic_gen; + enum bfi_asic_mode asic_mode; + enum bfi_port_mode port0_mode; + enum bfi_port_mode port1_mode; + enum bfa_mode_s port_mode; + u8 ad_cap_bm; /* adapter cap bit mask */ + u8 port_mode_cfg; /* config port mode */ + int ioc_aen_seq; }; struct bfa_ioc_hwif_s { - bfa_status_t (*ioc_pll_init) (void __iomem *rb, bfa_boolean_t fcmode); + bfa_status_t (*ioc_pll_init) (void __iomem *rb, enum bfi_asic_mode m); bfa_boolean_t (*ioc_firmware_lock) (struct bfa_ioc_s *ioc); void (*ioc_firmware_unlock) (struct bfa_ioc_s *ioc); void (*ioc_reg_init) (struct bfa_ioc_s *ioc); @@ -268,12 +345,400 @@ struct bfa_ioc_hwif_s { void (*ioc_sync_leave) (struct bfa_ioc_s *ioc); void (*ioc_sync_ack) (struct bfa_ioc_s *ioc); bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc); + bfa_boolean_t 
(*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc); +}; + +/* + * Queue element to wait for room in request queue. FIFO order is + * maintained when fullfilling requests. + */ +struct bfa_reqq_wait_s { + struct list_head qe; + void (*qresume) (void *cbarg); + void *cbarg; +}; + +typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete); + +/* + * Generic BFA callback element. + */ +struct bfa_cb_qe_s { + struct list_head qe; + bfa_cb_cbfn_t cbfn; + bfa_boolean_t once; + bfa_boolean_t pre_rmv; /* set for stack based qe(s) */ + bfa_status_t fw_status; /* to access fw status in comp proc */ + void *cbarg; +}; + +/* + * ASIC block configurtion related + */ + +typedef void (*bfa_ablk_cbfn_t)(void *, enum bfa_status); + +struct bfa_ablk_s { + struct bfa_ioc_s *ioc; + struct bfa_ablk_cfg_s *cfg; + u16 *pcifn; + struct bfa_dma_s dma_addr; + bfa_boolean_t busy; + struct bfa_mbox_cmd_s mb; + bfa_ablk_cbfn_t cbfn; + void *cbarg; + struct bfa_ioc_notify_s ioc_notify; + struct bfa_mem_dma_s ablk_dma; +}; +#define BFA_MEM_ABLK_DMA(__bfa) (&((__bfa)->modules.ablk.ablk_dma)) + +/* + * SFP module specific + */ +typedef void (*bfa_cb_sfp_t) (void *cbarg, bfa_status_t status); + +struct bfa_sfp_s { + void *dev; + struct bfa_ioc_s *ioc; + struct bfa_trc_mod_s *trcmod; + struct sfp_mem_s *sfpmem; + bfa_cb_sfp_t cbfn; + void *cbarg; + enum bfi_sfp_mem_e memtype; /* mem access type */ + u32 status; + struct bfa_mbox_cmd_s mbcmd; + u8 *dbuf_kva; /* dma buf virtual address */ + u64 dbuf_pa; /* dma buf physical address */ + struct bfa_ioc_notify_s ioc_notify; + enum bfa_defs_sfp_media_e *media; + enum bfa_port_speed portspeed; + bfa_cb_sfp_t state_query_cbfn; + void *state_query_cbarg; + u8 lock; + u8 data_valid; /* data in dbuf is valid */ + u8 state; /* sfp state */ + u8 state_query_lock; + struct bfa_mem_dma_s sfp_dma; + u8 is_elb; /* eloopback */ +}; + +#define BFA_SFP_MOD(__bfa) (&(__bfa)->modules.sfp) +#define BFA_MEM_SFP_DMA(__bfa) (&(BFA_SFP_MOD(__bfa)->sfp_dma)) + +u32 bfa_sfp_meminfo(void); + +void bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, + void *dev, struct bfa_trc_mod_s *trcmod); + +void bfa_sfp_memclaim(struct bfa_sfp_s *diag, u8 *dm_kva, u64 dm_pa); +void bfa_sfp_intr(void *bfaarg, struct bfi_mbmsg_s *msg); + +bfa_status_t bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem, + bfa_cb_sfp_t cbfn, void *cbarg); + +bfa_status_t bfa_sfp_media(struct bfa_sfp_s *sfp, + enum bfa_defs_sfp_media_e *media, + bfa_cb_sfp_t cbfn, void *cbarg); + +bfa_status_t bfa_sfp_speed(struct bfa_sfp_s *sfp, + enum bfa_port_speed portspeed, + bfa_cb_sfp_t cbfn, void *cbarg); + +/* + * Flash module specific + */ +typedef void (*bfa_cb_flash_t) (void *cbarg, bfa_status_t status); + +struct bfa_flash_s { + struct bfa_ioc_s *ioc; /* back pointer to ioc */ + struct bfa_trc_mod_s *trcmod; + u32 type; /* partition type */ + u8 instance; /* partition instance */ + u8 rsv[3]; + u32 op_busy; /* operation busy flag */ + u32 residue; /* residual length */ + u32 offset; /* offset */ + bfa_status_t status; /* status */ + u8 *dbuf_kva; /* dma buf virtual address */ + u64 dbuf_pa; /* dma buf physical address */ + struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ + bfa_cb_flash_t cbfn; /* user callback function */ + void *cbarg; /* user callback arg */ + u8 *ubuf; /* user supplied buffer */ + struct bfa_cb_qe_s hcb_qe; /* comp: BFA callback qelem */ + u32 addr_off; /* partition address offset */ + struct bfa_mbox_cmd_s mb; /* mailbox */ + struct bfa_ioc_notify_s ioc_notify; /* ioc event notify */ 
+ struct bfa_mem_dma_s flash_dma; +}; + +#define BFA_FLASH(__bfa) (&(__bfa)->modules.flash) +#define BFA_MEM_FLASH_DMA(__bfa) (&(BFA_FLASH(__bfa)->flash_dma)) + +bfa_status_t bfa_flash_get_attr(struct bfa_flash_s *flash, + struct bfa_flash_attr_s *attr, + bfa_cb_flash_t cbfn, void *cbarg); +bfa_status_t bfa_flash_erase_part(struct bfa_flash_s *flash, + enum bfa_flash_part_type type, u8 instance, + bfa_cb_flash_t cbfn, void *cbarg); +bfa_status_t bfa_flash_update_part(struct bfa_flash_s *flash, + enum bfa_flash_part_type type, u8 instance, + void *buf, u32 len, u32 offset, + bfa_cb_flash_t cbfn, void *cbarg); +bfa_status_t bfa_flash_read_part(struct bfa_flash_s *flash, + enum bfa_flash_part_type type, u8 instance, void *buf, + u32 len, u32 offset, bfa_cb_flash_t cbfn, void *cbarg); +u32 bfa_flash_meminfo(bfa_boolean_t mincfg); +void bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, + void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg); +void bfa_flash_memclaim(struct bfa_flash_s *flash, + u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg); + +/* + * DIAG module specific + */ + +typedef void (*bfa_cb_diag_t) (void *cbarg, bfa_status_t status); +typedef void (*bfa_cb_diag_beacon_t) (void *dev, bfa_boolean_t beacon, + bfa_boolean_t link_e2e_beacon); + +/* + * Firmware ping test results + */ +struct bfa_diag_results_fwping { + u32 data; /* store the corrupted data */ + u32 status; + u32 dmastatus; + u8 rsvd[4]; +}; + +struct bfa_diag_qtest_result_s { + u32 status; + u16 count; /* sucessful queue test count */ + u8 queue; + u8 rsvd; /* 64-bit align */ +}; + +/* + * Firmware ping test results + */ +struct bfa_diag_fwping_s { + struct bfa_diag_results_fwping *result; + bfa_cb_diag_t cbfn; + void *cbarg; + u32 data; + u8 lock; + u8 rsv[3]; + u32 status; + u32 count; + struct bfa_mbox_cmd_s mbcmd; + u8 *dbuf_kva; /* dma buf virtual address */ + u64 dbuf_pa; /* dma buf physical address */ +}; + +/* + * Temperature sensor query results + */ +struct bfa_diag_results_tempsensor_s { + u32 status; + u16 temp; /* 10-bit A/D value */ + u16 brd_temp; /* 9-bit board temp */ + u8 ts_junc; /* show junction tempsensor */ + u8 ts_brd; /* show board tempsensor */ + u8 rsvd[6]; /* keep 8 bytes alignment */ +}; + +struct bfa_diag_tsensor_s { + bfa_cb_diag_t cbfn; + void *cbarg; + struct bfa_diag_results_tempsensor_s *temp; + u8 lock; + u8 rsv[3]; + u32 status; + struct bfa_mbox_cmd_s mbcmd; +}; + +struct bfa_diag_sfpshow_s { + struct sfp_mem_s *sfpmem; + bfa_cb_diag_t cbfn; + void *cbarg; + u8 lock; + u8 static_data; + u8 rsv[2]; + u32 status; + struct bfa_mbox_cmd_s mbcmd; + u8 *dbuf_kva; /* dma buf virtual address */ + u64 dbuf_pa; /* dma buf physical address */ +}; + +struct bfa_diag_led_s { + struct bfa_mbox_cmd_s mbcmd; + bfa_boolean_t lock; /* 1: ledtest is operating */ +}; + +struct bfa_diag_beacon_s { + struct bfa_mbox_cmd_s mbcmd; + bfa_boolean_t state; /* port beacon state */ + bfa_boolean_t link_e2e; /* link beacon state */ +}; + +struct bfa_diag_s { + void *dev; + struct bfa_ioc_s *ioc; + struct bfa_trc_mod_s *trcmod; + struct bfa_diag_fwping_s fwping; + struct bfa_diag_tsensor_s tsensor; + struct bfa_diag_sfpshow_s sfpshow; + struct bfa_diag_led_s ledtest; + struct bfa_diag_beacon_s beacon; + void *result; + struct bfa_timer_s timer; + bfa_cb_diag_beacon_t cbfn_beacon; + bfa_cb_diag_t cbfn; + void *cbarg; + u8 block; + u8 timer_active; + u8 rsvd[2]; + u32 status; + struct bfa_ioc_notify_s ioc_notify; + struct bfa_mem_dma_s diag_dma; }; +#define BFA_DIAG_MOD(__bfa) 
(&(__bfa)->modules.diag_mod) +#define BFA_MEM_DIAG_DMA(__bfa) (&(BFA_DIAG_MOD(__bfa)->diag_dma)) + +u32 bfa_diag_meminfo(void); +void bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa); +void bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev, + bfa_cb_diag_beacon_t cbfn_beacon, + struct bfa_trc_mod_s *trcmod); +bfa_status_t bfa_diag_reg_read(struct bfa_diag_s *diag, u32 offset, + u32 len, u32 *buf, u32 force); +bfa_status_t bfa_diag_reg_write(struct bfa_diag_s *diag, u32 offset, + u32 len, u32 value, u32 force); +bfa_status_t bfa_diag_tsensor_query(struct bfa_diag_s *diag, + struct bfa_diag_results_tempsensor_s *result, + bfa_cb_diag_t cbfn, void *cbarg); +bfa_status_t bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, + u32 pattern, struct bfa_diag_results_fwping *result, + bfa_cb_diag_t cbfn, void *cbarg); +bfa_status_t bfa_diag_sfpshow(struct bfa_diag_s *diag, + struct sfp_mem_s *sfpmem, u8 static_data, + bfa_cb_diag_t cbfn, void *cbarg); +bfa_status_t bfa_diag_memtest(struct bfa_diag_s *diag, + struct bfa_diag_memtest_s *memtest, u32 pattern, + struct bfa_diag_memtest_result *result, + bfa_cb_diag_t cbfn, void *cbarg); +bfa_status_t bfa_diag_ledtest(struct bfa_diag_s *diag, + struct bfa_diag_ledtest_s *ledtest); +bfa_status_t bfa_diag_beacon_port(struct bfa_diag_s *diag, + bfa_boolean_t beacon, bfa_boolean_t link_e2e_beacon, + u32 sec); + +/* + * PHY module specific + */ +typedef void (*bfa_cb_phy_t) (void *cbarg, bfa_status_t status); + +struct bfa_phy_s { + struct bfa_ioc_s *ioc; /* back pointer to ioc */ + struct bfa_trc_mod_s *trcmod; /* trace module */ + u8 instance; /* port instance */ + u8 op_busy; /* operation busy flag */ + u8 rsv[2]; + u32 residue; /* residual length */ + u32 offset; /* offset */ + bfa_status_t status; /* status */ + u8 *dbuf_kva; /* dma buf virtual address */ + u64 dbuf_pa; /* dma buf physical address */ + struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ + bfa_cb_phy_t cbfn; /* user callback function */ + void *cbarg; /* user callback arg */ + u8 *ubuf; /* user supplied buffer */ + struct bfa_cb_qe_s hcb_qe; /* comp: BFA callback qelem */ + u32 addr_off; /* phy address offset */ + struct bfa_mbox_cmd_s mb; /* mailbox */ + struct bfa_ioc_notify_s ioc_notify; /* ioc event notify */ + struct bfa_mem_dma_s phy_dma; +}; +#define BFA_PHY(__bfa) (&(__bfa)->modules.phy) +#define BFA_MEM_PHY_DMA(__bfa) (&(BFA_PHY(__bfa)->phy_dma)) + +bfa_boolean_t bfa_phy_busy(struct bfa_ioc_s *ioc); +bfa_status_t bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance, + struct bfa_phy_attr_s *attr, + bfa_cb_phy_t cbfn, void *cbarg); +bfa_status_t bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance, + struct bfa_phy_stats_s *stats, + bfa_cb_phy_t cbfn, void *cbarg); +bfa_status_t bfa_phy_update(struct bfa_phy_s *phy, u8 instance, + void *buf, u32 len, u32 offset, + bfa_cb_phy_t cbfn, void *cbarg); +bfa_status_t bfa_phy_read(struct bfa_phy_s *phy, u8 instance, + void *buf, u32 len, u32 offset, + bfa_cb_phy_t cbfn, void *cbarg); + +u32 bfa_phy_meminfo(bfa_boolean_t mincfg); +void bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, + void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg); +void bfa_phy_memclaim(struct bfa_phy_s *phy, + u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg); +void bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg); + +/* + * Driver Config( dconf) specific + */ +#define BFI_DCONF_SIGNATURE 0xabcdabcd +#define BFI_DCONF_VERSION 1 + +#pragma pack(1) +struct bfa_dconf_hdr_s { + u32 signature; + u32 
version; +}; + +struct bfa_dconf_s { + struct bfa_dconf_hdr_s hdr; + struct bfa_lunmask_cfg_s lun_mask; +}; +#pragma pack() + +struct bfa_dconf_mod_s { + bfa_sm_t sm; + u8 instance; + bfa_boolean_t flashdone; + bfa_boolean_t read_data_valid; + bfa_boolean_t min_cfg; + struct bfa_timer_s timer; + struct bfa_s *bfa; + void *bfad; + void *trcmod; + struct bfa_dconf_s *dconf; + struct bfa_mem_kva_s kva_seg; +}; + +#define BFA_DCONF_MOD(__bfa) \ + (&(__bfa)->modules.dconf_mod) +#define BFA_MEM_DCONF_KVA(__bfa) (&(BFA_DCONF_MOD(__bfa)->kva_seg)) +#define bfa_dconf_read_data_valid(__bfa) \ + (BFA_DCONF_MOD(__bfa)->read_data_valid) +#define BFA_DCONF_UPDATE_TOV 5000 /* memtest timeout in msec */ + +void bfa_dconf_modinit(struct bfa_s *bfa); +void bfa_dconf_modexit(struct bfa_s *bfa); +bfa_status_t bfa_dconf_update(struct bfa_s *bfa); + +/* + * IOC specfic macros + */ #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func) #define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id) #define bfa_ioc_bar0(__ioc) ((__ioc)->pcidev.pci_bar_kva) #define bfa_ioc_portid(__ioc) ((__ioc)->port_id) +#define bfa_ioc_asic_gen(__ioc) ((__ioc)->asic_gen) +#define bfa_ioc_is_cna(__ioc) \ + ((bfa_ioc_get_type(__ioc) == BFA_IOC_TYPE_FCoE) || \ + (bfa_ioc_get_type(__ioc) == BFA_IOC_TYPE_LL)) #define bfa_ioc_fetch_stats(__ioc, __stats) \ (((__stats)->drv_stats) = (__ioc)->stats) #define bfa_ioc_clr_stats(__ioc) \ @@ -287,12 +752,9 @@ struct bfa_ioc_hwif_s { #define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++) #define BFA_IOC_FWIMG_MINSZ (16 * 1024) -#define BFA_IOC_FWIMG_TYPE(__ioc) \ - (((__ioc)->ctdev) ? \ - (((__ioc)->fcmode) ? BFI_IMAGE_CT_FC : BFI_IMAGE_CT_CNA) : \ - BFI_IMAGE_CB_FC) -#define BFA_IOC_FW_SMEM_SIZE(__ioc) \ - (((__ioc)->ctdev) ? BFI_SMEM_CT_SIZE : BFI_SMEM_CB_SIZE) +#define BFA_IOC_FW_SMEM_SIZE(__ioc) \ + ((bfa_ioc_asic_gen(__ioc) == BFI_ASIC_GEN_CB) \ + ? 
BFI_SMEM_CB_SIZE : BFI_SMEM_CT_SIZE) #define BFA_IOC_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS) #define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS) #define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS) @@ -305,7 +767,7 @@ void bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs); void bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc); void bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len); -void bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg); +bfa_boolean_t bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg); void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc, bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg); @@ -315,40 +777,49 @@ void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc, #define bfa_ioc_pll_init_asic(__ioc) \ ((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \ - (__ioc)->fcmode)) + (__ioc)->asic_mode)) bfa_status_t bfa_ioc_pll_init(struct bfa_ioc_s *ioc); -bfa_status_t bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode); -bfa_boolean_t bfa_ioc_ct_pll_init_complete(void __iomem *rb); -bfa_status_t bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode); +bfa_status_t bfa_ioc_cb_pll_init(void __iomem *rb, enum bfi_asic_mode mode); +bfa_status_t bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode); +bfa_status_t bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode); -#define bfa_ioc_isr_mode_set(__ioc, __msix) \ - ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix)) +#define bfa_ioc_isr_mode_set(__ioc, __msix) do { \ + if ((__ioc)->ioc_hwif->ioc_isr_mode_set) \ + ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix)); \ +} while (0) #define bfa_ioc_ownership_reset(__ioc) \ ((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc)) +#define bfa_ioc_get_fcmode(__ioc) ((__ioc)->fcmode) +#define bfa_ioc_lpu_read_stat(__ioc) do { \ + if ((__ioc)->ioc_hwif->ioc_lpu_read_stat) \ + ((__ioc)->ioc_hwif->ioc_lpu_read_stat(__ioc)); \ +} while (0) - -void bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc); void bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc); +void bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc); +void bfa_ioc_set_ct2_hwif(struct bfa_ioc_s *ioc); +void bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc); void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod); void bfa_ioc_auto_recover(bfa_boolean_t auto_recover); void bfa_ioc_detach(struct bfa_ioc_s *ioc); void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev, - enum bfi_mclass mc); + enum bfi_pcifn_class clscode); void bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa); void bfa_ioc_enable(struct bfa_ioc_s *ioc); void bfa_ioc_disable(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_intx_claim(struct bfa_ioc_s *ioc); void bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, - u32 boot_param); + u32 boot_env); void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *msg); void bfa_ioc_error_isr(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_is_initialized(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc); +bfa_boolean_t bfa_ioc_is_acq_addr(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc); void bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc); @@ -372,17 +843,43 @@ bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int 
*trclen); bfa_status_t bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf, u32 *offset, int *buflen); -void bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc); -bfa_boolean_t bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_sem_get(void __iomem *sem_reg); void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr); bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr); +void bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event); bfa_status_t bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats); bfa_status_t bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc); /* + * asic block configuration related APIs + */ +u32 bfa_ablk_meminfo(void); +void bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa); +void bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc); +bfa_status_t bfa_ablk_query(struct bfa_ablk_s *ablk, + struct bfa_ablk_cfg_s *ablk_cfg, + bfa_ablk_cbfn_t cbfn, void *cbarg); +bfa_status_t bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, + enum bfa_mode_s mode, int max_pf, int max_vf, + bfa_ablk_cbfn_t cbfn, void *cbarg); +bfa_status_t bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, + enum bfa_mode_s mode, int max_pf, int max_vf, + bfa_ablk_cbfn_t cbfn, void *cbarg); +bfa_status_t bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn, + u8 port, enum bfi_pcifn_class personality, int bw, + bfa_ablk_cbfn_t cbfn, void *cbarg); +bfa_status_t bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn, + bfa_ablk_cbfn_t cbfn, void *cbarg); +bfa_status_t bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw, + bfa_ablk_cbfn_t cbfn, void *cbarg); +bfa_status_t bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, + bfa_ablk_cbfn_t cbfn, void *cbarg); +bfa_status_t bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, + bfa_ablk_cbfn_t cbfn, void *cbarg); + +/* * bfa mfg wwn API functions */ mac_t bfa_ioc_get_mac(struct bfa_ioc_s *ioc); @@ -391,50 +888,64 @@ mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc); /* * F/W Image Size & Chunk */ -extern u32 bfi_image_ct_fc_size; -extern u32 bfi_image_ct_cna_size; -extern u32 bfi_image_cb_fc_size; -extern u32 *bfi_image_ct_fc; -extern u32 *bfi_image_ct_cna; -extern u32 *bfi_image_cb_fc; +extern u32 bfi_image_cb_size; +extern u32 bfi_image_ct_size; +extern u32 bfi_image_ct2_size; +extern u32 *bfi_image_cb; +extern u32 *bfi_image_ct; +extern u32 *bfi_image_ct2; static inline u32 * -bfi_image_ct_fc_get_chunk(u32 off) -{ return (u32 *)(bfi_image_ct_fc + off); } +bfi_image_cb_get_chunk(u32 off) +{ + return (u32 *)(bfi_image_cb + off); +} static inline u32 * -bfi_image_ct_cna_get_chunk(u32 off) -{ return (u32 *)(bfi_image_ct_cna + off); } +bfi_image_ct_get_chunk(u32 off) +{ + return (u32 *)(bfi_image_ct + off); +} static inline u32 * -bfi_image_cb_fc_get_chunk(u32 off) -{ return (u32 *)(bfi_image_cb_fc + off); } +bfi_image_ct2_get_chunk(u32 off) +{ + return (u32 *)(bfi_image_ct2 + off); +} static inline u32* -bfa_cb_image_get_chunk(int type, u32 off) +bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off) { - switch (type) { - case BFI_IMAGE_CT_FC: - return bfi_image_ct_fc_get_chunk(off); break; - case BFI_IMAGE_CT_CNA: - return bfi_image_ct_cna_get_chunk(off); break; - case BFI_IMAGE_CB_FC: - return bfi_image_cb_fc_get_chunk(off); break; - default: return NULL; + switch (asic_gen) { + case BFI_ASIC_GEN_CB: + return bfi_image_cb_get_chunk(off); + break; + case BFI_ASIC_GEN_CT: + return bfi_image_ct_get_chunk(off); + break; + case BFI_ASIC_GEN_CT2: + return 
bfi_image_ct2_get_chunk(off); + break; + default: + return NULL; } } static inline u32 -bfa_cb_image_get_size(int type) +bfa_cb_image_get_size(enum bfi_asic_gen asic_gen) { - switch (type) { - case BFI_IMAGE_CT_FC: - return bfi_image_ct_fc_size; break; - case BFI_IMAGE_CT_CNA: - return bfi_image_ct_cna_size; break; - case BFI_IMAGE_CB_FC: - return bfi_image_cb_fc_size; break; - default: return 0; + switch (asic_gen) { + case BFI_ASIC_GEN_CB: + return bfi_image_cb_size; + break; + case BFI_ASIC_GEN_CT: + return bfi_image_ct_size; + break; + case BFI_ASIC_GEN_CT2: + return bfi_image_ct2_size; + break; + default: + return 0; } } diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c index 89ae4c8f95a..30df8a28471 100644 --- a/drivers/scsi/bfa/bfa_ioc_cb.c +++ b/drivers/scsi/bfa/bfa_ioc_cb.c @@ -17,7 +17,7 @@ #include "bfad_drv.h" #include "bfa_ioc.h" -#include "bfi_cbreg.h" +#include "bfi_reg.h" #include "bfa_defs.h" BFA_TRC_FILE(CNA, IOC_CB); @@ -69,21 +69,6 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc) static bfa_boolean_t bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc) { - struct bfi_ioc_image_hdr_s fwhdr; - uint32_t fwstate = readl(ioc->ioc_regs.ioc_fwstate); - - if (fwstate == BFI_IOC_UNINIT) - return BFA_TRUE; - - bfa_ioc_fwver_get(ioc, &fwhdr); - - if (swab32(fwhdr.exec) == BFI_BOOT_TYPE_NORMAL) - return BFA_TRUE; - - bfa_trc(ioc, fwstate); - bfa_trc(ioc, fwhdr.exec); - writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate); - return BFA_TRUE; } @@ -98,7 +83,7 @@ bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc) static void bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc) { - writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set); + writel(~0U, ioc->ioc_regs.err_set); readl(ioc->ioc_regs.err_set); } @@ -152,8 +137,8 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc) */ ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG); ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG); - ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_400_CTL_REG); - ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_212_CTL_REG); + ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG); + ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG); /* * IOC semaphore registers and serialization @@ -285,18 +270,18 @@ bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc) } bfa_status_t -bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode) +bfa_ioc_cb_pll_init(void __iomem *rb, enum bfi_asic_mode fcmode) { u32 pll_sclk, pll_fclk; - pll_sclk = __APP_PLL_212_ENABLE | __APP_PLL_212_LRESETN | - __APP_PLL_212_P0_1(3U) | - __APP_PLL_212_JITLMT0_1(3U) | - __APP_PLL_212_CNTLMT0_1(3U); - pll_fclk = __APP_PLL_400_ENABLE | __APP_PLL_400_LRESETN | - __APP_PLL_400_RSEL200500 | __APP_PLL_400_P0_1(3U) | - __APP_PLL_400_JITLMT0_1(3U) | - __APP_PLL_400_CNTLMT0_1(3U); + pll_sclk = __APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN | + __APP_PLL_SCLK_P0_1(3U) | + __APP_PLL_SCLK_JITLMT0_1(3U) | + __APP_PLL_SCLK_CNTLMT0_1(3U); + pll_fclk = __APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN | + __APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) | + __APP_PLL_LCLK_JITLMT0_1(3U) | + __APP_PLL_LCLK_CNTLMT0_1(3U); writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG)); writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG)); writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); @@ -305,24 +290,24 @@ bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode) writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); - writel(__APP_PLL_212_LOGIC_SOFT_RESET, rb + 
APP_PLL_212_CTL_REG); - writel(__APP_PLL_212_BYPASS | __APP_PLL_212_LOGIC_SOFT_RESET, - rb + APP_PLL_212_CTL_REG); - writel(__APP_PLL_400_LOGIC_SOFT_RESET, rb + APP_PLL_400_CTL_REG); - writel(__APP_PLL_400_BYPASS | __APP_PLL_400_LOGIC_SOFT_RESET, - rb + APP_PLL_400_CTL_REG); + writel(__APP_PLL_SCLK_LOGIC_SOFT_RESET, rb + APP_PLL_SCLK_CTL_REG); + writel(__APP_PLL_SCLK_BYPASS | __APP_PLL_SCLK_LOGIC_SOFT_RESET, + rb + APP_PLL_SCLK_CTL_REG); + writel(__APP_PLL_LCLK_LOGIC_SOFT_RESET, rb + APP_PLL_LCLK_CTL_REG); + writel(__APP_PLL_LCLK_BYPASS | __APP_PLL_LCLK_LOGIC_SOFT_RESET, + rb + APP_PLL_LCLK_CTL_REG); udelay(2); - writel(__APP_PLL_212_LOGIC_SOFT_RESET, rb + APP_PLL_212_CTL_REG); - writel(__APP_PLL_400_LOGIC_SOFT_RESET, rb + APP_PLL_400_CTL_REG); - writel(pll_sclk | __APP_PLL_212_LOGIC_SOFT_RESET, - rb + APP_PLL_212_CTL_REG); - writel(pll_fclk | __APP_PLL_400_LOGIC_SOFT_RESET, - rb + APP_PLL_400_CTL_REG); + writel(__APP_PLL_SCLK_LOGIC_SOFT_RESET, rb + APP_PLL_SCLK_CTL_REG); + writel(__APP_PLL_LCLK_LOGIC_SOFT_RESET, rb + APP_PLL_LCLK_CTL_REG); + writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET, + rb + APP_PLL_SCLK_CTL_REG); + writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET, + rb + APP_PLL_LCLK_CTL_REG); udelay(2000); writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS)); writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); - writel(pll_sclk, (rb + APP_PLL_212_CTL_REG)); - writel(pll_fclk, (rb + APP_PLL_400_CTL_REG)); + writel(pll_sclk, (rb + APP_PLL_SCLK_CTL_REG)); + writel(pll_fclk, (rb + APP_PLL_LCLK_CTL_REG)); return BFA_STATUS_OK; } diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c index 93612520f0d..d1b8f0caaa7 100644 --- a/drivers/scsi/bfa/bfa_ioc_ct.c +++ b/drivers/scsi/bfa/bfa_ioc_ct.c @@ -17,7 +17,7 @@ #include "bfad_drv.h" #include "bfa_ioc.h" -#include "bfi_ctreg.h" +#include "bfi_reg.h" #include "bfa_defs.h" BFA_TRC_FILE(CNA, IOC_CT); @@ -36,9 +36,6 @@ BFA_TRC_FILE(CNA, IOC_CT); */ static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc); static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc); -static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc); -static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc); -static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix); static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc); static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc); static bfa_boolean_t bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc); @@ -48,29 +45,7 @@ static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc); static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc); static struct bfa_ioc_hwif_s hwif_ct; - -/* - * Called from bfa_ioc_attach() to map asic specific calls. 
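
A minimal sketch, not part of the patch, of how a BFA sub-module might use the bfa_mem_dma_setup() macro and segment cursors added to bfa_ioc.h above. The module name, its example_dma field, and the 4KB segment size are hypothetical; the sketch assumes iocfc claims the queued segments (filling kva_curp/dma_curp) between the meminfo and attach steps, which is what the cursor fields are for.

#include "bfa_modules.h"	/* bfa_s, bfa_meminfo_s, bfa_mem_dma_s, ... */

/* hypothetical module state -- not a structure defined by this patch */
struct bfa_example_mod_s {
	struct bfa_mem_dma_s	example_dma;	/* DMA segment descriptor */
	u8			*buf_kva;	/* claimed virtual address  */
	u64			buf_pa;		/* claimed physical address */
};

static struct bfa_example_mod_s example_mod;

/* meminfo step: queue the segment so iocfc can allocate it in one pass */
static void
bfa_example_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
			struct bfa_s *bfa)
{
	bfa_mem_dma_setup(minfo, &example_mod.example_dma, 4 * 1024);
}

/* attach step: pick up the cursors iocfc advanced for this segment */
static void
bfa_example_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
			struct bfa_pcidev_s *pcidev)
{
	example_mod.buf_kva = bfa_mem_dma_virt(&example_mod.example_dma);
	example_mod.buf_pa  = bfa_mem_dma_phys(&example_mod.example_dma);
}
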
- */ -void -bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc) -{ - hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init; - hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock; - hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock; - hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init; - hwif_ct.ioc_map_port = bfa_ioc_ct_map_port; - hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set; - hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail; - hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset; - hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start; - hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join; - hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave; - hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack; - hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete; - - ioc->ioc_hwif = &hwif_ct; -} +static struct bfa_ioc_hwif_s hwif_ct2; /* * Return true if firmware of current driver matches the running firmware. @@ -83,15 +58,9 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc) struct bfi_ioc_image_hdr_s fwhdr; /* - * Firmware match check is relevant only for CNA. - */ - if (!ioc->cna) - return BFA_TRUE; - - /* * If bios boot (flash based) -- do not increment usage count */ - if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) < + if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) < BFA_IOC_FWIMG_MINSZ) return BFA_TRUE; @@ -103,6 +72,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc) */ if (usecnt == 0) { writel(1, ioc->ioc_regs.ioc_usage_reg); + readl(ioc->ioc_regs.ioc_usage_sem_reg); writel(1, ioc->ioc_regs.ioc_usage_sem_reg); writel(0, ioc->ioc_regs.ioc_fail_sync); bfa_trc(ioc, usecnt); @@ -122,6 +92,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc) */ bfa_ioc_fwver_get(ioc, &fwhdr); if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) { + readl(ioc->ioc_regs.ioc_usage_sem_reg); writel(1, ioc->ioc_regs.ioc_usage_sem_reg); bfa_trc(ioc, usecnt); return BFA_FALSE; @@ -132,6 +103,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc) */ usecnt++; writel(usecnt, ioc->ioc_regs.ioc_usage_reg); + readl(ioc->ioc_regs.ioc_usage_sem_reg); writel(1, ioc->ioc_regs.ioc_usage_sem_reg); bfa_trc(ioc, usecnt); return BFA_TRUE; @@ -143,15 +115,9 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc) u32 usecnt; /* - * Firmware lock is relevant only for CNA. 
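
A minimal sketch, not part of the patch, showing how the per-ASIC-generation firmware image helpers from the bfa_ioc.h hunk fit together; bfa_ioc_ct_firmware_lock() above already uses bfa_cb_image_get_size() this way for its flash-boot check. The checksum itself and the assumption that the image size is a count of 32-bit words (consistent with the chunk macros) are editorial.

#include "bfa_ioc.h"	/* bfa_cb_image_get_*, BFA_IOC_FLASH_* macros */

/* walk a firmware image word by word and fold it into a simple checksum */
static u32
bfa_example_fwimg_sum(enum bfi_asic_gen gen)
{
	u32 nwords = bfa_cb_image_get_size(gen);	/* 32-bit words (assumed) */
	u32 sum = 0;
	u32 i;

	for (i = 0; i < nwords; i++) {
		/* locate the BFI_FLASH_CHUNK_SZ_WORDS chunk holding word i */
		u32 *chunk = bfa_cb_image_get_chunk(gen,
				BFA_IOC_FLASH_CHUNK_ADDR(
					BFA_IOC_FLASH_CHUNK_NO(i)));

		sum += chunk[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)];
	}

	return sum;
}
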
- */ - if (!ioc->cna) - return; - - /* * If bios boot (flash based) -- do not decrement usage count */ - if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) < + if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) < BFA_IOC_FWIMG_MINSZ) return; @@ -166,6 +132,7 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc) writel(usecnt, ioc->ioc_regs.ioc_usage_reg); bfa_trc(ioc, usecnt); + readl(ioc->ioc_regs.ioc_usage_sem_reg); writel(1, ioc->ioc_regs.ioc_usage_sem_reg); } @@ -175,14 +142,14 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc) static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc) { - if (ioc->cna) { + if (bfa_ioc_is_cna(ioc)) { writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt); writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt); /* Wait for halt to take effect */ readl(ioc->ioc_regs.ll_halt); readl(ioc->ioc_regs.alt_ll_halt); } else { - writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set); + writel(~0U, ioc->ioc_regs.err_set); readl(ioc->ioc_regs.err_set); } } @@ -190,7 +157,7 @@ bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc) /* * Host to LPU mailbox message addresses */ -static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = { +static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } ct_fnreg[] = { { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 }, { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 }, { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 }, @@ -200,21 +167,31 @@ static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = { /* * Host <-> LPU mailbox command/status registers - port 0 */ -static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = { - { HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT }, - { HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT }, - { HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT }, - { HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT } +static struct { u32 hfn, lpu; } ct_p0reg[] = { + { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT }, + { HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT }, + { HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT }, + { HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT } }; /* * Host <-> LPU mailbox command/status registers - port 1 */ -static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = { - { HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT }, - { HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT }, - { HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT }, - { HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT } +static struct { u32 hfn, lpu; } ct_p1reg[] = { + { HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT }, + { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT }, + { HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT }, + { HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT } +}; + +static struct { uint32_t hfn_mbox, lpu_mbox, hfn_pgn, hfn, lpu, lpu_read; } + ct2_reg[] = { + { CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM, + CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT, + CT2_HOSTFN_LPU0_READ_STAT}, + { CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM, + CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT, + CT2_HOSTFN_LPU1_READ_STAT}, }; static void @@ -225,24 +202,24 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc) rb = bfa_ioc_bar0(ioc); - ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox; - ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox; - ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn; + ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox; + 
ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox; + ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn; if (ioc->port_id == 0) { ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG; - ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn; - ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu; + ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn; + ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu; ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1; } else { ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG); ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG; - ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn; - ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu; + ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn; + ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu; ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0; } @@ -252,8 +229,8 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc) */ ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG); ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG); - ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG); - ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG); + ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG); + ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG); /* * IOC semaphore registers and serialization @@ -276,6 +253,64 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc) ioc->ioc_regs.err_set = (rb + ERR_SET_REG); } +static void +bfa_ioc_ct2_reg_init(struct bfa_ioc_s *ioc) +{ + void __iomem *rb; + int port = bfa_ioc_portid(ioc); + + rb = bfa_ioc_bar0(ioc); + + ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox; + ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox; + ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn; + ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn; + ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu; + ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read; + + if (port == 0) { + ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG; + ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG; + ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG; + ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; + ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1; + } else { + ioc->ioc_regs.heartbeat = (rb + CT2_BFA_IOC1_HBEAT_REG); + ioc->ioc_regs.ioc_fwstate = (rb + CT2_BFA_IOC1_STATE_REG); + ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG; + ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; + ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0; + } + + /* + * PSS control registers + */ + ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG); + ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG); + ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + CT2_APP_PLL_LCLK_CTL_REG); + ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + CT2_APP_PLL_SCLK_CTL_REG); + + /* + * IOC semaphore registers and serialization + */ + ioc->ioc_regs.ioc_sem_reg = (rb + CT2_HOST_SEM0_REG); + ioc->ioc_regs.ioc_usage_sem_reg = (rb + CT2_HOST_SEM1_REG); + ioc->ioc_regs.ioc_init_sem_reg = (rb + CT2_HOST_SEM2_REG); + ioc->ioc_regs.ioc_usage_reg = (rb + CT2_BFA_FW_USE_COUNT); + ioc->ioc_regs.ioc_fail_sync = (rb + CT2_BFA_IOC_FAIL_SYNC); + + /* + * sram memory access + */ + 
ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START); + ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT; + + /* + * err set reg : for notification of hb failure in fcmode + */ + ioc->ioc_regs.err_set = (rb + ERR_SET_REG); +} + /* * Initialize IOC to port mapping. */ @@ -298,6 +333,19 @@ bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc) bfa_trc(ioc, ioc->port_id); } +static void +bfa_ioc_ct2_map_port(struct bfa_ioc_s *ioc) +{ + void __iomem *rb = ioc->pcidev.pci_bar_kva; + u32 r32; + + r32 = readl(rb + CT2_HOSTFN_PERSONALITY0); + ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH); + + bfa_trc(ioc, bfa_ioc_pcifn(ioc)); + bfa_trc(ioc, ioc->port_id); +} + /* * Set interrupt mode for a function: INTX or MSIX */ @@ -316,7 +364,7 @@ bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix) /* * If already in desired mode, do not change anything */ - if (!msix && mode) + if ((!msix && mode) || (msix && !mode)) return; if (msix) @@ -331,6 +379,20 @@ bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix) writel(r32, rb + FNC_PERS_REG); } +bfa_boolean_t +bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc_s *ioc) +{ + u32 r32; + + r32 = readl(ioc->ioc_regs.lpu_read_stat); + if (r32) { + writel(1, ioc->ioc_regs.lpu_read_stat); + return BFA_TRUE; + } + + return BFA_FALSE; +} + /* * Cleanup hw semaphore and usecnt registers */ @@ -338,9 +400,10 @@ static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc) { - if (ioc->cna) { + if (bfa_ioc_is_cna(ioc)) { bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); writel(0, ioc->ioc_regs.ioc_usage_reg); + readl(ioc->ioc_regs.ioc_usage_sem_reg); writel(1, ioc->ioc_regs.ioc_usage_sem_reg); } @@ -449,32 +512,99 @@ bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc) return BFA_FALSE; } +/** + * Called from bfa_ioc_attach() to map asic specific calls. + */ +static void +bfa_ioc_set_ctx_hwif(struct bfa_ioc_s *ioc, struct bfa_ioc_hwif_s *hwif) +{ + hwif->ioc_firmware_lock = bfa_ioc_ct_firmware_lock; + hwif->ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock; + hwif->ioc_notify_fail = bfa_ioc_ct_notify_fail; + hwif->ioc_ownership_reset = bfa_ioc_ct_ownership_reset; + hwif->ioc_sync_start = bfa_ioc_ct_sync_start; + hwif->ioc_sync_join = bfa_ioc_ct_sync_join; + hwif->ioc_sync_leave = bfa_ioc_ct_sync_leave; + hwif->ioc_sync_ack = bfa_ioc_ct_sync_ack; + hwif->ioc_sync_complete = bfa_ioc_ct_sync_complete; +} + +/** + * Called from bfa_ioc_attach() to map asic specific calls. + */ +void +bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc) +{ + bfa_ioc_set_ctx_hwif(ioc, &hwif_ct); + + hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init; + hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init; + hwif_ct.ioc_map_port = bfa_ioc_ct_map_port; + hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set; + ioc->ioc_hwif = &hwif_ct; +} + +/** + * Called from bfa_ioc_attach() to map asic specific calls. 
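
A minimal sketch, not part of the patch, of why bfa_ioc.h wraps the optional hwif hooks in NULL-checking macros: bfa_ioc_set_ctx_hwif() below fills only the ops common to CT and CT2, each variant then plugs in its own pll_init/reg_init/map_port, and CT2 deliberately leaves ioc_isr_mode_set NULL. The caller below is hypothetical; it just shows that code can invoke the hooks without knowing which ASIC generation it is on.

#include "bfa_ioc.h"

static void
bfa_example_intr_mode(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
{
	/*
	 * Expands to "call ioc_isr_mode_set only if the hwif provides it",
	 * so this is a no-op on hwifs that leave the hook NULL (CT2 here).
	 */
	bfa_ioc_isr_mode_set(ioc, msix);

	/* same pattern for the mailbox read-status hook only CT2 provides */
	bfa_ioc_lpu_read_stat(ioc);
}
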
+ */ +void +bfa_ioc_set_ct2_hwif(struct bfa_ioc_s *ioc) +{ + bfa_ioc_set_ctx_hwif(ioc, &hwif_ct2); + + hwif_ct2.ioc_pll_init = bfa_ioc_ct2_pll_init; + hwif_ct2.ioc_reg_init = bfa_ioc_ct2_reg_init; + hwif_ct2.ioc_map_port = bfa_ioc_ct2_map_port; + hwif_ct2.ioc_lpu_read_stat = bfa_ioc_ct2_lpu_read_stat; + hwif_ct2.ioc_isr_mode_set = NULL; + ioc->ioc_hwif = &hwif_ct2; +} + /* - * Check the firmware state to know if pll_init has been completed already + * Workaround for MSI-X resource allocation for catapult-2 with no asic block */ -bfa_boolean_t -bfa_ioc_ct_pll_init_complete(void __iomem *rb) +#define HOSTFN_MSIX_DEFAULT 64 +#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR 0x30138 +#define HOSTFN_MSIX_VT_OFST_NUMVT 0x3013c +#define __MSIX_VT_NUMVT__MK 0x003ff800 +#define __MSIX_VT_NUMVT__SH 11 +#define __MSIX_VT_NUMVT_(_v) ((_v) << __MSIX_VT_NUMVT__SH) +#define __MSIX_VT_OFST_ 0x000007ff +void +bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc) { - if ((readl(rb + BFA_IOC0_STATE_REG) == BFI_IOC_OP) || - (readl(rb + BFA_IOC1_STATE_REG) == BFI_IOC_OP)) - return BFA_TRUE; + void __iomem *rb = ioc->pcidev.pci_bar_kva; + u32 r32; - return BFA_FALSE; + r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT); + if (r32 & __MSIX_VT_NUMVT__MK) { + writel(r32 & __MSIX_VT_OFST_, + rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR); + return; + } + + writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) | + HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc), + rb + HOSTFN_MSIX_VT_OFST_NUMVT); + writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc), + rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR); } bfa_status_t -bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode) +bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode) { u32 pll_sclk, pll_fclk, r32; + bfa_boolean_t fcmode = (mode == BFI_ASIC_MODE_FC); + + pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST | + __APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) | + __APP_PLL_SCLK_JITLMT0_1(3U) | + __APP_PLL_SCLK_CNTLMT0_1(1U); + pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST | + __APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) | + __APP_PLL_LCLK_JITLMT0_1(3U) | + __APP_PLL_LCLK_CNTLMT0_1(1U); - pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST | - __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) | - __APP_PLL_312_JITLMT0_1(3U) | - __APP_PLL_312_CNTLMT0_1(1U); - pll_fclk = __APP_PLL_425_LRESETN | __APP_PLL_425_ENARST | - __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) | - __APP_PLL_425_JITLMT0_1(3U) | - __APP_PLL_425_CNTLMT0_1(1U); if (fcmode) { writel(0, (rb + OP_MODE)); writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 | @@ -491,20 +621,21 @@ bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode) writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); - writel(pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET, - rb + APP_PLL_312_CTL_REG); - writel(pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET, - rb + APP_PLL_425_CTL_REG); - writel(pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE, - rb + APP_PLL_312_CTL_REG); - writel(pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE, - rb + APP_PLL_425_CTL_REG); + writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET, + rb + APP_PLL_SCLK_CTL_REG); + writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET, + rb + APP_PLL_LCLK_CTL_REG); + writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET | + __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG); + writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET | + __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG); readl(rb + 
HOSTFN0_INT_MSK); udelay(2000); writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS)); writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); - writel(pll_sclk | __APP_PLL_312_ENABLE, rb + APP_PLL_312_CTL_REG); - writel(pll_fclk | __APP_PLL_425_ENABLE, rb + APP_PLL_425_CTL_REG); + writel(pll_sclk | __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG); + writel(pll_fclk | __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG); + if (!fcmode) { writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0)); writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1)); @@ -524,3 +655,206 @@ bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode) writel(0, (rb + MBIST_CTL_REG)); return BFA_STATUS_OK; } + +static void +bfa_ioc_ct2_sclk_init(void __iomem *rb) +{ + u32 r32; + + /* + * put s_clk PLL and PLL FSM in reset + */ + r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); + r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN); + r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS | + __APP_PLL_SCLK_LOGIC_SOFT_RESET); + writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG)); + + /* + * Ignore mode and program for the max clock (which is FC16) + * Firmware/NFC will do the PLL init appropiately + */ + r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); + r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2); + writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG)); + + /* + * while doing PLL init dont clock gate ethernet subsystem + */ + r32 = readl((rb + CT2_CHIP_MISC_PRG)); + writel(r32 | __ETH_CLK_ENABLE_PORT0, (rb + CT2_CHIP_MISC_PRG)); + + r32 = readl((rb + CT2_PCIE_MISC_REG)); + writel(r32 | __ETH_CLK_ENABLE_PORT1, (rb + CT2_PCIE_MISC_REG)); + + /* + * set sclk value + */ + r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); + r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL | + __APP_PLL_SCLK_CLK_DIV2); + writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG)); + + /* + * poll for s_clk lock or delay 1ms + */ + udelay(1000); +} + +static void +bfa_ioc_ct2_lclk_init(void __iomem *rb) +{ + u32 r32; + + /* + * put l_clk PLL and PLL FSM in reset + */ + r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); + r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN); + r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS | + __APP_PLL_LCLK_LOGIC_SOFT_RESET); + writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG)); + + /* + * set LPU speed (set for FC16 which will work for other modes) + */ + r32 = readl((rb + CT2_CHIP_MISC_PRG)); + writel(r32, (rb + CT2_CHIP_MISC_PRG)); + + /* + * set LPU half speed (set for FC16 which will work for other modes) + */ + r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); + writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG)); + + /* + * set lclk for mode (set for FC16) + */ + r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); + r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED); + r32 |= 0x20c1731b; + writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG)); + + /* + * poll for s_clk lock or delay 1ms + */ + udelay(1000); +} + +static void +bfa_ioc_ct2_mem_init(void __iomem *rb) +{ + u32 r32; + + r32 = readl((rb + PSS_CTL_REG)); + r32 &= ~__PSS_LMEM_RESET; + writel(r32, (rb + PSS_CTL_REG)); + udelay(1000); + + writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG)); + udelay(1000); + writel(0, (rb + CT2_MBIST_CTL_REG)); +} + +void +bfa_ioc_ct2_mac_reset(void __iomem *rb) +{ + u32 r32; + + bfa_ioc_ct2_sclk_init(rb); + bfa_ioc_ct2_lclk_init(rb); + + /* + * release soft reset on s_clk & l_clk + */ + r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); + writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET, + (rb + CT2_APP_PLL_SCLK_CTL_REG)); + + /* + * release soft reset on 
s_clk & l_clk + */ + r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); + writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET, + (rb + CT2_APP_PLL_LCLK_CTL_REG)); + + /* put port0, port1 MAC & AHB in reset */ + writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET), + rb + CT2_CSI_MAC_CONTROL_REG(0)); + writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET), + rb + CT2_CSI_MAC_CONTROL_REG(1)); +} + +#define CT2_NFC_MAX_DELAY 1000 +bfa_status_t +bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode) +{ + u32 wgn, r32; + int i; + + /* + * Initialize PLL if not already done by NFC + */ + wgn = readl(rb + CT2_WGN_STATUS); + if (!(wgn & __GLBL_PF_VF_CFG_RDY)) { + writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG); + for (i = 0; i < CT2_NFC_MAX_DELAY; i++) { + r32 = readl(rb + CT2_NFC_CSR_SET_REG); + if (r32 & __NFC_CONTROLLER_HALTED) + break; + udelay(1000); + } + } + + /* + * Mask the interrupts and clear any + * pending interrupts. + */ + writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK)); + writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK)); + + r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); + if (r32 == 1) { + writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT)); + readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); + } + r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT)); + if (r32 == 1) { + writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT)); + readl((rb + CT2_LPU1_HOSTFN_CMD_STAT)); + } + + bfa_ioc_ct2_mac_reset(rb); + bfa_ioc_ct2_sclk_init(rb); + bfa_ioc_ct2_lclk_init(rb); + + /* + * release soft reset on s_clk & l_clk + */ + r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); + writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET, + (rb + CT2_APP_PLL_SCLK_CTL_REG)); + + /* + * release soft reset on s_clk & l_clk + */ + r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); + writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET, + (rb + CT2_APP_PLL_LCLK_CTL_REG)); + + /* + * Announce flash device presence, if flash was corrupted. 
+ */ + if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) { + r32 = readl((rb + PSS_GPIO_OUT_REG)); + writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG)); + r32 = readl((rb + PSS_GPIO_OE_REG)); + writel(r32 | 1, (rb + PSS_GPIO_OE_REG)); + } + + bfa_ioc_ct2_mem_init(rb); + + writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG)); + writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG)); + return BFA_STATUS_OK; +} diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h index ab79ff6fdee..2d36e482383 100644 --- a/drivers/scsi/bfa/bfa_modules.h +++ b/drivers/scsi/bfa/bfa_modules.h @@ -29,14 +29,22 @@ #include "bfa_port.h" struct bfa_modules_s { + struct bfa_fcdiag_s fcdiag; /* fcdiag module */ struct bfa_fcport_s fcport; /* fc port module */ struct bfa_fcxp_mod_s fcxp_mod; /* fcxp module */ struct bfa_lps_mod_s lps_mod; /* fcxp module */ struct bfa_uf_mod_s uf_mod; /* unsolicited frame module */ struct bfa_rport_mod_s rport_mod; /* remote port module */ - struct bfa_fcpim_mod_s fcpim_mod; /* FCP initiator module */ + struct bfa_fcp_mod_s fcp_mod; /* FCP initiator module */ struct bfa_sgpg_mod_s sgpg_mod; /* SG page module */ struct bfa_port_s port; /* Physical port module */ + struct bfa_ablk_s ablk; /* ASIC block config module */ + struct bfa_cee_s cee; /* CEE Module */ + struct bfa_sfp_s sfp; /* SFP module */ + struct bfa_flash_s flash; /* flash module */ + struct bfa_diag_s diag_mod; /* diagnostics module */ + struct bfa_phy_s phy; /* phy module */ + struct bfa_dconf_mod_s dconf_mod; /* DCONF common module */ }; /* @@ -51,17 +59,16 @@ enum { BFA_TRC_HAL_IOCFC_CB = 5, }; - /* * Macro to define a new BFA module */ #define BFA_MODULE(__mod) \ static void bfa_ ## __mod ## _meminfo( \ - struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, \ - u32 *dm_len); \ + struct bfa_iocfc_cfg_s *cfg, \ + struct bfa_meminfo_s *meminfo, \ + struct bfa_s *bfa); \ static void bfa_ ## __mod ## _attach(struct bfa_s *bfa, \ void *bfad, struct bfa_iocfc_cfg_s *cfg, \ - struct bfa_meminfo_s *meminfo, \ struct bfa_pcidev_s *pcidev); \ static void bfa_ ## __mod ## _detach(struct bfa_s *bfa); \ static void bfa_ ## __mod ## _start(struct bfa_s *bfa); \ @@ -87,11 +94,11 @@ enum { * can leave entry points as NULL) */ struct bfa_module_s { - void (*meminfo) (struct bfa_iocfc_cfg_s *cfg, u32 *km_len, - u32 *dm_len); + void (*meminfo) (struct bfa_iocfc_cfg_s *cfg, + struct bfa_meminfo_s *meminfo, + struct bfa_s *bfa); void (*attach) (struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, - struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev); void (*detach) (struct bfa_s *bfa); void (*start) (struct bfa_s *bfa); @@ -109,19 +116,22 @@ struct bfa_s { struct bfa_timer_mod_s timer_mod; /* timer module */ struct bfa_modules_s modules; /* BFA modules */ struct list_head comp_q; /* pending completions */ - bfa_boolean_t rme_process; /* RME processing enabled */ + bfa_boolean_t queue_process; /* queue processing enabled */ struct list_head reqq_waitq[BFI_IOC_MAX_CQS]; bfa_boolean_t fcs; /* FCS is attached to BFA */ struct bfa_msix_s msix; + int bfa_aen_seq; }; extern bfa_boolean_t bfa_auto_recover; +extern struct bfa_module_s hal_mod_fcdiag; extern struct bfa_module_s hal_mod_sgpg; extern struct bfa_module_s hal_mod_fcport; extern struct bfa_module_s hal_mod_fcxp; extern struct bfa_module_s hal_mod_lps; extern struct bfa_module_s hal_mod_uf; extern struct bfa_module_s hal_mod_rport; -extern struct bfa_module_s hal_mod_fcpim; +extern struct bfa_module_s hal_mod_fcp; +extern struct bfa_module_s hal_mod_dconf; #endif /* 
__BFA_MODULES_H__ */ diff --git a/drivers/scsi/bfa/bfa_port.c b/drivers/scsi/bfa/bfa_port.c index 3f8e9d6066e..95e4ad8759a 100644 --- a/drivers/scsi/bfa/bfa_port.c +++ b/drivers/scsi/bfa/bfa_port.c @@ -24,8 +24,6 @@ BFA_TRC_FILE(CNA, PORT); -#define bfa_ioc_portid(__ioc) ((__ioc)->port_id) - static void bfa_port_stats_swap(struct bfa_port_s *port, union bfa_port_stats_u *stats) { @@ -236,6 +234,12 @@ bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn, { struct bfi_port_generic_req_s *m; + /* If port is PBC disabled, return error */ + if (port->pbc_disabled) { + bfa_trc(port, BFA_STATUS_PBC); + return BFA_STATUS_PBC; + } + if (bfa_ioc_is_disabled(port->ioc)) { bfa_trc(port, BFA_STATUS_IOC_DISABLED); return BFA_STATUS_IOC_DISABLED; @@ -280,6 +284,12 @@ bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn, { struct bfi_port_generic_req_s *m; + /* If port is PBC disabled, return error */ + if (port->pbc_disabled) { + bfa_trc(port, BFA_STATUS_PBC); + return BFA_STATUS_PBC; + } + if (bfa_ioc_is_disabled(port->ioc)) { bfa_trc(port, BFA_STATUS_IOC_DISABLED); return BFA_STATUS_IOC_DISABLED; @@ -387,32 +397,43 @@ bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn, } /* - * bfa_port_hbfail() + * bfa_port_notify() * + * Port module IOC event handler * * @param[in] Pointer to the Port module data structure. + * @param[in] IOC event structure * * @return void */ void -bfa_port_hbfail(void *arg) +bfa_port_notify(void *arg, enum bfa_ioc_event_e event) { struct bfa_port_s *port = (struct bfa_port_s *) arg; - /* Fail any pending get_stats/clear_stats requests */ - if (port->stats_busy) { - if (port->stats_cbfn) - port->stats_cbfn(port->stats_cbarg, BFA_STATUS_FAILED); - port->stats_cbfn = NULL; - port->stats_busy = BFA_FALSE; - } - - /* Clear any enable/disable is pending */ - if (port->endis_pending) { - if (port->endis_cbfn) - port->endis_cbfn(port->endis_cbarg, BFA_STATUS_FAILED); - port->endis_cbfn = NULL; - port->endis_pending = BFA_FALSE; + switch (event) { + case BFA_IOC_E_DISABLED: + case BFA_IOC_E_FAILED: + /* Fail any pending get_stats/clear_stats requests */ + if (port->stats_busy) { + if (port->stats_cbfn) + port->stats_cbfn(port->stats_cbarg, + BFA_STATUS_FAILED); + port->stats_cbfn = NULL; + port->stats_busy = BFA_FALSE; + } + + /* Clear any enable/disable is pending */ + if (port->endis_pending) { + if (port->endis_cbfn) + port->endis_cbfn(port->endis_cbarg, + BFA_STATUS_FAILED); + port->endis_cbfn = NULL; + port->endis_pending = BFA_FALSE; + } + break; + default: + break; } } @@ -445,10 +466,12 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, port->endis_pending = BFA_FALSE; port->stats_cbfn = NULL; port->endis_cbfn = NULL; + port->pbc_disabled = BFA_FALSE; bfa_ioc_mbox_regisr(port->ioc, BFI_MC_PORT, bfa_port_isr, port); - bfa_ioc_hbfail_init(&port->hbfail, bfa_port_hbfail, port); - list_add_tail(&port->hbfail.qe, &port->ioc->hb_notify_q); + bfa_q_qe_init(&port->ioc_notify); + bfa_ioc_notify_init(&port->ioc_notify, bfa_port_notify, port); + list_add_tail(&port->ioc_notify.qe, &port->ioc->notify_q); /* * initialize time stamp for stats reset @@ -458,3 +481,368 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, bfa_trc(port, 0); } + +/* + * CEE module specific definitions + */ + +/* + * bfa_cee_get_attr_isr() + * + * @brief CEE ISR for get-attributes responses from f/w + * + * @param[in] cee - Pointer to the CEE module + * status - Return status from the f/w + * + * @return void + */ +static void 
+bfa_cee_get_attr_isr(struct bfa_cee_s *cee, bfa_status_t status) +{ + struct bfa_cee_lldp_cfg_s *lldp_cfg = &cee->attr->lldp_remote; + + cee->get_attr_status = status; + bfa_trc(cee, 0); + if (status == BFA_STATUS_OK) { + bfa_trc(cee, 0); + memcpy(cee->attr, cee->attr_dma.kva, + sizeof(struct bfa_cee_attr_s)); + lldp_cfg->time_to_live = be16_to_cpu(lldp_cfg->time_to_live); + lldp_cfg->enabled_system_cap = + be16_to_cpu(lldp_cfg->enabled_system_cap); + } + cee->get_attr_pending = BFA_FALSE; + if (cee->cbfn.get_attr_cbfn) { + bfa_trc(cee, 0); + cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg, status); + } +} + +/* + * bfa_cee_get_stats_isr() + * + * @brief CEE ISR for get-stats responses from f/w + * + * @param[in] cee - Pointer to the CEE module + * status - Return status from the f/w + * + * @return void + */ +static void +bfa_cee_get_stats_isr(struct bfa_cee_s *cee, bfa_status_t status) +{ + u32 *buffer; + int i; + + cee->get_stats_status = status; + bfa_trc(cee, 0); + if (status == BFA_STATUS_OK) { + bfa_trc(cee, 0); + memcpy(cee->stats, cee->stats_dma.kva, + sizeof(struct bfa_cee_stats_s)); + /* swap the cee stats */ + buffer = (u32 *)cee->stats; + for (i = 0; i < (sizeof(struct bfa_cee_stats_s) / + sizeof(u32)); i++) + buffer[i] = cpu_to_be32(buffer[i]); + } + cee->get_stats_pending = BFA_FALSE; + bfa_trc(cee, 0); + if (cee->cbfn.get_stats_cbfn) { + bfa_trc(cee, 0); + cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg, status); + } +} + +/* + * bfa_cee_reset_stats_isr() + * + * @brief CEE ISR for reset-stats responses from f/w + * + * @param[in] cee - Pointer to the CEE module + * status - Return status from the f/w + * + * @return void + */ +static void +bfa_cee_reset_stats_isr(struct bfa_cee_s *cee, bfa_status_t status) +{ + cee->reset_stats_status = status; + cee->reset_stats_pending = BFA_FALSE; + if (cee->cbfn.reset_stats_cbfn) + cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status); +} + +/* + * bfa_cee_meminfo() + * + * @brief Returns the size of the DMA memory needed by CEE module + * + * @param[in] void + * + * @return Size of DMA region + */ +u32 +bfa_cee_meminfo(void) +{ + return BFA_ROUNDUP(sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ) + + BFA_ROUNDUP(sizeof(struct bfa_cee_stats_s), BFA_DMA_ALIGN_SZ); +} + +/* + * bfa_cee_mem_claim() + * + * @brief Initialized CEE DMA Memory + * + * @param[in] cee CEE module pointer + * dma_kva Kernel Virtual Address of CEE DMA Memory + * dma_pa Physical Address of CEE DMA Memory + * + * @return void + */ +void +bfa_cee_mem_claim(struct bfa_cee_s *cee, u8 *dma_kva, u64 dma_pa) +{ + cee->attr_dma.kva = dma_kva; + cee->attr_dma.pa = dma_pa; + cee->stats_dma.kva = dma_kva + BFA_ROUNDUP( + sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ); + cee->stats_dma.pa = dma_pa + BFA_ROUNDUP( + sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ); + cee->attr = (struct bfa_cee_attr_s *) dma_kva; + cee->stats = (struct bfa_cee_stats_s *) (dma_kva + BFA_ROUNDUP( + sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ)); +} + +/* + * bfa_cee_get_attr() + * + * @brief + * Send the request to the f/w to fetch CEE attributes. + * + * @param[in] Pointer to the CEE module data structure. 
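
A minimal sketch, not part of the patch, of the new IOC event notification mechanism that replaces the heartbeat-failure queue: a sub-module registers a bfa_ioc_notify_s on ioc->notify_q (the same sequence bfa_port_attach() uses above) and fails its outstanding work when BFA_IOC_E_DISABLED or BFA_IOC_E_FAILED arrives. The example module and its req_pending flag are hypothetical.

#include "bfa_ioc.h"

struct bfa_example_s {
	struct bfa_ioc_s	*ioc;
	struct bfa_ioc_notify_s	ioc_notify;
	bfa_boolean_t		req_pending;	/* an outstanding f/w request */
};

static void
bfa_example_notify(void *arg, enum bfa_ioc_event_e event)
{
	struct bfa_example_s *ex = arg;

	switch (event) {
	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		/* the IOC is gone; fail whatever is still outstanding */
		ex->req_pending = BFA_FALSE;
		break;
	default:
		break;		/* BFA_IOC_E_ENABLED needs no cleanup here */
	}
}

static void
bfa_example_notify_register(struct bfa_example_s *ex, struct bfa_ioc_s *ioc)
{
	ex->ioc = ioc;
	ex->req_pending = BFA_FALSE;

	/* same registration sequence as bfa_port_attach() above */
	bfa_q_qe_init(&ex->ioc_notify);
	bfa_ioc_notify_init(&ex->ioc_notify, bfa_example_notify, ex);
	list_add_tail(&ex->ioc_notify.qe, &ioc->notify_q);
}
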
+ * + * @return Status + */ + +bfa_status_t +bfa_cee_get_attr(struct bfa_cee_s *cee, struct bfa_cee_attr_s *attr, + bfa_cee_get_attr_cbfn_t cbfn, void *cbarg) +{ + struct bfi_cee_get_req_s *cmd; + + WARN_ON((cee == NULL) || (cee->ioc == NULL)); + bfa_trc(cee, 0); + if (!bfa_ioc_is_operational(cee->ioc)) { + bfa_trc(cee, 0); + return BFA_STATUS_IOC_FAILURE; + } + if (cee->get_attr_pending == BFA_TRUE) { + bfa_trc(cee, 0); + return BFA_STATUS_DEVBUSY; + } + cee->get_attr_pending = BFA_TRUE; + cmd = (struct bfi_cee_get_req_s *) cee->get_cfg_mb.msg; + cee->attr = attr; + cee->cbfn.get_attr_cbfn = cbfn; + cee->cbfn.get_attr_cbarg = cbarg; + bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ, + bfa_ioc_portid(cee->ioc)); + bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa); + bfa_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb); + + return BFA_STATUS_OK; +} + +/* + * bfa_cee_get_stats() + * + * @brief + * Send the request to the f/w to fetch CEE statistics. + * + * @param[in] Pointer to the CEE module data structure. + * + * @return Status + */ + +bfa_status_t +bfa_cee_get_stats(struct bfa_cee_s *cee, struct bfa_cee_stats_s *stats, + bfa_cee_get_stats_cbfn_t cbfn, void *cbarg) +{ + struct bfi_cee_get_req_s *cmd; + + WARN_ON((cee == NULL) || (cee->ioc == NULL)); + + if (!bfa_ioc_is_operational(cee->ioc)) { + bfa_trc(cee, 0); + return BFA_STATUS_IOC_FAILURE; + } + if (cee->get_stats_pending == BFA_TRUE) { + bfa_trc(cee, 0); + return BFA_STATUS_DEVBUSY; + } + cee->get_stats_pending = BFA_TRUE; + cmd = (struct bfi_cee_get_req_s *) cee->get_stats_mb.msg; + cee->stats = stats; + cee->cbfn.get_stats_cbfn = cbfn; + cee->cbfn.get_stats_cbarg = cbarg; + bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_STATS_REQ, + bfa_ioc_portid(cee->ioc)); + bfa_dma_be_addr_set(cmd->dma_addr, cee->stats_dma.pa); + bfa_ioc_mbox_queue(cee->ioc, &cee->get_stats_mb); + + return BFA_STATUS_OK; +} + +/* + * bfa_cee_reset_stats() + * + * @brief Clears CEE Stats in the f/w. + * + * @param[in] Pointer to the CEE module data structure. + * + * @return Status + */ + +bfa_status_t +bfa_cee_reset_stats(struct bfa_cee_s *cee, + bfa_cee_reset_stats_cbfn_t cbfn, void *cbarg) +{ + struct bfi_cee_reset_stats_s *cmd; + + WARN_ON((cee == NULL) || (cee->ioc == NULL)); + if (!bfa_ioc_is_operational(cee->ioc)) { + bfa_trc(cee, 0); + return BFA_STATUS_IOC_FAILURE; + } + if (cee->reset_stats_pending == BFA_TRUE) { + bfa_trc(cee, 0); + return BFA_STATUS_DEVBUSY; + } + cee->reset_stats_pending = BFA_TRUE; + cmd = (struct bfi_cee_reset_stats_s *) cee->reset_stats_mb.msg; + cee->cbfn.reset_stats_cbfn = cbfn; + cee->cbfn.reset_stats_cbarg = cbarg; + bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_RESET_STATS, + bfa_ioc_portid(cee->ioc)); + bfa_ioc_mbox_queue(cee->ioc, &cee->reset_stats_mb); + + return BFA_STATUS_OK; +} + +/* + * bfa_cee_isrs() + * + * @brief Handles Mail-box interrupts for CEE module. + * + * @param[in] Pointer to the CEE module data structure. 
+ * + * @return void + */ + +void +bfa_cee_isr(void *cbarg, struct bfi_mbmsg_s *m) +{ + union bfi_cee_i2h_msg_u *msg; + struct bfi_cee_get_rsp_s *get_rsp; + struct bfa_cee_s *cee = (struct bfa_cee_s *) cbarg; + msg = (union bfi_cee_i2h_msg_u *) m; + get_rsp = (struct bfi_cee_get_rsp_s *) m; + bfa_trc(cee, msg->mh.msg_id); + switch (msg->mh.msg_id) { + case BFI_CEE_I2H_GET_CFG_RSP: + bfa_trc(cee, get_rsp->cmd_status); + bfa_cee_get_attr_isr(cee, get_rsp->cmd_status); + break; + case BFI_CEE_I2H_GET_STATS_RSP: + bfa_cee_get_stats_isr(cee, get_rsp->cmd_status); + break; + case BFI_CEE_I2H_RESET_STATS_RSP: + bfa_cee_reset_stats_isr(cee, get_rsp->cmd_status); + break; + default: + WARN_ON(1); + } +} + +/* + * bfa_cee_notify() + * + * @brief CEE module IOC event handler. + * + * @param[in] Pointer to the CEE module data structure. + * @param[in] IOC event type + * + * @return void + */ + +void +bfa_cee_notify(void *arg, enum bfa_ioc_event_e event) +{ + struct bfa_cee_s *cee = (struct bfa_cee_s *) arg; + + bfa_trc(cee, event); + + switch (event) { + case BFA_IOC_E_DISABLED: + case BFA_IOC_E_FAILED: + if (cee->get_attr_pending == BFA_TRUE) { + cee->get_attr_status = BFA_STATUS_FAILED; + cee->get_attr_pending = BFA_FALSE; + if (cee->cbfn.get_attr_cbfn) { + cee->cbfn.get_attr_cbfn( + cee->cbfn.get_attr_cbarg, + BFA_STATUS_FAILED); + } + } + if (cee->get_stats_pending == BFA_TRUE) { + cee->get_stats_status = BFA_STATUS_FAILED; + cee->get_stats_pending = BFA_FALSE; + if (cee->cbfn.get_stats_cbfn) { + cee->cbfn.get_stats_cbfn( + cee->cbfn.get_stats_cbarg, + BFA_STATUS_FAILED); + } + } + if (cee->reset_stats_pending == BFA_TRUE) { + cee->reset_stats_status = BFA_STATUS_FAILED; + cee->reset_stats_pending = BFA_FALSE; + if (cee->cbfn.reset_stats_cbfn) { + cee->cbfn.reset_stats_cbfn( + cee->cbfn.reset_stats_cbarg, + BFA_STATUS_FAILED); + } + } + break; + + default: + break; + } +} + +/* + * bfa_cee_attach() + * + * @brief CEE module-attach API + * + * @param[in] cee - Pointer to the CEE module data structure + * ioc - Pointer to the ioc module data structure + * dev - Pointer to the device driver module data structure + * The device driver specific mbox ISR functions have + * this pointer as one of the parameters. 
+ * + * @return void + */ +void +bfa_cee_attach(struct bfa_cee_s *cee, struct bfa_ioc_s *ioc, + void *dev) +{ + WARN_ON(cee == NULL); + cee->dev = dev; + cee->ioc = ioc; + + bfa_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee); + bfa_q_qe_init(&cee->ioc_notify); + bfa_ioc_notify_init(&cee->ioc_notify, bfa_cee_notify, cee); + list_add_tail(&cee->ioc_notify.qe, &cee->ioc->notify_q); +} diff --git a/drivers/scsi/bfa/bfa_port.h b/drivers/scsi/bfa/bfa_port.h index c4ee9db6b47..947f897328d 100644 --- a/drivers/scsi/bfa/bfa_port.h +++ b/drivers/scsi/bfa/bfa_port.h @@ -43,12 +43,16 @@ struct bfa_port_s { bfa_port_endis_cbfn_t endis_cbfn; void *endis_cbarg; bfa_status_t endis_status; - struct bfa_ioc_hbfail_notify_s hbfail; + struct bfa_ioc_notify_s ioc_notify; + bfa_boolean_t pbc_disabled; + struct bfa_mem_dma_s port_dma; }; +#define BFA_MEM_PORT_DMA(__bfa) (&((__bfa)->modules.port.port_dma)) + void bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, void *dev, struct bfa_trc_mod_s *trcmod); -void bfa_port_hbfail(void *arg); +void bfa_port_notify(void *arg, enum bfa_ioc_event_e event); bfa_status_t bfa_port_get_stats(struct bfa_port_s *port, union bfa_port_stats_u *stats, @@ -62,4 +66,58 @@ bfa_status_t bfa_port_disable(struct bfa_port_s *port, u32 bfa_port_meminfo(void); void bfa_port_mem_claim(struct bfa_port_s *port, u8 *dma_kva, u64 dma_pa); + +/* + * CEE declaration + */ +typedef void (*bfa_cee_get_attr_cbfn_t) (void *dev, bfa_status_t status); +typedef void (*bfa_cee_get_stats_cbfn_t) (void *dev, bfa_status_t status); +typedef void (*bfa_cee_reset_stats_cbfn_t) (void *dev, bfa_status_t status); + +struct bfa_cee_cbfn_s { + bfa_cee_get_attr_cbfn_t get_attr_cbfn; + void *get_attr_cbarg; + bfa_cee_get_stats_cbfn_t get_stats_cbfn; + void *get_stats_cbarg; + bfa_cee_reset_stats_cbfn_t reset_stats_cbfn; + void *reset_stats_cbarg; +}; + +struct bfa_cee_s { + void *dev; + bfa_boolean_t get_attr_pending; + bfa_boolean_t get_stats_pending; + bfa_boolean_t reset_stats_pending; + bfa_status_t get_attr_status; + bfa_status_t get_stats_status; + bfa_status_t reset_stats_status; + struct bfa_cee_cbfn_s cbfn; + struct bfa_ioc_notify_s ioc_notify; + struct bfa_trc_mod_s *trcmod; + struct bfa_cee_attr_s *attr; + struct bfa_cee_stats_s *stats; + struct bfa_dma_s attr_dma; + struct bfa_dma_s stats_dma; + struct bfa_ioc_s *ioc; + struct bfa_mbox_cmd_s get_cfg_mb; + struct bfa_mbox_cmd_s get_stats_mb; + struct bfa_mbox_cmd_s reset_stats_mb; + struct bfa_mem_dma_s cee_dma; +}; + +#define BFA_MEM_CEE_DMA(__bfa) (&((__bfa)->modules.cee.cee_dma)) + +u32 bfa_cee_meminfo(void); +void bfa_cee_mem_claim(struct bfa_cee_s *cee, u8 *dma_kva, u64 dma_pa); +void bfa_cee_attach(struct bfa_cee_s *cee, + struct bfa_ioc_s *ioc, void *dev); +bfa_status_t bfa_cee_get_attr(struct bfa_cee_s *cee, + struct bfa_cee_attr_s *attr, + bfa_cee_get_attr_cbfn_t cbfn, void *cbarg); +bfa_status_t bfa_cee_get_stats(struct bfa_cee_s *cee, + struct bfa_cee_stats_s *stats, + bfa_cee_get_stats_cbfn_t cbfn, void *cbarg); +bfa_status_t bfa_cee_reset_stats(struct bfa_cee_s *cee, + bfa_cee_reset_stats_cbfn_t cbfn, void *cbarg); + #endif /* __BFA_PORT_H__ */ diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c index 16d9a5f61c1..aa8a0eaf91f 100644 --- a/drivers/scsi/bfa/bfa_svc.c +++ b/drivers/scsi/bfa/bfa_svc.c @@ -16,11 +16,13 @@ */ #include "bfad_drv.h" +#include "bfad_im.h" #include "bfa_plog.h" #include "bfa_cs.h" #include "bfa_modules.h" BFA_TRC_FILE(HAL, FCXP); +BFA_MODULE(fcdiag); BFA_MODULE(fcxp); 
BFA_MODULE(sgpg); BFA_MODULE(lps); @@ -113,11 +115,10 @@ static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, /* * forward declarations for LPS functions */ -static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, - u32 *dm_len); +static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, + struct bfa_meminfo_s *minfo, struct bfa_s *bfa); static void bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, - struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev); static void bfa_lps_detach(struct bfa_s *bfa); static void bfa_lps_start(struct bfa_s *bfa); @@ -125,6 +126,7 @@ static void bfa_lps_stop(struct bfa_s *bfa); static void bfa_lps_iocdisable(struct bfa_s *bfa); static void bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp); +static void bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count); static void bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp); static void bfa_lps_reqq_resume(void *lps_arg); @@ -430,51 +432,17 @@ bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid, */ static void -claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi) -{ - u8 *dm_kva = NULL; - u64 dm_pa; - u32 buf_pool_sz; - - dm_kva = bfa_meminfo_dma_virt(mi); - dm_pa = bfa_meminfo_dma_phys(mi); - - buf_pool_sz = mod->req_pld_sz * mod->num_fcxps; - - /* - * Initialize the fcxp req payload list - */ - mod->req_pld_list_kva = dm_kva; - mod->req_pld_list_pa = dm_pa; - dm_kva += buf_pool_sz; - dm_pa += buf_pool_sz; - memset(mod->req_pld_list_kva, 0, buf_pool_sz); - - /* - * Initialize the fcxp rsp payload list - */ - buf_pool_sz = mod->rsp_pld_sz * mod->num_fcxps; - mod->rsp_pld_list_kva = dm_kva; - mod->rsp_pld_list_pa = dm_pa; - dm_kva += buf_pool_sz; - dm_pa += buf_pool_sz; - memset(mod->rsp_pld_list_kva, 0, buf_pool_sz); - - bfa_meminfo_dma_virt(mi) = dm_kva; - bfa_meminfo_dma_phys(mi) = dm_pa; -} - -static void -claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi) +claim_fcxps_mem(struct bfa_fcxp_mod_s *mod) { u16 i; struct bfa_fcxp_s *fcxp; - fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi); + fcxp = (struct bfa_fcxp_s *) bfa_mem_kva_curp(mod); memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps); INIT_LIST_HEAD(&mod->fcxp_free_q); INIT_LIST_HEAD(&mod->fcxp_active_q); + INIT_LIST_HEAD(&mod->fcxp_unused_q); mod->fcxp_list = fcxp; @@ -489,40 +457,53 @@ claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi) fcxp = fcxp + 1; } - bfa_meminfo_kva(mi) = (void *)fcxp; + bfa_mem_kva_curp(mod) = (void *)fcxp; } static void -bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, - u32 *dm_len) +bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, + struct bfa_s *bfa) { - u16 num_fcxp_reqs = cfg->fwcfg.num_fcxp_reqs; + struct bfa_fcxp_mod_s *fcxp_mod = BFA_FCXP_MOD(bfa); + struct bfa_mem_kva_s *fcxp_kva = BFA_MEM_FCXP_KVA(bfa); + struct bfa_mem_dma_s *seg_ptr; + u16 nsegs, idx, per_seg_fcxp; + u16 num_fcxps = cfg->fwcfg.num_fcxp_reqs; + u32 per_fcxp_sz; - if (num_fcxp_reqs == 0) + if (num_fcxps == 0) return; - /* - * Account for req/rsp payload - */ - *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs; if (cfg->drvcfg.min_cfg) - *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs; + per_fcxp_sz = 2 * BFA_FCXP_MAX_IBUF_SZ; else - *dm_len += BFA_FCXP_MAX_LBUF_SZ * num_fcxp_reqs; + per_fcxp_sz = BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ; - /* - * Account for fcxp structs - */ - *ndm_len += sizeof(struct bfa_fcxp_s) * num_fcxp_reqs; + /* dma memory 
*/ + nsegs = BFI_MEM_DMA_NSEGS(num_fcxps, per_fcxp_sz); + per_seg_fcxp = BFI_MEM_NREQS_SEG(per_fcxp_sz); + + bfa_mem_dma_seg_iter(fcxp_mod, seg_ptr, nsegs, idx) { + if (num_fcxps >= per_seg_fcxp) { + num_fcxps -= per_seg_fcxp; + bfa_mem_dma_setup(minfo, seg_ptr, + per_seg_fcxp * per_fcxp_sz); + } else + bfa_mem_dma_setup(minfo, seg_ptr, + num_fcxps * per_fcxp_sz); + } + + /* kva memory */ + bfa_mem_kva_setup(minfo, fcxp_kva, + cfg->fwcfg.num_fcxp_reqs * sizeof(struct bfa_fcxp_s)); } static void bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, - struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) + struct bfa_pcidev_s *pcidev) { struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); - memset(mod, 0, sizeof(struct bfa_fcxp_mod_s)); mod->bfa = bfa; mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs; @@ -535,8 +516,7 @@ bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, INIT_LIST_HEAD(&mod->wait_q); - claim_fcxp_req_rsp_mem(mod, meminfo); - claim_fcxps_mem(mod, meminfo); + claim_fcxps_mem(mod); } static void @@ -561,6 +541,9 @@ bfa_fcxp_iocdisable(struct bfa_s *bfa) struct bfa_fcxp_s *fcxp; struct list_head *qe, *qen; + /* Enqueue unused fcxp resources to free_q */ + list_splice_tail_init(&mod->fcxp_unused_q, &mod->fcxp_free_q); + list_for_each_safe(qe, qen, &mod->fcxp_active_q) { fcxp = (struct bfa_fcxp_s *) qe; if (fcxp->caller == NULL) { @@ -750,23 +733,6 @@ hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp) } static void -hal_fcxp_set_local_sges(struct bfi_sge_s *sge, u32 reqlen, u64 req_pa) -{ - union bfi_addr_u sga_zero = { {0} }; - - sge->sg_len = reqlen; - sge->flags = BFI_SGE_DATA_LAST; - bfa_dma_addr_set(sge[0].sga, req_pa); - bfa_sge_to_be(sge); - sge++; - - sge->sga = sga_zero; - sge->sg_len = reqlen; - sge->flags = BFI_SGE_PGDLEN; - bfa_sge_to_be(sge); -} - -static void hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp, struct fchs_s *fchs) { @@ -846,7 +812,7 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req) struct bfa_rport_s *rport = reqi->bfa_rport; bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ, - bfa_lpuid(bfa)); + bfa_fn_lpu(bfa)); send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag); if (rport) { @@ -860,7 +826,7 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req) } send_req->vf_id = cpu_to_be16(reqi->vf_id); - send_req->lp_tag = reqi->lp_tag; + send_req->lp_fwtag = bfa_lps_get_fwtag(bfa, reqi->lp_tag); send_req->class = reqi->class; send_req->rsp_timeout = rspi->rsp_timeout; send_req->cts = reqi->cts; @@ -873,18 +839,16 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req) * setup req sgles */ if (fcxp->use_ireqbuf == 1) { - hal_fcxp_set_local_sges(send_req->req_sge, reqi->req_tot_len, + bfa_alen_set(&send_req->req_alen, reqi->req_tot_len, BFA_FCXP_REQ_PLD_PA(fcxp)); } else { if (fcxp->nreq_sgles > 0) { WARN_ON(fcxp->nreq_sgles != 1); - hal_fcxp_set_local_sges(send_req->req_sge, - reqi->req_tot_len, - fcxp->req_sga_cbfn(fcxp->caller, - 0)); + bfa_alen_set(&send_req->req_alen, reqi->req_tot_len, + fcxp->req_sga_cbfn(fcxp->caller, 0)); } else { WARN_ON(reqi->req_tot_len != 0); - hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0); + bfa_alen_set(&send_req->rsp_alen, 0, 0); } } @@ -894,25 +858,23 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req) if (fcxp->use_irspbuf == 1) { WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ); - 
hal_fcxp_set_local_sges(send_req->rsp_sge, rspi->rsp_maxlen, + bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen, BFA_FCXP_RSP_PLD_PA(fcxp)); - } else { if (fcxp->nrsp_sgles > 0) { WARN_ON(fcxp->nrsp_sgles != 1); - hal_fcxp_set_local_sges(send_req->rsp_sge, - rspi->rsp_maxlen, - fcxp->rsp_sga_cbfn(fcxp->caller, - 0)); + bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen, + fcxp->rsp_sga_cbfn(fcxp->caller, 0)); + } else { WARN_ON(rspi->rsp_maxlen != 0); - hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0); + bfa_alen_set(&send_req->rsp_alen, 0, 0); } } hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs); - bfa_reqq_produce(bfa, BFA_REQQ_FCXP); + bfa_reqq_produce(bfa, BFA_REQQ_FCXP, send_req->mh); bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP)); bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP)); @@ -978,8 +940,8 @@ bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp) void *reqbuf; WARN_ON(fcxp->use_ireqbuf != 1); - reqbuf = ((u8 *)mod->req_pld_list_kva) + - fcxp->fcxp_tag * mod->req_pld_sz; + reqbuf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag, + mod->req_pld_sz + mod->rsp_pld_sz); return reqbuf; } @@ -1002,13 +964,15 @@ void * bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp) { struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; - void *rspbuf; + void *fcxp_buf; WARN_ON(fcxp->use_irspbuf != 1); - rspbuf = ((u8 *)mod->rsp_pld_list_kva) + - fcxp->fcxp_tag * mod->rsp_pld_sz; - return rspbuf; + fcxp_buf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag, + mod->req_pld_sz + mod->rsp_pld_sz); + + /* fcxp_buf = req_buf + rsp_buf :- add req_buf_sz to get to rsp_buf */ + return ((u8 *) fcxp_buf) + mod->req_pld_sz; } /* @@ -1181,6 +1145,18 @@ bfa_fcxp_get_maxrsp(struct bfa_s *bfa) return mod->rsp_pld_sz; } +void +bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw) +{ + struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); + struct list_head *qe; + int i; + + for (i = 0; i < (mod->num_fcxps - num_fcxp_fw); i++) { + bfa_q_deq_tail(&mod->fcxp_free_q, &qe); + list_add_tail(qe, &mod->fcxp_unused_q); + } +} /* * BFA LPS state machine functions @@ -1192,7 +1168,7 @@ bfa_fcxp_get_maxrsp(struct bfa_s *bfa) static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event) { - bfa_trc(lps->bfa, lps->lp_tag); + bfa_trc(lps->bfa, lps->bfa_tag); bfa_trc(lps->bfa, event); switch (event) { @@ -1244,7 +1220,7 @@ bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event) static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event) { - bfa_trc(lps->bfa, lps->lp_tag); + bfa_trc(lps->bfa, lps->bfa_tag); bfa_trc(lps->bfa, event); switch (event) { @@ -1278,6 +1254,7 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event) break; case BFA_LPS_SM_OFFLINE: + case BFA_LPS_SM_DELETE: bfa_sm_set_state(lps, bfa_lps_sm_init); break; @@ -1297,7 +1274,7 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event) static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event) { - bfa_trc(lps->bfa, lps->lp_tag); + bfa_trc(lps->bfa, lps->bfa_tag); bfa_trc(lps->bfa, event); switch (event) { @@ -1306,6 +1283,7 @@ bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event) break; case BFA_LPS_SM_OFFLINE: + case BFA_LPS_SM_DELETE: bfa_sm_set_state(lps, bfa_lps_sm_init); bfa_reqq_wcancel(&lps->wqe); break; @@ -1329,7 +1307,7 @@ bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event) static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event) { - bfa_trc(lps->bfa, lps->lp_tag); + bfa_trc(lps->bfa, lps->bfa_tag); bfa_trc(lps->bfa, event); switch (event) { 
@@ -1378,7 +1356,7 @@ bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event) static void bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event) { - bfa_trc(lps->bfa, lps->lp_tag); + bfa_trc(lps->bfa, lps->bfa_tag); bfa_trc(lps->bfa, event); switch (event) { @@ -1420,7 +1398,7 @@ bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event) static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event) { - bfa_trc(lps->bfa, lps->lp_tag); + bfa_trc(lps->bfa, lps->bfa_tag); bfa_trc(lps->bfa, event); switch (event) { @@ -1430,6 +1408,7 @@ bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event) break; case BFA_LPS_SM_OFFLINE: + case BFA_LPS_SM_DELETE: bfa_sm_set_state(lps, bfa_lps_sm_init); break; @@ -1444,7 +1423,7 @@ bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event) static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event) { - bfa_trc(lps->bfa, lps->lp_tag); + bfa_trc(lps->bfa, lps->bfa_tag); bfa_trc(lps->bfa, event); switch (event) { @@ -1454,6 +1433,7 @@ bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event) break; case BFA_LPS_SM_OFFLINE: + case BFA_LPS_SM_DELETE: bfa_sm_set_state(lps, bfa_lps_sm_init); bfa_reqq_wcancel(&lps->wqe); break; @@ -1473,13 +1453,17 @@ bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event) * return memory requirement */ static void -bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, - u32 *dm_len) +bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, + struct bfa_s *bfa) { + struct bfa_mem_kva_s *lps_kva = BFA_MEM_LPS_KVA(bfa); + if (cfg->drvcfg.min_cfg) - *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS; + bfa_mem_kva_setup(minfo, lps_kva, + sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS); else - *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS; + bfa_mem_kva_setup(minfo, lps_kva, + sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS); } /* @@ -1487,28 +1471,28 @@ bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, */ static void bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, - struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) + struct bfa_pcidev_s *pcidev) { struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); struct bfa_lps_s *lps; int i; - memset(mod, 0, sizeof(struct bfa_lps_mod_s)); mod->num_lps = BFA_LPS_MAX_LPORTS; if (cfg->drvcfg.min_cfg) mod->num_lps = BFA_LPS_MIN_LPORTS; else mod->num_lps = BFA_LPS_MAX_LPORTS; - mod->lps_arr = lps = (struct bfa_lps_s *) bfa_meminfo_kva(meminfo); + mod->lps_arr = lps = (struct bfa_lps_s *) bfa_mem_kva_curp(mod); - bfa_meminfo_kva(meminfo) += mod->num_lps * sizeof(struct bfa_lps_s); + bfa_mem_kva_curp(mod) += mod->num_lps * sizeof(struct bfa_lps_s); INIT_LIST_HEAD(&mod->lps_free_q); INIT_LIST_HEAD(&mod->lps_active_q); + INIT_LIST_HEAD(&mod->lps_login_q); for (i = 0; i < mod->num_lps; i++, lps++) { lps->bfa = bfa; - lps->lp_tag = (u8) i; + lps->bfa_tag = (u8) i; lps->reqq = BFA_REQQ_LPS; bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps); list_add_tail(&lps->qe, &mod->lps_free_q); @@ -1544,6 +1528,11 @@ bfa_lps_iocdisable(struct bfa_s *bfa) lps = (struct bfa_lps_s *) qe; bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE); } + list_for_each_safe(qe, qen, &mod->lps_login_q) { + lps = (struct bfa_lps_s *) qe; + bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE); + } + list_splice_tail_init(&mod->lps_login_q, &mod->lps_active_q); } /* @@ -1555,12 +1544,13 @@ bfa_lps_login_rsp(struct bfa_s *bfa, struct 
bfi_lps_login_rsp_s *rsp) struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); struct bfa_lps_s *lps; - WARN_ON(rsp->lp_tag >= mod->num_lps); - lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag); + WARN_ON(rsp->bfa_tag >= mod->num_lps); + lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag); lps->status = rsp->status; switch (rsp->status) { case BFA_STATUS_OK: + lps->fw_tag = rsp->fw_tag; lps->fport = rsp->f_port; if (lps->fport) lps->lp_pid = rsp->lp_pid; @@ -1572,6 +1562,7 @@ bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp) lps->lp_mac = rsp->lp_mac; lps->brcd_switch = rsp->brcd_switch; lps->fcf_mac = rsp->fcf_mac; + lps->pr_bbscn = rsp->bb_scn; break; @@ -1586,14 +1577,46 @@ bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp) break; + case BFA_STATUS_VPORT_MAX: + if (!rsp->ext_status) + bfa_lps_no_res(lps, rsp->ext_status); + break; + default: /* Nothing to do with other status */ break; } + list_del(&lps->qe); + list_add_tail(&lps->qe, &mod->lps_active_q); bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP); } +static void +bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count) +{ + struct bfa_s *bfa = first_lps->bfa; + struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); + struct list_head *qe, *qe_next; + struct bfa_lps_s *lps; + + bfa_trc(bfa, count); + + qe = bfa_q_next(first_lps); + + while (count && qe) { + qe_next = bfa_q_next(qe); + lps = (struct bfa_lps_s *)qe; + bfa_trc(bfa, lps->bfa_tag); + lps->status = first_lps->status; + list_del(&lps->qe); + list_add_tail(&lps->qe, &mod->lps_active_q); + bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP); + qe = qe_next; + count--; + } +} + /* * Firmware logout response */ @@ -1603,8 +1626,8 @@ bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp) struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); struct bfa_lps_s *lps; - WARN_ON(rsp->lp_tag >= mod->num_lps); - lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag); + WARN_ON(rsp->bfa_tag >= mod->num_lps); + lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag); bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP); } @@ -1618,7 +1641,7 @@ bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl) struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); struct bfa_lps_s *lps; - lps = BFA_LPS_FROM_TAG(mod, cvl->lp_tag); + lps = BFA_LPS_FROM_TAG(mod, cvl->bfa_tag); bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL); } @@ -1653,23 +1676,27 @@ bfa_lps_free(struct bfa_lps_s *lps) static void bfa_lps_send_login(struct bfa_lps_s *lps) { + struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa); struct bfi_lps_login_req_s *m; m = bfa_reqq_next(lps->bfa, lps->reqq); WARN_ON(!m); bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ, - bfa_lpuid(lps->bfa)); + bfa_fn_lpu(lps->bfa)); - m->lp_tag = lps->lp_tag; + m->bfa_tag = lps->bfa_tag; m->alpa = lps->alpa; m->pdu_size = cpu_to_be16(lps->pdusz); m->pwwn = lps->pwwn; m->nwwn = lps->nwwn; m->fdisc = lps->fdisc; m->auth_en = lps->auth_en; + m->bb_scn = lps->bb_scn; - bfa_reqq_produce(lps->bfa, lps->reqq); + bfa_reqq_produce(lps->bfa, lps->reqq, m->mh); + list_del(&lps->qe); + list_add_tail(&lps->qe, &mod->lps_login_q); } /* @@ -1684,11 +1711,11 @@ bfa_lps_send_logout(struct bfa_lps_s *lps) WARN_ON(!m); bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ, - bfa_lpuid(lps->bfa)); + bfa_fn_lpu(lps->bfa)); - m->lp_tag = lps->lp_tag; + m->fw_tag = lps->fw_tag; m->port_name = lps->pwwn; - bfa_reqq_produce(lps->bfa, lps->reqq); + bfa_reqq_produce(lps->bfa, lps->reqq, m->mh); } /* @@ -1703,11 +1730,11 @@ bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps) WARN_ON(!m); bfi_h2i_set(m->mh, BFI_MC_LPS, 
BFI_LPS_H2I_N2N_PID_REQ, - bfa_lpuid(lps->bfa)); + bfa_fn_lpu(lps->bfa)); - m->lp_tag = lps->lp_tag; + m->fw_tag = lps->fw_tag; m->lp_pid = lps->lp_pid; - bfa_reqq_produce(lps->bfa, lps->reqq); + bfa_reqq_produce(lps->bfa, lps->reqq, m->mh); } /* @@ -1859,7 +1886,7 @@ bfa_lps_delete(struct bfa_lps_s *lps) */ void bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz, - wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en) + wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en, uint8_t bb_scn) { lps->uarg = uarg; lps->alpa = alpa; @@ -1868,6 +1895,7 @@ bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz, lps->nwwn = nwwn; lps->fdisc = BFA_FALSE; lps->auth_en = auth_en; + lps->bb_scn = bb_scn; bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN); } @@ -1898,6 +1926,13 @@ bfa_lps_fdisclogo(struct bfa_lps_s *lps) bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT); } +u8 +bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag) +{ + struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); + + return BFA_LPS_FROM_TAG(mod, lp_tag)->fw_tag; +} /* * Return lport services tag given the pid @@ -1911,7 +1946,7 @@ bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid) for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) { if (lps->lp_pid == pid) - return lps->lp_tag; + return lps->bfa_tag; } /* Return base port tag anyway */ @@ -1936,7 +1971,7 @@ bfa_lps_get_base_pid(struct bfa_s *bfa) void bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid) { - bfa_trc(lps->bfa, lps->lp_tag); + bfa_trc(lps->bfa, lps->bfa_tag); bfa_trc(lps->bfa, n2n_pid); lps->lp_pid = n2n_pid; @@ -1955,15 +1990,15 @@ bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m) msg.msg = m; switch (m->mhdr.msg_id) { - case BFI_LPS_H2I_LOGIN_RSP: + case BFI_LPS_I2H_LOGIN_RSP: bfa_lps_login_rsp(bfa, msg.login_rsp); break; - case BFI_LPS_H2I_LOGOUT_RSP: + case BFI_LPS_I2H_LOGOUT_RSP: bfa_lps_logout_rsp(bfa, msg.logout_rsp); break; - case BFI_LPS_H2I_CVL_EVENT: + case BFI_LPS_I2H_CVL_EVENT: bfa_lps_rx_cvl_event(bfa, msg.cvl_event); break; @@ -1973,6 +2008,24 @@ bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m) } } +static void +bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event) +{ + struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad; + struct bfa_aen_entry_s *aen_entry; + + bfad_get_aen_entry(bfad, aen_entry); + if (!aen_entry) + return; + + aen_entry->aen_data.port.ioc_type = bfa_get_type(fcport->bfa); + aen_entry->aen_data.port.pwwn = fcport->pwwn; + + /* Send the AEN notification */ + bfad_im_post_vendor_event(aen_entry, bfad, ++fcport->bfa->bfa_aen_seq, + BFA_AEN_CAT_PORT, event); +} + /* * FC PORT state machine functions */ @@ -2061,6 +2114,7 @@ bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport, wwn2str(pwwn_buf, fcport->pwwn); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Base port disabled: WWN = %s\n", pwwn_buf); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE); break; case BFA_FCPORT_SM_LINKUP: @@ -2121,6 +2175,7 @@ bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport, wwn2str(pwwn_buf, fcport->pwwn); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Base port disabled: WWN = %s\n", pwwn_buf); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE); break; case BFA_FCPORT_SM_STOP: @@ -2174,6 +2229,12 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport, wwn2str(pwwn_buf, fcport->pwwn); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Base port online: WWN = %s\n", pwwn_buf); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE); + + /* If QoS is enabled and it is not online, send AEN */ + if (fcport->cfg.qos_enabled && + 
fcport->qos_attr.state != BFA_QOS_ONLINE) + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG); break; case BFA_FCPORT_SM_LINKDOWN: @@ -2200,6 +2261,7 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport, wwn2str(pwwn_buf, fcport->pwwn); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Base port disabled: WWN = %s\n", pwwn_buf); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE); break; case BFA_FCPORT_SM_STOP: @@ -2245,8 +2307,10 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport, wwn2str(pwwn_buf, fcport->pwwn); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Base port offline: WWN = %s\n", pwwn_buf); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Base port disabled: WWN = %s\n", pwwn_buf); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE); break; case BFA_FCPORT_SM_LINKDOWN: @@ -2256,26 +2320,32 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport, bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown"); wwn2str(pwwn_buf, fcport->pwwn); - if (BFA_PORT_IS_DISABLED(fcport->bfa)) + if (BFA_PORT_IS_DISABLED(fcport->bfa)) { BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Base port offline: WWN = %s\n", pwwn_buf); - else + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE); + } else { BFA_LOG(KERN_ERR, bfad, bfa_log_level, "Base port (WWN = %s) " "lost fabric connectivity\n", pwwn_buf); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); + } break; case BFA_FCPORT_SM_STOP: bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); bfa_fcport_reset_linkinfo(fcport); wwn2str(pwwn_buf, fcport->pwwn); - if (BFA_PORT_IS_DISABLED(fcport->bfa)) + if (BFA_PORT_IS_DISABLED(fcport->bfa)) { BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Base port offline: WWN = %s\n", pwwn_buf); - else + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE); + } else { BFA_LOG(KERN_ERR, bfad, bfa_log_level, "Base port (WWN = %s) " "lost fabric connectivity\n", pwwn_buf); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); + } break; case BFA_FCPORT_SM_HWFAIL: @@ -2283,13 +2353,16 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport, bfa_fcport_reset_linkinfo(fcport); bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE); wwn2str(pwwn_buf, fcport->pwwn); - if (BFA_PORT_IS_DISABLED(fcport->bfa)) + if (BFA_PORT_IS_DISABLED(fcport->bfa)) { BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Base port offline: WWN = %s\n", pwwn_buf); - else + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE); + } else { BFA_LOG(KERN_ERR, bfad, bfa_log_level, "Base port (WWN = %s) " "lost fabric connectivity\n", pwwn_buf); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); + } break; default: @@ -2420,6 +2493,7 @@ bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport, wwn2str(pwwn_buf, fcport->pwwn); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Base port enabled: WWN = %s\n", pwwn_buf); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE); break; case BFA_FCPORT_SM_STOP: @@ -2474,6 +2548,7 @@ bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport, wwn2str(pwwn_buf, fcport->pwwn); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Base port enabled: WWN = %s\n", pwwn_buf); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE); break; case BFA_FCPORT_SM_DISABLE: @@ -2777,10 +2852,12 @@ bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event) BFA_CACHELINE_SZ)) static void -bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, - u32 *dm_len) +bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, + struct bfa_s *bfa) { - *dm_len += FCPORT_STATS_DMA_SZ; + struct 
bfa_mem_dma_s *fcport_dma = BFA_MEM_FCPORT_DMA(bfa); + + bfa_mem_dma_setup(minfo, fcport_dma, FCPORT_STATS_DMA_SZ); } static void @@ -2792,23 +2869,14 @@ bfa_fcport_qresume(void *cbarg) } static void -bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo) +bfa_fcport_mem_claim(struct bfa_fcport_s *fcport) { - u8 *dm_kva; - u64 dm_pa; - - dm_kva = bfa_meminfo_dma_virt(meminfo); - dm_pa = bfa_meminfo_dma_phys(meminfo); - - fcport->stats_kva = dm_kva; - fcport->stats_pa = dm_pa; - fcport->stats = (union bfa_fcport_stats_u *) dm_kva; + struct bfa_mem_dma_s *fcport_dma = &fcport->fcport_dma; - dm_kva += FCPORT_STATS_DMA_SZ; - dm_pa += FCPORT_STATS_DMA_SZ; - - bfa_meminfo_dma_virt(meminfo) = dm_kva; - bfa_meminfo_dma_phys(meminfo) = dm_pa; + fcport->stats_kva = bfa_mem_dma_virt(fcport_dma); + fcport->stats_pa = bfa_mem_dma_phys(fcport_dma); + fcport->stats = (union bfa_fcport_stats_u *) + bfa_mem_dma_virt(fcport_dma); } /* @@ -2816,18 +2884,17 @@ bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo) */ static void bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, - struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) + struct bfa_pcidev_s *pcidev) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); struct bfa_port_cfg_s *port_cfg = &fcport->cfg; struct bfa_fcport_ln_s *ln = &fcport->ln; struct timeval tv; - memset(fcport, 0, sizeof(struct bfa_fcport_s)); fcport->bfa = bfa; ln->fcport = fcport; - bfa_fcport_mem_claim(fcport, meminfo); + bfa_fcport_mem_claim(fcport); bfa_sm_set_state(fcport, bfa_fcport_sm_uninit); bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn); @@ -2848,6 +2915,9 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS; + INIT_LIST_HEAD(&fcport->stats_pending_q); + INIT_LIST_HEAD(&fcport->statsclr_pending_q); + bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport); } @@ -2921,6 +2991,7 @@ bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport) { fcport->speed = BFA_PORT_SPEED_UNKNOWN; fcport->topology = BFA_PORT_TOPOLOGY_NONE; + fcport->bbsc_op_state = BFA_FALSE; } /* @@ -2948,7 +3019,7 @@ bfa_fcport_send_enable(struct bfa_fcport_s *fcport) } bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ, - bfa_lpuid(fcport->bfa)); + bfa_fn_lpu(fcport->bfa)); m->nwwn = fcport->nwwn; m->pwwn = fcport->pwwn; m->port_cfg = fcport->cfg; @@ -2962,7 +3033,7 @@ bfa_fcport_send_enable(struct bfa_fcport_s *fcport) /* * queue I/O message to firmware */ - bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); + bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh); return BFA_TRUE; } @@ -2991,13 +3062,13 @@ bfa_fcport_send_disable(struct bfa_fcport_s *fcport) } bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ, - bfa_lpuid(fcport->bfa)); + bfa_fn_lpu(fcport->bfa)); m->msgtag = fcport->msgtag; /* * queue I/O message to firmware */ - bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); + bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh); return BFA_TRUE; } @@ -3029,13 +3100,14 @@ bfa_fcport_send_txcredit(void *port_cbarg) } bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ, - bfa_lpuid(fcport->bfa)); + bfa_fn_lpu(fcport->bfa)); m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit); + m->bb_scn = fcport->cfg.bb_scn; /* * queue I/O message to firmware */ - bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); + bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh); } static void @@ -3074,30 +3146,38 @@ 
bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d, static void __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete) { - struct bfa_fcport_s *fcport = cbarg; + struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg; + struct bfa_cb_pending_q_s *cb; + struct list_head *qe, *qen; + union bfa_fcport_stats_u *ret; if (complete) { - if (fcport->stats_status == BFA_STATUS_OK) { - struct timeval tv; - - /* Swap FC QoS or FCoE stats */ - if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) { - bfa_fcport_qos_stats_swap( - &fcport->stats_ret->fcqos, - &fcport->stats->fcqos); - } else { - bfa_fcport_fcoe_stats_swap( - &fcport->stats_ret->fcoe, - &fcport->stats->fcoe); - - do_gettimeofday(&tv); - fcport->stats_ret->fcoe.secs_reset = + struct timeval tv; + if (fcport->stats_status == BFA_STATUS_OK) + do_gettimeofday(&tv); + + list_for_each_safe(qe, qen, &fcport->stats_pending_q) { + bfa_q_deq(&fcport->stats_pending_q, &qe); + cb = (struct bfa_cb_pending_q_s *)qe; + if (fcport->stats_status == BFA_STATUS_OK) { + ret = (union bfa_fcport_stats_u *)cb->data; + /* Swap FC QoS or FCoE stats */ + if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) + bfa_fcport_qos_stats_swap(&ret->fcqos, + &fcport->stats->fcqos); + else { + bfa_fcport_fcoe_stats_swap(&ret->fcoe, + &fcport->stats->fcoe); + ret->fcoe.secs_reset = tv.tv_sec - fcport->stats_reset_time; + } } + bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe, + fcport->stats_status); } - fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status); + fcport->stats_status = BFA_STATUS_OK; } else { - fcport->stats_busy = BFA_FALSE; + INIT_LIST_HEAD(&fcport->stats_pending_q); fcport->stats_status = BFA_STATUS_OK; } } @@ -3115,8 +3195,7 @@ bfa_fcport_stats_get_timeout(void *cbarg) } fcport->stats_status = BFA_STATUS_ETIMER; - bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get, - fcport); + __bfa_cb_fcport_stats_get(fcport, BFA_TRUE); } static void @@ -3139,14 +3218,16 @@ bfa_fcport_send_stats_get(void *cbarg) memset(msg, 0, sizeof(struct bfi_fcport_req_s)); bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ, - bfa_lpuid(fcport->bfa)); - bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); + bfa_fn_lpu(fcport->bfa)); + bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh); } static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete) { - struct bfa_fcport_s *fcport = cbarg; + struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; + struct bfa_cb_pending_q_s *cb; + struct list_head *qe, *qen; if (complete) { struct timeval tv; @@ -3156,10 +3237,15 @@ __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete) */ do_gettimeofday(&tv); fcport->stats_reset_time = tv.tv_sec; - - fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status); + list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) { + bfa_q_deq(&fcport->statsclr_pending_q, &qe); + cb = (struct bfa_cb_pending_q_s *)qe; + bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe, + fcport->stats_status); + } + fcport->stats_status = BFA_STATUS_OK; } else { - fcport->stats_busy = BFA_FALSE; + INIT_LIST_HEAD(&fcport->statsclr_pending_q); fcport->stats_status = BFA_STATUS_OK; } } @@ -3177,8 +3263,7 @@ bfa_fcport_stats_clr_timeout(void *cbarg) } fcport->stats_status = BFA_STATUS_ETIMER; - bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, - __bfa_cb_fcport_stats_clr, fcport); + __bfa_cb_fcport_stats_clr(fcport, BFA_TRUE); } static void @@ -3201,8 +3286,8 @@ bfa_fcport_send_stats_clear(void *cbarg) memset(msg, 0, sizeof(struct bfi_fcport_req_s)); bfi_h2i_set(msg->mh, BFI_MC_FCPORT, 
BFI_FCPORT_H2I_STATS_CLEAR_REQ, - bfa_lpuid(fcport->bfa)); - bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); + bfa_fn_lpu(fcport->bfa)); + bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh); } /* @@ -3329,6 +3414,9 @@ bfa_fcport_init(struct bfa_s *bfa) fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc); fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc); + if (bfa_fcport_is_pbcdisabled(bfa)) + bfa->modules.port.pbc_disabled = BFA_TRUE; + WARN_ON(!fcport->cfg.maxfrsize); WARN_ON(!fcport->cfg.rx_bbcredit); WARN_ON(!fcport->speed_sup); @@ -3371,6 +3459,11 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) fcport->use_flash_cfg = BFA_FALSE; } + if (fcport->cfg.qos_enabled) + fcport->qos_attr.state = BFA_QOS_OFFLINE; + else + fcport->qos_attr.state = BFA_QOS_DISABLED; + bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP); } break; @@ -3395,28 +3488,26 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) /* * check for timer pop before processing the rsp */ - if (fcport->stats_busy == BFA_FALSE || - fcport->stats_status == BFA_STATUS_ETIMER) + if (list_empty(&fcport->stats_pending_q) || + (fcport->stats_status == BFA_STATUS_ETIMER)) break; bfa_timer_stop(&fcport->timer); fcport->stats_status = i2hmsg.pstatsget_rsp->status; - bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, - __bfa_cb_fcport_stats_get, fcport); + __bfa_cb_fcport_stats_get(fcport, BFA_TRUE); break; case BFI_FCPORT_I2H_STATS_CLEAR_RSP: /* * check for timer pop before processing the rsp */ - if (fcport->stats_busy == BFA_FALSE || - fcport->stats_status == BFA_STATUS_ETIMER) + if (list_empty(&fcport->statsclr_pending_q) || + (fcport->stats_status == BFA_STATUS_ETIMER)) break; bfa_timer_stop(&fcport->timer); fcport->stats_status = BFA_STATUS_OK; - bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, - __bfa_cb_fcport_stats_clr, fcport); + __bfa_cb_fcport_stats_clr(fcport, BFA_TRUE); break; case BFI_FCPORT_I2H_ENABLE_AEN: @@ -3453,6 +3544,9 @@ bfa_fcport_enable(struct bfa_s *bfa) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + if (bfa_fcport_is_pbcdisabled(bfa)) + return BFA_STATUS_PBC; + if (bfa_ioc_is_disabled(&bfa->ioc)) return BFA_STATUS_IOC_DISABLED; @@ -3466,6 +3560,8 @@ bfa_fcport_enable(struct bfa_s *bfa) bfa_status_t bfa_fcport_disable(struct bfa_s *bfa) { + if (bfa_fcport_is_pbcdisabled(bfa)) + return BFA_STATUS_PBC; if (bfa_ioc_is_disabled(&bfa->ioc)) return BFA_STATUS_IOC_DISABLED; @@ -3474,6 +3570,21 @@ bfa_fcport_disable(struct bfa_s *bfa) return BFA_STATUS_OK; } +/* If PBC is disabled on port, return error */ +bfa_status_t +bfa_fcport_is_pbcdisabled(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + struct bfa_iocfc_s *iocfc = &bfa->iocfc; + struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; + + if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) { + bfa_trc(bfa, fcport->pwwn); + return BFA_STATUS_PBC; + } + return BFA_STATUS_OK; +} + /* * Configure port speed. 
*/ @@ -3491,6 +3602,28 @@ bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed) return BFA_STATUS_UNSUPP_SPEED; } + /* For Mezz card, port speed entered needs to be checked */ + if (bfa_mfg_is_mezz(fcport->bfa->ioc.attr->card_type)) { + if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) { + /* For CT2, 1G is not supported */ + if ((speed == BFA_PORT_SPEED_1GBPS) && + (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) + return BFA_STATUS_UNSUPP_SPEED; + + /* Already checked for Auto Speed and Max Speed supp */ + if (!(speed == BFA_PORT_SPEED_1GBPS || + speed == BFA_PORT_SPEED_2GBPS || + speed == BFA_PORT_SPEED_4GBPS || + speed == BFA_PORT_SPEED_8GBPS || + speed == BFA_PORT_SPEED_16GBPS || + speed == BFA_PORT_SPEED_AUTO)) + return BFA_STATUS_UNSUPP_SPEED; + } else { + if (speed != BFA_PORT_SPEED_10GBPS) + return BFA_STATUS_UNSUPP_SPEED; + } + } + fcport->cfg.speed = speed; return BFA_STATUS_OK; @@ -3624,11 +3757,14 @@ bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa) } void -bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit) +bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); fcport->cfg.tx_bbcredit = (u8)tx_bbcredit; + fcport->cfg.bb_scn = bb_scn; + if (bb_scn) + fcport->bbsc_op_state = BFA_TRUE; bfa_fcport_send_txcredit(fcport); } @@ -3675,16 +3811,23 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr) /* beacon attributes */ attr->beacon = fcport->beacon; attr->link_e2e_beacon = fcport->link_e2e_beacon; - attr->plog_enabled = (bfa_boolean_t)fcport->bfa->plog->plog_enabled; - attr->io_profile = bfa_fcpim_get_io_profile(fcport->bfa); attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa); attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa); attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm); - if (bfa_ioc_is_disabled(&fcport->bfa->ioc)) - attr->port_state = BFA_PORT_ST_IOCDIS; - else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc)) - attr->port_state = BFA_PORT_ST_FWMISMATCH; + attr->bbsc_op_status = fcport->bbsc_op_state; + + /* PBC Disabled State */ + if (bfa_fcport_is_pbcdisabled(bfa)) + attr->port_state = BFA_PORT_ST_PREBOOT_DISABLED; + else { + if (bfa_ioc_is_disabled(&fcport->bfa->ioc)) + attr->port_state = BFA_PORT_ST_IOCDIS; + else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc)) + attr->port_state = BFA_PORT_ST_FWMISMATCH; + else if (bfa_ioc_is_acq_addr(&fcport->bfa->ioc)) + attr->port_state = BFA_PORT_ST_ACQ_ADDR; + } /* FCoE vlan */ attr->fcoe_vlan = fcport->fcoe_vlan; @@ -3696,25 +3839,25 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr) * Fetch port statistics (FCQoS or FCoE). 
*/ bfa_status_t -bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats, - bfa_cb_port_t cbfn, void *cbarg) +bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - if (fcport->stats_busy) { - bfa_trc(bfa, fcport->stats_busy); - return BFA_STATUS_DEVBUSY; - } + if (bfa_ioc_is_disabled(&bfa->ioc)) + return BFA_STATUS_IOC_DISABLED; - fcport->stats_busy = BFA_TRUE; - fcport->stats_ret = stats; - fcport->stats_cbfn = cbfn; - fcport->stats_cbarg = cbarg; + if (!list_empty(&fcport->statsclr_pending_q)) + return BFA_STATUS_DEVBUSY; - bfa_fcport_send_stats_get(fcport); + if (list_empty(&fcport->stats_pending_q)) { + list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q); + bfa_fcport_send_stats_get(fcport); + bfa_timer_start(bfa, &fcport->timer, + bfa_fcport_stats_get_timeout, + fcport, BFA_FCPORT_STATS_TOV); + } else + list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q); - bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout, - fcport, BFA_FCPORT_STATS_TOV); return BFA_STATUS_OK; } @@ -3722,27 +3865,25 @@ bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats, * Reset port statistics (FCQoS or FCoE). */ bfa_status_t -bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg) +bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - if (fcport->stats_busy) { - bfa_trc(bfa, fcport->stats_busy); + if (!list_empty(&fcport->stats_pending_q)) return BFA_STATUS_DEVBUSY; - } - - fcport->stats_busy = BFA_TRUE; - fcport->stats_cbfn = cbfn; - fcport->stats_cbarg = cbarg; - bfa_fcport_send_stats_clear(fcport); + if (list_empty(&fcport->statsclr_pending_q)) { + list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q); + bfa_fcport_send_stats_clear(fcport); + bfa_timer_start(bfa, &fcport->timer, + bfa_fcport_stats_clr_timeout, + fcport, BFA_FCPORT_STATS_TOV); + } else + list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q); - bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout, - fcport, BFA_FCPORT_STATS_TOV); return BFA_STATUS_OK; } - /* * Fetch port attributes. 
*/ @@ -3766,6 +3907,18 @@ bfa_fcport_is_ratelim(struct bfa_s *bfa) } /* + * Enable/Disable FAA feature in port config + */ +void +bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + bfa_trc(bfa, state); + fcport->cfg.faa_state = state; +} + +/* * Get default minimum ratelim speed */ enum bfa_port_speed @@ -3778,6 +3931,22 @@ bfa_fcport_get_ratelim_speed(struct bfa_s *bfa) } +void +bfa_fcport_beacon(void *dev, bfa_boolean_t beacon, + bfa_boolean_t link_e2e_beacon) +{ + struct bfa_s *bfa = dev; + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + bfa_trc(bfa, beacon); + bfa_trc(bfa, link_e2e_beacon); + bfa_trc(bfa, fcport->beacon); + bfa_trc(bfa, fcport->link_e2e_beacon); + + fcport->beacon = beacon; + fcport->link_e2e_beacon = link_e2e_beacon; +} + bfa_boolean_t bfa_fcport_is_linkup(struct bfa_s *bfa) { @@ -3797,6 +3966,14 @@ bfa_fcport_is_qos_enabled(struct bfa_s *bfa) return fcport->cfg.qos_enabled; } +bfa_boolean_t +bfa_fcport_is_trunk_enabled(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + return fcport->cfg.trunked; +} + /* * Rport State machine functions */ @@ -4286,18 +4463,22 @@ bfa_rport_qresume(void *cbarg) } static void -bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, - u32 *dm_len) +bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, + struct bfa_s *bfa) { + struct bfa_mem_kva_s *rport_kva = BFA_MEM_RPORT_KVA(bfa); + if (cfg->fwcfg.num_rports < BFA_RPORT_MIN) cfg->fwcfg.num_rports = BFA_RPORT_MIN; - *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s); + /* kva memory */ + bfa_mem_kva_setup(minfo, rport_kva, + cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s)); } static void bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, - struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) + struct bfa_pcidev_s *pcidev) { struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa); struct bfa_rport_s *rp; @@ -4305,8 +4486,9 @@ bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, INIT_LIST_HEAD(&mod->rp_free_q); INIT_LIST_HEAD(&mod->rp_active_q); + INIT_LIST_HEAD(&mod->rp_unused_q); - rp = (struct bfa_rport_s *) bfa_meminfo_kva(meminfo); + rp = (struct bfa_rport_s *) bfa_mem_kva_curp(mod); mod->rps_list = rp; mod->num_rports = cfg->fwcfg.num_rports; @@ -4331,7 +4513,7 @@ bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, /* * consume memory */ - bfa_meminfo_kva(meminfo) = (u8 *) rp; + bfa_mem_kva_curp(mod) = (u8 *) rp; } static void @@ -4356,6 +4538,9 @@ bfa_rport_iocdisable(struct bfa_s *bfa) struct bfa_rport_s *rport; struct list_head *qe, *qen; + /* Enqueue unused rport resources to free_q */ + list_splice_tail_init(&mod->rp_unused_q, &mod->rp_free_q); + list_for_each_safe(qe, qen, &mod->rp_active_q) { rport = (struct bfa_rport_s *) qe; bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL); @@ -4399,11 +4584,11 @@ bfa_rport_send_fwcreate(struct bfa_rport_s *rp) } bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ, - bfa_lpuid(rp->bfa)); + bfa_fn_lpu(rp->bfa)); m->bfa_handle = rp->rport_tag; m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz); m->pid = rp->rport_info.pid; - m->lp_tag = rp->rport_info.lp_tag; + m->lp_fwtag = bfa_lps_get_fwtag(rp->bfa, (u8)rp->rport_info.lp_tag); m->local_pid = rp->rport_info.local_pid; m->fc_class = rp->rport_info.fc_class; m->vf_en = rp->rport_info.vf_en; @@ -4413,7 +4598,7 @@ bfa_rport_send_fwcreate(struct bfa_rport_s *rp) /* * queue I/O message to 
firmware */ - bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT); + bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh); return BFA_TRUE; } @@ -4432,13 +4617,13 @@ bfa_rport_send_fwdelete(struct bfa_rport_s *rp) } bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ, - bfa_lpuid(rp->bfa)); + bfa_fn_lpu(rp->bfa)); m->fw_handle = rp->fw_handle; /* * queue I/O message to firmware */ - bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT); + bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh); return BFA_TRUE; } @@ -4457,14 +4642,14 @@ bfa_rport_send_fwspeed(struct bfa_rport_s *rp) } bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ, - bfa_lpuid(rp->bfa)); + bfa_fn_lpu(rp->bfa)); m->fw_handle = rp->fw_handle; m->speed = (u8)rp->rport_info.speed; /* * queue I/O message to firmware */ - bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT); + bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh); return BFA_TRUE; } @@ -4492,6 +4677,7 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m) rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle); rp->fw_handle = msg.create_rsp->fw_handle; rp->qos_attr = msg.create_rsp->qos_attr; + bfa_rport_set_lunmask(bfa, rp); WARN_ON(msg.create_rsp->status != BFA_STATUS_OK); bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP); break; @@ -4499,6 +4685,7 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m) case BFI_RPORT_I2H_DELETE_RSP: rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle); WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK); + bfa_rport_unset_lunmask(bfa, rp); bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP); break; @@ -4514,7 +4701,18 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m) } } +void +bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw) +{ + struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa); + struct list_head *qe; + int i; + for (i = 0; i < (mod->num_rports - num_rport_fw); i++) { + bfa_q_deq_tail(&mod->rp_free_q, &qe); + list_add_tail(qe, &mod->rp_unused_q); + } +} /* * bfa_rport_api @@ -4568,6 +4766,37 @@ bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed) bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED); } +/* Set Rport LUN Mask */ +void +bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp) +{ + struct bfa_lps_mod_s *lps_mod = BFA_LPS_MOD(bfa); + wwn_t lp_wwn, rp_wwn; + u8 lp_tag = (u8)rp->rport_info.lp_tag; + + rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn; + lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn; + + BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask = + rp->lun_mask = BFA_TRUE; + bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn, rp->rport_tag, lp_tag); +} + +/* Unset Rport LUN mask */ +void +bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp) +{ + struct bfa_lps_mod_s *lps_mod = BFA_LPS_MOD(bfa); + wwn_t lp_wwn, rp_wwn; + + rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn; + lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn; + + BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask = + rp->lun_mask = BFA_FALSE; + bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn, + BFA_RPORT_TAG_INVALID, BFA_LP_TAG_INVALID); +} /* * SGPG related functions @@ -4577,26 +4806,51 @@ bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed) * Compute and return memory needed by FCP(im) module. 
*/ static void -bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, - u32 *dm_len) +bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, + struct bfa_s *bfa) { + struct bfa_sgpg_mod_s *sgpg_mod = BFA_SGPG_MOD(bfa); + struct bfa_mem_kva_s *sgpg_kva = BFA_MEM_SGPG_KVA(bfa); + struct bfa_mem_dma_s *seg_ptr; + u16 nsegs, idx, per_seg_sgpg, num_sgpg; + u32 sgpg_sz = sizeof(struct bfi_sgpg_s); + if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN) cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN; + else if (cfg->drvcfg.num_sgpgs > BFA_SGPG_MAX) + cfg->drvcfg.num_sgpgs = BFA_SGPG_MAX; - *km_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfa_sgpg_s); - *dm_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfi_sgpg_s); -} + num_sgpg = cfg->drvcfg.num_sgpgs; + + nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz); + per_seg_sgpg = BFI_MEM_NREQS_SEG(sgpg_sz); + + bfa_mem_dma_seg_iter(sgpg_mod, seg_ptr, nsegs, idx) { + if (num_sgpg >= per_seg_sgpg) { + num_sgpg -= per_seg_sgpg; + bfa_mem_dma_setup(minfo, seg_ptr, + per_seg_sgpg * sgpg_sz); + } else + bfa_mem_dma_setup(minfo, seg_ptr, + num_sgpg * sgpg_sz); + } + /* kva memory */ + bfa_mem_kva_setup(minfo, sgpg_kva, + cfg->drvcfg.num_sgpgs * sizeof(struct bfa_sgpg_s)); +} static void bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, - struct bfa_meminfo_s *minfo, struct bfa_pcidev_s *pcidev) + struct bfa_pcidev_s *pcidev) { struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); - int i; struct bfa_sgpg_s *hsgpg; struct bfi_sgpg_s *sgpg; u64 align_len; + struct bfa_mem_dma_s *seg_ptr; + u32 sgpg_sz = sizeof(struct bfi_sgpg_s); + u16 i, idx, nsegs, per_seg_sgpg, num_sgpg; union { u64 pa; @@ -4608,39 +4862,45 @@ bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, bfa_trc(bfa, cfg->drvcfg.num_sgpgs); - mod->num_sgpgs = cfg->drvcfg.num_sgpgs; - mod->sgpg_arr_pa = bfa_meminfo_dma_phys(minfo); - align_len = (BFA_SGPG_ROUNDUP(mod->sgpg_arr_pa) - mod->sgpg_arr_pa); - mod->sgpg_arr_pa += align_len; - mod->hsgpg_arr = (struct bfa_sgpg_s *) (bfa_meminfo_kva(minfo) + - align_len); - mod->sgpg_arr = (struct bfi_sgpg_s *) (bfa_meminfo_dma_virt(minfo) + - align_len); - - hsgpg = mod->hsgpg_arr; - sgpg = mod->sgpg_arr; - sgpg_pa.pa = mod->sgpg_arr_pa; - mod->free_sgpgs = mod->num_sgpgs; - - WARN_ON(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1)); - - for (i = 0; i < mod->num_sgpgs; i++) { - memset(hsgpg, 0, sizeof(*hsgpg)); - memset(sgpg, 0, sizeof(*sgpg)); - - hsgpg->sgpg = sgpg; - sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa); - hsgpg->sgpg_pa = sgpg_pa_tmp.addr; - list_add_tail(&hsgpg->qe, &mod->sgpg_q); - - hsgpg++; - sgpg++; - sgpg_pa.pa += sizeof(struct bfi_sgpg_s); + mod->free_sgpgs = mod->num_sgpgs = cfg->drvcfg.num_sgpgs; + + num_sgpg = cfg->drvcfg.num_sgpgs; + nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz); + + /* dma/kva mem claim */ + hsgpg = (struct bfa_sgpg_s *) bfa_mem_kva_curp(mod); + + bfa_mem_dma_seg_iter(mod, seg_ptr, nsegs, idx) { + + if (!bfa_mem_dma_virt(seg_ptr)) + break; + + align_len = BFA_SGPG_ROUNDUP(bfa_mem_dma_phys(seg_ptr)) - + bfa_mem_dma_phys(seg_ptr); + + sgpg = (struct bfi_sgpg_s *) + (((u8 *) bfa_mem_dma_virt(seg_ptr)) + align_len); + sgpg_pa.pa = bfa_mem_dma_phys(seg_ptr) + align_len; + WARN_ON(sgpg_pa.pa & (sgpg_sz - 1)); + + per_seg_sgpg = (seg_ptr->mem_len - (u32)align_len) / sgpg_sz; + + for (i = 0; num_sgpg > 0 && i < per_seg_sgpg; i++, num_sgpg--) { + memset(hsgpg, 0, sizeof(*hsgpg)); + memset(sgpg, 0, sizeof(*sgpg)); + + hsgpg->sgpg = sgpg; + sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa); + 
hsgpg->sgpg_pa = sgpg_pa_tmp.addr; + list_add_tail(&hsgpg->qe, &mod->sgpg_q); + + sgpg++; + hsgpg++; + sgpg_pa.pa += sgpg_sz; + } } - bfa_meminfo_kva(minfo) = (u8 *) hsgpg; - bfa_meminfo_dma_virt(minfo) = (u8 *) sgpg; - bfa_meminfo_dma_phys(minfo) = sgpg_pa.pa; + bfa_mem_kva_curp(mod) = (u8 *) hsgpg; } static void @@ -4782,31 +5042,13 @@ __bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete) } static void -claim_uf_pbs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi) -{ - u32 uf_pb_tot_sz; - - ufm->uf_pbs_kva = (struct bfa_uf_buf_s *) bfa_meminfo_dma_virt(mi); - ufm->uf_pbs_pa = bfa_meminfo_dma_phys(mi); - uf_pb_tot_sz = BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * ufm->num_ufs), - BFA_DMA_ALIGN_SZ); - - bfa_meminfo_dma_virt(mi) += uf_pb_tot_sz; - bfa_meminfo_dma_phys(mi) += uf_pb_tot_sz; - - memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz); -} - -static void -claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi) +claim_uf_post_msgs(struct bfa_uf_mod_s *ufm) { struct bfi_uf_buf_post_s *uf_bp_msg; - struct bfi_sge_s *sge; - union bfi_addr_u sga_zero = { {0} }; u16 i; u16 buf_len; - ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_meminfo_kva(mi); + ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_mem_kva_curp(ufm); uf_bp_msg = ufm->uf_buf_posts; for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs; @@ -4817,28 +5059,18 @@ claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi) buf_len = sizeof(struct bfa_uf_buf_s); uf_bp_msg->buf_len = cpu_to_be16(buf_len); bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST, - bfa_lpuid(ufm->bfa)); - - sge = uf_bp_msg->sge; - sge[0].sg_len = buf_len; - sge[0].flags = BFI_SGE_DATA_LAST; - bfa_dma_addr_set(sge[0].sga, ufm_pbs_pa(ufm, i)); - bfa_sge_to_be(sge); - - sge[1].sg_len = buf_len; - sge[1].flags = BFI_SGE_PGDLEN; - sge[1].sga = sga_zero; - bfa_sge_to_be(&sge[1]); + bfa_fn_lpu(ufm->bfa)); + bfa_alen_set(&uf_bp_msg->alen, buf_len, ufm_pbs_pa(ufm, i)); } /* * advance pointer beyond consumed memory */ - bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg; + bfa_mem_kva_curp(ufm) = (u8 *) uf_bp_msg; } static void -claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi) +claim_ufs(struct bfa_uf_mod_s *ufm) { u16 i; struct bfa_uf_s *uf; @@ -4846,7 +5078,7 @@ claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi) /* * Claim block of memory for UF list */ - ufm->uf_list = (struct bfa_uf_s *) bfa_meminfo_kva(mi); + ufm->uf_list = (struct bfa_uf_s *) bfa_mem_kva_curp(ufm); /* * Initialize UFs and queue it in UF free queue @@ -4855,8 +5087,8 @@ claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi) memset(uf, 0, sizeof(struct bfa_uf_s)); uf->bfa = ufm->bfa; uf->uf_tag = i; - uf->pb_len = sizeof(struct bfa_uf_buf_s); - uf->buf_kva = (void *)&ufm->uf_pbs_kva[i]; + uf->pb_len = BFA_PER_UF_DMA_SZ; + uf->buf_kva = bfa_mem_get_dmabuf_kva(ufm, i, BFA_PER_UF_DMA_SZ); uf->buf_pa = ufm_pbs_pa(ufm, i); list_add_tail(&uf->qe, &ufm->uf_free_q); } @@ -4864,48 +5096,57 @@ claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi) /* * advance memory pointer */ - bfa_meminfo_kva(mi) = (u8 *) uf; + bfa_mem_kva_curp(ufm) = (u8 *) uf; } static void -uf_mem_claim(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi) +uf_mem_claim(struct bfa_uf_mod_s *ufm) { - claim_uf_pbs(ufm, mi); - claim_ufs(ufm, mi); - claim_uf_post_msgs(ufm, mi); + claim_ufs(ufm); + claim_uf_post_msgs(ufm); } static void -bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len) +bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct 
bfa_meminfo_s *minfo, + struct bfa_s *bfa) { - u32 num_ufs = cfg->fwcfg.num_uf_bufs; - - /* - * dma-able memory for UF posted bufs - */ - *dm_len += BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * num_ufs), - BFA_DMA_ALIGN_SZ); + struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa); + struct bfa_mem_kva_s *uf_kva = BFA_MEM_UF_KVA(bfa); + u32 num_ufs = cfg->fwcfg.num_uf_bufs; + struct bfa_mem_dma_s *seg_ptr; + u16 nsegs, idx, per_seg_uf = 0; + + nsegs = BFI_MEM_DMA_NSEGS(num_ufs, BFA_PER_UF_DMA_SZ); + per_seg_uf = BFI_MEM_NREQS_SEG(BFA_PER_UF_DMA_SZ); + + bfa_mem_dma_seg_iter(ufm, seg_ptr, nsegs, idx) { + if (num_ufs >= per_seg_uf) { + num_ufs -= per_seg_uf; + bfa_mem_dma_setup(minfo, seg_ptr, + per_seg_uf * BFA_PER_UF_DMA_SZ); + } else + bfa_mem_dma_setup(minfo, seg_ptr, + num_ufs * BFA_PER_UF_DMA_SZ); + } - /* - * kernel Virtual memory for UFs and UF buf post msg copies - */ - *ndm_len += sizeof(struct bfa_uf_s) * num_ufs; - *ndm_len += sizeof(struct bfi_uf_buf_post_s) * num_ufs; + /* kva memory */ + bfa_mem_kva_setup(minfo, uf_kva, cfg->fwcfg.num_uf_bufs * + (sizeof(struct bfa_uf_s) + sizeof(struct bfi_uf_buf_post_s))); } static void bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, - struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) + struct bfa_pcidev_s *pcidev) { struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa); - memset(ufm, 0, sizeof(struct bfa_uf_mod_s)); ufm->bfa = bfa; ufm->num_ufs = cfg->fwcfg.num_uf_bufs; INIT_LIST_HEAD(&ufm->uf_free_q); INIT_LIST_HEAD(&ufm->uf_posted_q); + INIT_LIST_HEAD(&ufm->uf_unused_q); - uf_mem_claim(ufm, meminfo); + uf_mem_claim(ufm); } static void @@ -4939,7 +5180,7 @@ bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf) memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag], sizeof(struct bfi_uf_buf_post_s)); - bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP); + bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP, uf_post_msg->mh); bfa_trc(ufm->bfa, uf->uf_tag); @@ -4963,11 +5204,15 @@ uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m) { struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa); u16 uf_tag = m->buf_tag; - struct bfa_uf_buf_s *uf_buf = &ufm->uf_pbs_kva[uf_tag]; struct bfa_uf_s *uf = &ufm->uf_list[uf_tag]; - u8 *buf = &uf_buf->d[0]; + struct bfa_uf_buf_s *uf_buf; + uint8_t *buf; struct fchs_s *fchs; + uf_buf = (struct bfa_uf_buf_s *) + bfa_mem_get_dmabuf_kva(ufm, uf_tag, uf->pb_len); + buf = &uf_buf->d[0]; + m->frm_len = be16_to_cpu(m->frm_len); m->xfr_len = be16_to_cpu(m->xfr_len); @@ -5008,6 +5253,9 @@ bfa_uf_iocdisable(struct bfa_s *bfa) struct bfa_uf_s *uf; struct list_head *qe, *qen; + /* Enqueue unused uf resources to free_q */ + list_splice_tail_init(&ufm->uf_unused_q, &ufm->uf_free_q); + list_for_each_safe(qe, qen, &ufm->uf_posted_q) { uf = (struct bfa_uf_s *) qe; list_del(&uf->qe); @@ -5072,4 +5320,433 @@ bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) } } +void +bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw) +{ + struct bfa_uf_mod_s *mod = BFA_UF_MOD(bfa); + struct list_head *qe; + int i; + + for (i = 0; i < (mod->num_ufs - num_uf_fw); i++) { + bfa_q_deq_tail(&mod->uf_free_q, &qe); + list_add_tail(qe, &mod->uf_unused_q); + } +} + +/* + * BFA fcdiag module + */ +#define BFA_DIAG_QTEST_TOV 1000 /* msec */ + +/* + * Set port status to busy + */ +static void +bfa_fcdiag_set_busy_status(struct bfa_fcdiag_s *fcdiag) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fcdiag->bfa); + + if (fcdiag->lb.lock) + fcport->diag_busy = BFA_TRUE; + else + fcport->diag_busy = BFA_FALSE; +} + +static void +bfa_fcdiag_meminfo(struct bfa_iocfc_cfg_s 
*cfg, struct bfa_meminfo_s *meminfo, + struct bfa_s *bfa) +{ +} + +static void +bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, + struct bfa_pcidev_s *pcidev) +{ + struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); + fcdiag->bfa = bfa; + fcdiag->trcmod = bfa->trcmod; + /* The common DIAG attach bfa_diag_attach() will do all memory claim */ +} + +static void +bfa_fcdiag_iocdisable(struct bfa_s *bfa) +{ + struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); + bfa_trc(fcdiag, fcdiag->lb.lock); + if (fcdiag->lb.lock) { + fcdiag->lb.status = BFA_STATUS_IOC_FAILURE; + fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status); + fcdiag->lb.lock = 0; + bfa_fcdiag_set_busy_status(fcdiag); + } +} + +static void +bfa_fcdiag_detach(struct bfa_s *bfa) +{ +} + +static void +bfa_fcdiag_start(struct bfa_s *bfa) +{ +} + +static void +bfa_fcdiag_stop(struct bfa_s *bfa) +{ +} + +static void +bfa_fcdiag_queuetest_timeout(void *cbarg) +{ + struct bfa_fcdiag_s *fcdiag = cbarg; + struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result; + + bfa_trc(fcdiag, fcdiag->qtest.all); + bfa_trc(fcdiag, fcdiag->qtest.count); + + fcdiag->qtest.timer_active = 0; + + res->status = BFA_STATUS_ETIMER; + res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count; + if (fcdiag->qtest.all) + res->queue = fcdiag->qtest.all; + + bfa_trc(fcdiag, BFA_STATUS_ETIMER); + fcdiag->qtest.status = BFA_STATUS_ETIMER; + fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status); + fcdiag->qtest.lock = 0; +} + +static bfa_status_t +bfa_fcdiag_queuetest_send(struct bfa_fcdiag_s *fcdiag) +{ + u32 i; + struct bfi_diag_qtest_req_s *req; + + req = bfa_reqq_next(fcdiag->bfa, fcdiag->qtest.queue); + if (!req) + return BFA_STATUS_DEVBUSY; + + /* build host command */ + bfi_h2i_set(req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_QTEST, + bfa_fn_lpu(fcdiag->bfa)); + + for (i = 0; i < BFI_LMSG_PL_WSZ; i++) + req->data[i] = QTEST_PAT_DEFAULT; + + bfa_trc(fcdiag, fcdiag->qtest.queue); + /* ring door bell */ + bfa_reqq_produce(fcdiag->bfa, fcdiag->qtest.queue, req->mh); + return BFA_STATUS_OK; +} + +static void +bfa_fcdiag_queuetest_comp(struct bfa_fcdiag_s *fcdiag, + bfi_diag_qtest_rsp_t *rsp) +{ + struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result; + bfa_status_t status = BFA_STATUS_OK; + int i; + + /* Check timer, should still be active */ + if (!fcdiag->qtest.timer_active) { + bfa_trc(fcdiag, fcdiag->qtest.timer_active); + return; + } + + /* update count */ + fcdiag->qtest.count--; + + /* Check result */ + for (i = 0; i < BFI_LMSG_PL_WSZ; i++) { + if (rsp->data[i] != ~(QTEST_PAT_DEFAULT)) { + res->status = BFA_STATUS_DATACORRUPTED; + break; + } + } + + if (res->status == BFA_STATUS_OK) { + if (fcdiag->qtest.count > 0) { + status = bfa_fcdiag_queuetest_send(fcdiag); + if (status == BFA_STATUS_OK) + return; + else + res->status = status; + } else if (fcdiag->qtest.all > 0 && + fcdiag->qtest.queue < (BFI_IOC_MAX_CQS - 1)) { + fcdiag->qtest.count = QTEST_CNT_DEFAULT; + fcdiag->qtest.queue++; + status = bfa_fcdiag_queuetest_send(fcdiag); + if (status == BFA_STATUS_OK) + return; + else + res->status = status; + } + } + + /* Stop timer when we comp all queue */ + if (fcdiag->qtest.timer_active) { + bfa_timer_stop(&fcdiag->qtest.timer); + fcdiag->qtest.timer_active = 0; + } + res->queue = fcdiag->qtest.queue; + res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count; + bfa_trc(fcdiag, res->count); + bfa_trc(fcdiag, res->status); + fcdiag->qtest.status = res->status; + fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status); + 
fcdiag->qtest.lock = 0; +} + +static void +bfa_fcdiag_loopback_comp(struct bfa_fcdiag_s *fcdiag, + struct bfi_diag_lb_rsp_s *rsp) +{ + struct bfa_diag_loopback_result_s *res = fcdiag->lb.result; + + res->numtxmfrm = be32_to_cpu(rsp->res.numtxmfrm); + res->numosffrm = be32_to_cpu(rsp->res.numosffrm); + res->numrcvfrm = be32_to_cpu(rsp->res.numrcvfrm); + res->badfrminf = be32_to_cpu(rsp->res.badfrminf); + res->badfrmnum = be32_to_cpu(rsp->res.badfrmnum); + res->status = rsp->res.status; + fcdiag->lb.status = rsp->res.status; + bfa_trc(fcdiag, fcdiag->lb.status); + fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status); + fcdiag->lb.lock = 0; + bfa_fcdiag_set_busy_status(fcdiag); +} + +static bfa_status_t +bfa_fcdiag_loopback_send(struct bfa_fcdiag_s *fcdiag, + struct bfa_diag_loopback_s *loopback) +{ + struct bfi_diag_lb_req_s *lb_req; + + lb_req = bfa_reqq_next(fcdiag->bfa, BFA_REQQ_DIAG); + if (!lb_req) + return BFA_STATUS_DEVBUSY; + + /* build host command */ + bfi_h2i_set(lb_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LOOPBACK, + bfa_fn_lpu(fcdiag->bfa)); + + lb_req->lb_mode = loopback->lb_mode; + lb_req->speed = loopback->speed; + lb_req->loopcnt = loopback->loopcnt; + lb_req->pattern = loopback->pattern; + + /* ring door bell */ + bfa_reqq_produce(fcdiag->bfa, BFA_REQQ_DIAG, lb_req->mh); + + bfa_trc(fcdiag, loopback->lb_mode); + bfa_trc(fcdiag, loopback->speed); + bfa_trc(fcdiag, loopback->loopcnt); + bfa_trc(fcdiag, loopback->pattern); + return BFA_STATUS_OK; +} + +/* + * cpe/rme intr handler + */ +void +bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg) +{ + struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); + + switch (msg->mhdr.msg_id) { + case BFI_DIAG_I2H_LOOPBACK: + bfa_fcdiag_loopback_comp(fcdiag, + (struct bfi_diag_lb_rsp_s *) msg); + break; + case BFI_DIAG_I2H_QTEST: + bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg); + break; + default: + bfa_trc(fcdiag, msg->mhdr.msg_id); + WARN_ON(1); + } +} + +/* + * Loopback test + * + * @param[in] *bfa - bfa data struct + * @param[in] opmode - port operation mode + * @param[in] speed - port speed + * @param[in] lpcnt - loop count + * @param[in] pat - pattern to build packet + * @param[in] *result - pt to bfa_diag_loopback_result_t data struct + * @param[in] cbfn - callback function + * @param[in] cbarg - callback functioin arg + * + * @param[out] + */ +bfa_status_t +bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode, + enum bfa_port_speed speed, u32 lpcnt, u32 pat, + struct bfa_diag_loopback_result_s *result, bfa_cb_diag_t cbfn, + void *cbarg) +{ + struct bfa_diag_loopback_s loopback; + struct bfa_port_attr_s attr; + bfa_status_t status; + struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); + + if (!bfa_iocfc_is_operational(bfa)) + return BFA_STATUS_IOC_NON_OP; + + /* if port is PBC disabled, return error */ + if (bfa_fcport_is_pbcdisabled(bfa)) { + bfa_trc(fcdiag, BFA_STATUS_PBC); + return BFA_STATUS_PBC; + } + + if (bfa_fcport_is_disabled(bfa) == BFA_FALSE) { + bfa_trc(fcdiag, opmode); + return BFA_STATUS_PORT_NOT_DISABLED; + } + + /* + * Check if input speed is supported by the port mode + */ + if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) { + if (!(speed == BFA_PORT_SPEED_1GBPS || + speed == BFA_PORT_SPEED_2GBPS || + speed == BFA_PORT_SPEED_4GBPS || + speed == BFA_PORT_SPEED_8GBPS || + speed == BFA_PORT_SPEED_16GBPS || + speed == BFA_PORT_SPEED_AUTO)) { + bfa_trc(fcdiag, speed); + return BFA_STATUS_UNSUPP_SPEED; + } + bfa_fcport_get_attr(bfa, &attr); + bfa_trc(fcdiag, attr.speed_supported); + if (speed 
> attr.speed_supported) + return BFA_STATUS_UNSUPP_SPEED; + } else { + if (speed != BFA_PORT_SPEED_10GBPS) { + bfa_trc(fcdiag, speed); + return BFA_STATUS_UNSUPP_SPEED; + } + } + + /* For Mezz card, port speed entered needs to be checked */ + if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) { + if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) { + if ((speed == BFA_PORT_SPEED_1GBPS) && + (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) + return BFA_STATUS_UNSUPP_SPEED; + if (!(speed == BFA_PORT_SPEED_1GBPS || + speed == BFA_PORT_SPEED_2GBPS || + speed == BFA_PORT_SPEED_4GBPS || + speed == BFA_PORT_SPEED_8GBPS || + speed == BFA_PORT_SPEED_16GBPS || + speed == BFA_PORT_SPEED_AUTO)) + return BFA_STATUS_UNSUPP_SPEED; + } else { + if (speed != BFA_PORT_SPEED_10GBPS) + return BFA_STATUS_UNSUPP_SPEED; + } + } + + /* check to see if there is another destructive diag cmd running */ + if (fcdiag->lb.lock) { + bfa_trc(fcdiag, fcdiag->lb.lock); + return BFA_STATUS_DEVBUSY; + } + + fcdiag->lb.lock = 1; + loopback.lb_mode = opmode; + loopback.speed = speed; + loopback.loopcnt = lpcnt; + loopback.pattern = pat; + fcdiag->lb.result = result; + fcdiag->lb.cbfn = cbfn; + fcdiag->lb.cbarg = cbarg; + memset(result, 0, sizeof(struct bfa_diag_loopback_result_s)); + bfa_fcdiag_set_busy_status(fcdiag); + + /* Send msg to fw */ + status = bfa_fcdiag_loopback_send(fcdiag, &loopback); + return status; +} + +/* + * DIAG queue test command + * + * @param[in] *bfa - bfa data struct + * @param[in] force - 1: don't do ioc op checking + * @param[in] queue - queue no. to test + * @param[in] *result - pt to bfa_diag_qtest_result_t data struct + * @param[in] cbfn - callback function + * @param[in] *cbarg - callback functioin arg + * + * @param[out] + */ +bfa_status_t +bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 force, u32 queue, + struct bfa_diag_qtest_result_s *result, bfa_cb_diag_t cbfn, + void *cbarg) +{ + struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); + bfa_status_t status; + bfa_trc(fcdiag, force); + bfa_trc(fcdiag, queue); + + if (!force && !bfa_iocfc_is_operational(bfa)) + return BFA_STATUS_IOC_NON_OP; + + /* check to see if there is another destructive diag cmd running */ + if (fcdiag->qtest.lock) { + bfa_trc(fcdiag, fcdiag->qtest.lock); + return BFA_STATUS_DEVBUSY; + } + + /* Initialization */ + fcdiag->qtest.lock = 1; + fcdiag->qtest.cbfn = cbfn; + fcdiag->qtest.cbarg = cbarg; + fcdiag->qtest.result = result; + fcdiag->qtest.count = QTEST_CNT_DEFAULT; + + /* Init test results */ + fcdiag->qtest.result->status = BFA_STATUS_OK; + fcdiag->qtest.result->count = 0; + + /* send */ + if (queue < BFI_IOC_MAX_CQS) { + fcdiag->qtest.result->queue = (u8)queue; + fcdiag->qtest.queue = (u8)queue; + fcdiag->qtest.all = 0; + } else { + fcdiag->qtest.result->queue = 0; + fcdiag->qtest.queue = 0; + fcdiag->qtest.all = 1; + } + status = bfa_fcdiag_queuetest_send(fcdiag); + + /* Start a timer */ + if (status == BFA_STATUS_OK) { + bfa_timer_start(bfa, &fcdiag->qtest.timer, + bfa_fcdiag_queuetest_timeout, fcdiag, + BFA_DIAG_QTEST_TOV); + fcdiag->qtest.timer_active = 1; + } + return status; +} +/* + * DIAG PLB is running + * + * @param[in] *bfa - bfa data struct + * + * @param[out] + */ +bfa_status_t +bfa_fcdiag_lb_is_running(struct bfa_s *bfa) +{ + struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); + return fcdiag->lb.lock ? 
BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK; +} diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h index 5902a45c080..95adb86d376 100644 --- a/drivers/scsi/bfa/bfa_svc.h +++ b/drivers/scsi/bfa/bfa_svc.h @@ -26,6 +26,7 @@ * Scatter-gather DMA related defines */ #define BFA_SGPG_MIN (16) +#define BFA_SGPG_MAX (8192) /* * Alignment macro for SG page allocation @@ -54,17 +55,21 @@ struct bfa_sgpg_s { */ #define BFA_SGPG_NPAGE(_nsges) (((_nsges) / BFI_SGPG_DATA_SGES) + 1) +/* Max SGPG dma segs required */ +#define BFA_SGPG_DMA_SEGS \ + BFI_MEM_DMA_NSEGS(BFA_SGPG_MAX, (uint32_t)sizeof(struct bfi_sgpg_s)) + struct bfa_sgpg_mod_s { struct bfa_s *bfa; int num_sgpgs; /* number of SG pages */ int free_sgpgs; /* number of free SG pages */ - struct bfa_sgpg_s *hsgpg_arr; /* BFA SG page array */ - struct bfi_sgpg_s *sgpg_arr; /* actual SG page array */ - u64 sgpg_arr_pa; /* SG page array DMA addr */ struct list_head sgpg_q; /* queue of free SG pages */ struct list_head sgpg_wait_q; /* wait queue for SG pages */ + struct bfa_mem_dma_s dma_seg[BFA_SGPG_DMA_SEGS]; + struct bfa_mem_kva_s kva_seg; }; #define BFA_SGPG_MOD(__bfa) (&(__bfa)->modules.sgpg_mod) +#define BFA_MEM_SGPG_KVA(__bfa) (&(BFA_SGPG_MOD(__bfa)->kva_seg)) bfa_status_t bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs); @@ -79,26 +84,32 @@ void bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe); * FCXP related defines */ #define BFA_FCXP_MIN (1) +#define BFA_FCXP_MAX (256) #define BFA_FCXP_MAX_IBUF_SZ (2 * 1024 + 256) #define BFA_FCXP_MAX_LBUF_SZ (4 * 1024 + 256) +/* Max FCXP dma segs required */ +#define BFA_FCXP_DMA_SEGS \ + BFI_MEM_DMA_NSEGS(BFA_FCXP_MAX, \ + (u32)BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ) + struct bfa_fcxp_mod_s { struct bfa_s *bfa; /* backpointer to BFA */ struct bfa_fcxp_s *fcxp_list; /* array of FCXPs */ u16 num_fcxps; /* max num FCXP requests */ struct list_head fcxp_free_q; /* free FCXPs */ struct list_head fcxp_active_q; /* active FCXPs */ - void *req_pld_list_kva; /* list of FCXP req pld */ - u64 req_pld_list_pa; /* list of FCXP req pld */ - void *rsp_pld_list_kva; /* list of FCXP resp pld */ - u64 rsp_pld_list_pa; /* list of FCXP resp pld */ struct list_head wait_q; /* wait queue for free fcxp */ + struct list_head fcxp_unused_q; /* unused fcxps */ u32 req_pld_sz; u32 rsp_pld_sz; + struct bfa_mem_dma_s dma_seg[BFA_FCXP_DMA_SEGS]; + struct bfa_mem_kva_s kva_seg; }; #define BFA_FCXP_MOD(__bfa) (&(__bfa)->modules.fcxp_mod) #define BFA_FCXP_FROM_TAG(__mod, __tag) (&(__mod)->fcxp_list[__tag]) +#define BFA_MEM_FCXP_KVA(__bfa) (&(BFA_FCXP_MOD(__bfa)->kva_seg)) typedef void (*fcxp_send_cb_t) (struct bfa_s *ioc, struct bfa_fcxp_s *fcxp, void *cb_arg, bfa_status_t req_status, @@ -206,13 +217,15 @@ struct bfa_fcxp_wqe_s { #define BFA_FCXP_RSP_FCHS(_fcxp) (&((_fcxp)->rsp_info.fchs)) #define BFA_FCXP_RSP_PLD(_fcxp) (bfa_fcxp_get_rspbuf(_fcxp)) -#define BFA_FCXP_REQ_PLD_PA(_fcxp) \ - ((_fcxp)->fcxp_mod->req_pld_list_pa + \ - ((_fcxp)->fcxp_mod->req_pld_sz * (_fcxp)->fcxp_tag)) +#define BFA_FCXP_REQ_PLD_PA(_fcxp) \ + bfa_mem_get_dmabuf_pa((_fcxp)->fcxp_mod, (_fcxp)->fcxp_tag, \ + (_fcxp)->fcxp_mod->req_pld_sz + (_fcxp)->fcxp_mod->rsp_pld_sz) -#define BFA_FCXP_RSP_PLD_PA(_fcxp) \ - ((_fcxp)->fcxp_mod->rsp_pld_list_pa + \ - ((_fcxp)->fcxp_mod->rsp_pld_sz * (_fcxp)->fcxp_tag)) +/* fcxp_buf = req_buf + rsp_buf :- add req_buf_sz to get to rsp_buf */ +#define BFA_FCXP_RSP_PLD_PA(_fcxp) \ + (bfa_mem_get_dmabuf_pa((_fcxp)->fcxp_mod, (_fcxp)->fcxp_tag, \ + (_fcxp)->fcxp_mod->req_pld_sz + 
(_fcxp)->fcxp_mod->rsp_pld_sz) + \ + (_fcxp)->fcxp_mod->req_pld_sz) void bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); @@ -238,10 +251,13 @@ struct bfa_rport_mod_s { struct bfa_rport_s *rps_list; /* list of rports */ struct list_head rp_free_q; /* free bfa_rports */ struct list_head rp_active_q; /* free bfa_rports */ + struct list_head rp_unused_q; /* unused bfa rports */ u16 num_rports; /* number of rports */ + struct bfa_mem_kva_s kva_seg; }; #define BFA_RPORT_MOD(__bfa) (&(__bfa)->modules.rport_mod) +#define BFA_MEM_RPORT_KVA(__bfa) (&(BFA_RPORT_MOD(__bfa)->kva_seg)) /* * Convert rport tag to RPORT @@ -254,6 +270,7 @@ struct bfa_rport_mod_s { * protected functions */ void bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); +void bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw); /* * BFA rport information. @@ -280,6 +297,7 @@ struct bfa_rport_s { void *rport_drv; /* fcs/driver rport object */ u16 fw_handle; /* firmware rport handle */ u16 rport_tag; /* BFA rport tag */ + u8 lun_mask; /* LUN mask flag */ struct bfa_rport_info_s rport_info; /* rport info from fcs/driver */ struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ struct bfa_cb_qe_s hcb_qe; /* BFA callback qelem */ @@ -298,7 +316,7 @@ struct bfa_rport_s { */ #define BFA_UF_MIN (4) - +#define BFA_UF_MAX (256) struct bfa_uf_s { struct list_head qe; /* queue element */ @@ -326,36 +344,41 @@ struct bfa_uf_s { */ typedef void (*bfa_cb_uf_recv_t) (void *cbarg, struct bfa_uf_s *uf); +#define BFA_UF_BUFSZ (2 * 1024 + 256) + +struct bfa_uf_buf_s { + u8 d[BFA_UF_BUFSZ]; +}; + +#define BFA_PER_UF_DMA_SZ \ + (u32)BFA_ROUNDUP(sizeof(struct bfa_uf_buf_s), BFA_DMA_ALIGN_SZ) + +/* Max UF dma segs required */ +#define BFA_UF_DMA_SEGS BFI_MEM_DMA_NSEGS(BFA_UF_MAX, BFA_PER_UF_DMA_SZ) + struct bfa_uf_mod_s { struct bfa_s *bfa; /* back pointer to BFA */ struct bfa_uf_s *uf_list; /* array of UFs */ u16 num_ufs; /* num unsolicited rx frames */ struct list_head uf_free_q; /* free UFs */ struct list_head uf_posted_q; /* UFs posted to IOC */ - struct bfa_uf_buf_s *uf_pbs_kva; /* list UF bufs request pld */ - u64 uf_pbs_pa; /* phy addr for UF bufs */ + struct list_head uf_unused_q; /* unused UF's */ struct bfi_uf_buf_post_s *uf_buf_posts; /* pre-built UF post msgs */ bfa_cb_uf_recv_t ufrecv; /* uf recv handler function */ void *cbarg; /* uf receive handler arg */ + struct bfa_mem_dma_s dma_seg[BFA_UF_DMA_SEGS]; + struct bfa_mem_kva_s kva_seg; }; #define BFA_UF_MOD(__bfa) (&(__bfa)->modules.uf_mod) +#define BFA_MEM_UF_KVA(__bfa) (&(BFA_UF_MOD(__bfa)->kva_seg)) #define ufm_pbs_pa(_ufmod, _uftag) \ - ((_ufmod)->uf_pbs_pa + sizeof(struct bfa_uf_buf_s) * (_uftag)) + bfa_mem_get_dmabuf_pa(_ufmod, _uftag, BFA_PER_UF_DMA_SZ) void bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); - -#define BFA_UF_BUFSZ (2 * 1024 + 256) - -/* - * @todo private - */ -struct bfa_uf_buf_s { - u8 d[BFA_UF_BUFSZ]; -}; - +void bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw); /* * LPS - bfa lport login/logout service interface @@ -364,7 +387,8 @@ struct bfa_lps_s { struct list_head qe; /* queue element */ struct bfa_s *bfa; /* parent bfa instance */ bfa_sm_t sm; /* finite state machine */ - u8 lp_tag; /* lport tag */ + u8 bfa_tag; /* lport tag */ + u8 fw_tag; /* lport fw tag */ u8 reqq; /* lport request queue */ u8 alpa; /* ALPA for loop topologies */ u32 lp_pid; /* lport port ID */ @@ -377,8 +401,11 @@ struct bfa_lps_s { bfa_status_t status; /* login status */ u16 pdusz; /* max receive PDU size */ u16 pr_bbcred; /* BB_CREDIT from peer */ + u8 
pr_bbscn; /* BB_SCN from peer */ + u8 bb_scn; /* local BB_SCN */ u8 lsrjt_rsn; /* LSRJT reason */ u8 lsrjt_expl; /* LSRJT explanation */ + u8 lun_mask; /* LUN mask flag */ wwn_t pwwn; /* port wwn of lport */ wwn_t nwwn; /* node wwn of lport */ wwn_t pr_pwwn; /* port wwn of lport peer */ @@ -395,12 +422,15 @@ struct bfa_lps_s { struct bfa_lps_mod_s { struct list_head lps_free_q; struct list_head lps_active_q; + struct list_head lps_login_q; struct bfa_lps_s *lps_arr; int num_lps; + struct bfa_mem_kva_s kva_seg; }; #define BFA_LPS_MOD(__bfa) (&(__bfa)->modules.lps_mod) #define BFA_LPS_FROM_TAG(__mod, __tag) (&(__mod)->lps_arr[__tag]) +#define BFA_MEM_LPS_KVA(__bfa) (&(BFA_LPS_MOD(__bfa)->kva_seg)) /* * external functions @@ -413,7 +443,6 @@ void bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); */ #define BFA_FCPORT(_bfa) (&((_bfa)->modules.port)) -typedef void (*bfa_cb_port_t) (void *cbarg, enum bfa_status status); /* * Link notification data structure @@ -467,21 +496,22 @@ struct bfa_fcport_s { u8 *stats_kva; u64 stats_pa; union bfa_fcport_stats_u *stats; - union bfa_fcport_stats_u *stats_ret; /* driver stats location */ bfa_status_t stats_status; /* stats/statsclr status */ - bfa_boolean_t stats_busy; /* outstanding stats/statsclr */ + struct list_head stats_pending_q; + struct list_head statsclr_pending_q; bfa_boolean_t stats_qfull; u32 stats_reset_time; /* stats reset time stamp */ - bfa_cb_port_t stats_cbfn; /* driver callback function */ - void *stats_cbarg; /* *!< user callback arg */ bfa_boolean_t diag_busy; /* diag busy status */ bfa_boolean_t beacon; /* port beacon status */ bfa_boolean_t link_e2e_beacon; /* link beacon status */ + bfa_boolean_t bbsc_op_state; /* Cred recov Oper State */ struct bfa_fcport_trunk_s trunk; u16 fcoe_vlan; + struct bfa_mem_dma_s fcport_dma; }; #define BFA_FCPORT_MOD(__bfa) (&(__bfa)->modules.fcport) +#define BFA_MEM_FCPORT_DMA(__bfa) (&(BFA_FCPORT_MOD(__bfa)->fcport_dma)) /* * protected functions @@ -515,15 +545,19 @@ void bfa_fcport_event_register(struct bfa_s *bfa, bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa); enum bfa_port_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa); -void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit); +void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn); bfa_boolean_t bfa_fcport_is_ratelim(struct bfa_s *bfa); +void bfa_fcport_beacon(void *dev, bfa_boolean_t beacon, + bfa_boolean_t link_e2e_beacon); bfa_boolean_t bfa_fcport_is_linkup(struct bfa_s *bfa); bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa, - union bfa_fcport_stats_u *stats, - bfa_cb_port_t cbfn, void *cbarg); -bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, - void *cbarg); + struct bfa_cb_pending_q_s *cb); +bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa, + struct bfa_cb_pending_q_s *cb); bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa); +bfa_boolean_t bfa_fcport_is_trunk_enabled(struct bfa_s *bfa); +bfa_status_t bfa_fcport_is_pbcdisabled(struct bfa_s *bfa); +void bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state); /* * bfa rport API functions @@ -542,6 +576,19 @@ void bfa_cb_rport_qos_scn_prio(void *rport, struct bfa_rport_qos_attr_s new_qos_attr); /* + * Rport LUN masking related + */ +#define BFA_RPORT_TAG_INVALID 0xffff +#define BFA_LP_TAG_INVALID 0xff +void bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp); +void bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp); +bfa_boolean_t bfa_rport_lunmask_active(struct bfa_rport_s 
*rp); +wwn_t bfa_rport_get_pwwn(struct bfa_s *bfa, struct bfa_rport_s *rp); +struct bfa_rport_s *bfa_rport_get_by_wwn(struct bfa_s *bfa, u16 vf_id, + wwn_t *lpwwn, wwn_t rpwwn); +void *bfa_cb_get_rp_by_wwn(void *arg, u16 vf_id, wwn_t *lpwwn, wwn_t rpwwn); + +/* * bfa fcxp API functions */ struct bfa_fcxp_s *bfa_fcxp_alloc(void *bfad_fcxp, struct bfa_s *bfa, @@ -577,6 +624,7 @@ void bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport, bfa_status_t bfa_fcxp_abort(struct bfa_fcxp_s *fcxp); u32 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp); u32 bfa_fcxp_get_maxrsp(struct bfa_s *bfa); +void bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw); static inline void * bfa_uf_get_frmbuf(struct bfa_uf_s *uf) @@ -606,11 +654,12 @@ struct bfa_lps_s *bfa_lps_alloc(struct bfa_s *bfa); void bfa_lps_delete(struct bfa_lps_s *lps); void bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz, wwn_t pwwn, wwn_t nwwn, - bfa_boolean_t auth_en); + bfa_boolean_t auth_en, u8 bb_scn); void bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn, wwn_t nwwn); void bfa_lps_fdisclogo(struct bfa_lps_s *lps); void bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, u32 n2n_pid); +u8 bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag); u32 bfa_lps_get_base_pid(struct bfa_s *bfa); u8 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid); void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status); @@ -618,4 +667,57 @@ void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status); void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg); void bfa_cb_lps_cvl_event(void *bfad, void *uarg); +/* FAA specific APIs */ +bfa_status_t bfa_faa_enable(struct bfa_s *bfa, + bfa_cb_iocfc_t cbfn, void *cbarg); +bfa_status_t bfa_faa_disable(struct bfa_s *bfa, + bfa_cb_iocfc_t cbfn, void *cbarg); +bfa_status_t bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr, + bfa_cb_iocfc_t cbfn, void *cbarg); + +/* + * FC DIAG data structure + */ +struct bfa_fcdiag_qtest_s { + struct bfa_diag_qtest_result_s *result; + bfa_cb_diag_t cbfn; + void *cbarg; + struct bfa_timer_s timer; + u32 status; + u32 count; + u8 lock; + u8 queue; + u8 all; + u8 timer_active; +}; + +struct bfa_fcdiag_lb_s { + bfa_cb_diag_t cbfn; + void *cbarg; + void *result; + bfa_boolean_t lock; + u32 status; +}; + +struct bfa_fcdiag_s { + struct bfa_s *bfa; /* Back pointer to BFA */ + struct bfa_trc_mod_s *trcmod; + struct bfa_fcdiag_lb_s lb; + struct bfa_fcdiag_qtest_s qtest; +}; + +#define BFA_FCDIAG_MOD(__bfa) (&(__bfa)->modules.fcdiag) + +void bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg); + +bfa_status_t bfa_fcdiag_loopback(struct bfa_s *bfa, + enum bfa_port_opmode opmode, + enum bfa_port_speed speed, u32 lpcnt, u32 pat, + struct bfa_diag_loopback_result_s *result, + bfa_cb_diag_t cbfn, void *cbarg); +bfa_status_t bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 ignore, + u32 queue, struct bfa_diag_qtest_result_s *result, + bfa_cb_diag_t cbfn, void *cbarg); +bfa_status_t bfa_fcdiag_lb_is_running(struct bfa_s *bfa); + #endif /* __BFA_SVC_H__ */ diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index 59b5e9b61d7..66fb72531b3 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c @@ -56,14 +56,15 @@ int fdmi_enable = BFA_TRUE; int pcie_max_read_reqsz; int bfa_debugfs_enable = 1; int msix_disable_cb = 0, msix_disable_ct = 0; +int max_xfer_size = BFAD_MAX_SECTORS >> 1; /* Firmware releated */ -u32 bfi_image_ct_fc_size, bfi_image_ct_cna_size, bfi_image_cb_fc_size; -u32 *bfi_image_ct_fc, 
*bfi_image_ct_cna, *bfi_image_cb_fc; +u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size; +u32 *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2; -#define BFAD_FW_FILE_CT_FC "ctfw_fc.bin" -#define BFAD_FW_FILE_CT_CNA "ctfw_cna.bin" -#define BFAD_FW_FILE_CB_FC "cbfw_fc.bin" +#define BFAD_FW_FILE_CB "cbfw.bin" +#define BFAD_FW_FILE_CT "ctfw.bin" +#define BFAD_FW_FILE_CT2 "ct2fw.bin" static u32 *bfad_load_fwimg(struct pci_dev *pdev); static void bfad_free_fwimg(void); @@ -71,18 +72,18 @@ static void bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image, u32 *bfi_image_size, char *fw_name); static const char *msix_name_ct[] = { + "ctrl", "cpe0", "cpe1", "cpe2", "cpe3", - "rme0", "rme1", "rme2", "rme3", - "ctrl" }; + "rme0", "rme1", "rme2", "rme3" }; static const char *msix_name_cb[] = { "cpe0", "cpe1", "cpe2", "cpe3", "rme0", "rme1", "rme2", "rme3", "eemc", "elpu0", "elpu1", "epss", "mlpu" }; -MODULE_FIRMWARE(BFAD_FW_FILE_CT_FC); -MODULE_FIRMWARE(BFAD_FW_FILE_CT_CNA); -MODULE_FIRMWARE(BFAD_FW_FILE_CB_FC); +MODULE_FIRMWARE(BFAD_FW_FILE_CB); +MODULE_FIRMWARE(BFAD_FW_FILE_CT); +MODULE_FIRMWARE(BFAD_FW_FILE_CT2); module_param(os_name, charp, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(os_name, "OS name of the hba host machine"); @@ -144,6 +145,9 @@ MODULE_PARM_DESC(pcie_max_read_reqsz, "PCIe max read request size, default=0 " module_param(bfa_debugfs_enable, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1," " Range[false:0|true:1]"); +module_param(max_xfer_size, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(max_xfer_size, "default=32MB," + " Range[64k|128k|256k|512k|1024k|2048k]"); static void bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event); @@ -527,28 +531,26 @@ bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport) void bfad_hal_mem_release(struct bfad_s *bfad) { - int i; struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo; - struct bfa_mem_elem_s *meminfo_elem; - - for (i = 0; i < BFA_MEM_TYPE_MAX; i++) { - meminfo_elem = &hal_meminfo->meminfo[i]; - if (meminfo_elem->kva != NULL) { - switch (meminfo_elem->mem_type) { - case BFA_MEM_TYPE_KVA: - vfree(meminfo_elem->kva); - break; - case BFA_MEM_TYPE_DMA: - dma_free_coherent(&bfad->pcidev->dev, - meminfo_elem->mem_len, - meminfo_elem->kva, - (dma_addr_t) meminfo_elem->dma); - break; - default: - WARN_ON(1); - break; - } - } + struct bfa_mem_dma_s *dma_info, *dma_elem; + struct bfa_mem_kva_s *kva_info, *kva_elem; + struct list_head *dm_qe, *km_qe; + + dma_info = &hal_meminfo->dma_info; + kva_info = &hal_meminfo->kva_info; + + /* Iterate through the KVA meminfo queue */ + list_for_each(km_qe, &kva_info->qe) { + kva_elem = (struct bfa_mem_kva_s *) km_qe; + vfree(kva_elem->kva); + } + + /* Iterate through the DMA meminfo queue */ + list_for_each(dm_qe, &dma_info->qe) { + dma_elem = (struct bfa_mem_dma_s *) dm_qe; + dma_free_coherent(&bfad->pcidev->dev, + dma_elem->mem_len, dma_elem->kva, + (dma_addr_t) dma_elem->dma); } memset(hal_meminfo, 0, sizeof(struct bfa_meminfo_s)); @@ -563,15 +565,15 @@ bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg) bfa_cfg->fwcfg.num_ioim_reqs = num_ios; if (num_tms > 0) bfa_cfg->fwcfg.num_tskim_reqs = num_tms; - if (num_fcxps > 0) + if (num_fcxps > 0 && num_fcxps <= BFA_FCXP_MAX) bfa_cfg->fwcfg.num_fcxp_reqs = num_fcxps; - if (num_ufbufs > 0) + if (num_ufbufs > 0 && num_ufbufs <= BFA_UF_MAX) bfa_cfg->fwcfg.num_uf_bufs = num_ufbufs; if (reqq_size > 0) bfa_cfg->drvcfg.num_reqq_elems = reqq_size; if (rspq_size > 0) 
bfa_cfg->drvcfg.num_rspq_elems = rspq_size; - if (num_sgpgs > 0) + if (num_sgpgs > 0 && num_sgpgs <= BFA_SGPG_MAX) bfa_cfg->drvcfg.num_sgpgs = num_sgpgs; /* @@ -591,85 +593,46 @@ bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg) bfa_status_t bfad_hal_mem_alloc(struct bfad_s *bfad) { - int i; struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo; - struct bfa_mem_elem_s *meminfo_elem; - dma_addr_t phys_addr; - void *kva; + struct bfa_mem_dma_s *dma_info, *dma_elem; + struct bfa_mem_kva_s *kva_info, *kva_elem; + struct list_head *dm_qe, *km_qe; bfa_status_t rc = BFA_STATUS_OK; - int retry_count = 0; - int reset_value = 1; - int min_num_sgpgs = 512; + dma_addr_t phys_addr; bfa_cfg_get_default(&bfad->ioc_cfg); - -retry: bfad_update_hal_cfg(&bfad->ioc_cfg); bfad->cfg_data.ioc_queue_depth = bfad->ioc_cfg.fwcfg.num_ioim_reqs; - bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo); - - for (i = 0; i < BFA_MEM_TYPE_MAX; i++) { - meminfo_elem = &hal_meminfo->meminfo[i]; - switch (meminfo_elem->mem_type) { - case BFA_MEM_TYPE_KVA: - kva = vmalloc(meminfo_elem->mem_len); - if (kva == NULL) { - bfad_hal_mem_release(bfad); - rc = BFA_STATUS_ENOMEM; - goto ext; - } - memset(kva, 0, meminfo_elem->mem_len); - meminfo_elem->kva = kva; - break; - case BFA_MEM_TYPE_DMA: - kva = dma_alloc_coherent(&bfad->pcidev->dev, - meminfo_elem->mem_len, &phys_addr, GFP_KERNEL); - if (kva == NULL) { - bfad_hal_mem_release(bfad); - /* - * If we cannot allocate with default - * num_sgpages try with half the value. - */ - if (num_sgpgs > min_num_sgpgs) { - printk(KERN_INFO - "bfad[%d]: memory allocation failed" - " with num_sgpgs: %d\n", - bfad->inst_no, num_sgpgs); - nextLowerInt(&num_sgpgs); - printk(KERN_INFO - "bfad[%d]: trying to allocate memory" - " with num_sgpgs: %d\n", - bfad->inst_no, num_sgpgs); - retry_count++; - goto retry; - } else { - if (num_sgpgs_parm > 0) - num_sgpgs = num_sgpgs_parm; - else { - reset_value = - (1 << retry_count); - num_sgpgs *= reset_value; - } - rc = BFA_STATUS_ENOMEM; - goto ext; - } - } - - if (num_sgpgs_parm > 0) - num_sgpgs = num_sgpgs_parm; - else { - reset_value = (1 << retry_count); - num_sgpgs *= reset_value; - } - - memset(kva, 0, meminfo_elem->mem_len); - meminfo_elem->kva = kva; - meminfo_elem->dma = phys_addr; - break; - default: - break; + bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo, &bfad->bfa); + + dma_info = &hal_meminfo->dma_info; + kva_info = &hal_meminfo->kva_info; + + /* Iterate through the KVA meminfo queue */ + list_for_each(km_qe, &kva_info->qe) { + kva_elem = (struct bfa_mem_kva_s *) km_qe; + kva_elem->kva = vmalloc(kva_elem->mem_len); + if (kva_elem->kva == NULL) { + bfad_hal_mem_release(bfad); + rc = BFA_STATUS_ENOMEM; + goto ext; + } + memset(kva_elem->kva, 0, kva_elem->mem_len); + } + /* Iterate through the DMA meminfo queue */ + list_for_each(dm_qe, &dma_info->qe) { + dma_elem = (struct bfa_mem_dma_s *) dm_qe; + dma_elem->kva = dma_alloc_coherent(&bfad->pcidev->dev, + dma_elem->mem_len, + &phys_addr, GFP_KERNEL); + if (dma_elem->kva == NULL) { + bfad_hal_mem_release(bfad); + rc = BFA_STATUS_ENOMEM; + goto ext; } + dma_elem->dma = phys_addr; + memset(dma_elem->kva, 0, dma_elem->mem_len); } ext: return rc; @@ -780,13 +743,17 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad) pci_set_master(pdev); - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { + if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) || + (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)) { + if ((pci_set_dma_mask(pdev, 
DMA_BIT_MASK(32)) != 0) || + (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) { printk(KERN_ERR "pci_set_dma_mask fail %p\n", pdev); goto out_release_region; } + } bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0)); + bfad->pci_bar2_kva = pci_iomap(pdev, 2, pci_resource_len(pdev, 2)); if (bfad->pci_bar0_kva == NULL) { printk(KERN_ERR "Fail to map bar0\n"); @@ -797,6 +764,7 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad) bfad->hal_pcidev.pci_func = PCI_FUNC(pdev->devfn); bfad->hal_pcidev.pci_bar_kva = bfad->pci_bar0_kva; bfad->hal_pcidev.device_id = pdev->device; + bfad->hal_pcidev.ssid = pdev->subsystem_device; bfad->pci_name = pci_name(pdev); bfad->pci_attr.vendor_id = pdev->vendor; @@ -868,6 +836,7 @@ void bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad) { pci_iounmap(pdev, bfad->pci_bar0_kva); + pci_iounmap(pdev, bfad->pci_bar2_kva); pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); @@ -908,12 +877,29 @@ bfad_drv_init(struct bfad_s *bfad) bfad->bfa_fcs.trcmod = bfad->trcmod; bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE); bfad->bfa_fcs.fdmi_enabled = fdmi_enable; + bfa_fcs_init(&bfad->bfa_fcs); spin_unlock_irqrestore(&bfad->bfad_lock, flags); bfad->bfad_flags |= BFAD_DRV_INIT_DONE; + /* configure base port */ + rc = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM); + if (rc != BFA_STATUS_OK) + goto out_cfg_pport_fail; + return BFA_STATUS_OK; +out_cfg_pport_fail: + /* fcs exit - on cfg pport failure */ + spin_lock_irqsave(&bfad->bfad_lock, flags); + init_completion(&bfad->comp); + bfad->pport.flags |= BFAD_PORT_DELETE; + bfa_fcs_exit(&bfad->bfa_fcs); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + wait_for_completion(&bfad->comp); + /* bfa detach - free hal memory */ + bfa_detach(&bfad->bfa); + bfad_hal_mem_release(bfad); out_hal_mem_alloc_failure: return BFA_STATUS_FAILED; } @@ -945,6 +931,7 @@ bfad_drv_start(struct bfad_s *bfad) spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_iocfc_start(&bfad->bfa); + bfa_fcs_pbc_vport_init(&bfad->bfa_fcs); bfa_fcs_fabric_modstart(&bfad->bfa_fcs); bfad->bfad_flags |= BFAD_HAL_START_DONE; spin_unlock_irqrestore(&bfad->bfad_lock, flags); @@ -1032,6 +1019,12 @@ bfad_start_ops(struct bfad_s *bfad) { struct bfad_vport_s *vport, *vport_new; struct bfa_fcs_driver_info_s driver_info; + /* Limit min/max. xfer size to [64k-32MB] */ + if (max_xfer_size < BFAD_MIN_SECTORS >> 1) + max_xfer_size = BFAD_MIN_SECTORS >> 1; + if (max_xfer_size > BFAD_MAX_SECTORS >> 1) + max_xfer_size = BFAD_MAX_SECTORS >> 1; + /* Fill the driver_info info to fcs*/ memset(&driver_info, 0, sizeof(driver_info)); strncpy(driver_info.version, BFAD_DRIVER_VERSION, @@ -1049,19 +1042,19 @@ bfad_start_ops(struct bfad_s *bfad) { strncpy(driver_info.os_device_name, bfad->pci_name, sizeof(driver_info.os_device_name - 1)); - /* FCS INIT */ + /* FCS driver info init */ spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info); - bfa_fcs_init(&bfad->bfa_fcs); spin_unlock_irqrestore(&bfad->bfad_lock, flags); - retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM); - if (retval != BFA_STATUS_OK) { - if (bfa_sm_cmp_state(bfad, bfad_sm_initializing)) - bfa_sm_set_state(bfad, bfad_sm_failed); - bfad_stop(bfad); - return BFA_STATUS_FAILED; - } + /* + * FCS update cfg - reset the pwwn/nwwn of fabric base logical port + * with values learned during bfa_init firmware GETATTR REQ. 
+ */ + bfa_fcs_update_cfg(&bfad->bfa_fcs); + + /* Setup fc host fixed attribute if the lk supports */ + bfad_fc_host_init(bfad->pport.im_port); /* BFAD level FC4 IM specific resource allocation */ retval = bfad_im_probe(bfad); @@ -1233,8 +1226,8 @@ bfad_install_msix_handler(struct bfad_s *bfad) for (i = 0; i < bfad->nvec; i++) { sprintf(bfad->msix_tab[i].name, "bfa-%s-%s", bfad->pci_name, - ((bfa_asic_id_ct(bfad->hal_pcidev.device_id)) ? - msix_name_ct[i] : msix_name_cb[i])); + ((bfa_asic_id_cb(bfad->hal_pcidev.device_id)) ? + msix_name_cb[i] : msix_name_ct[i])); error = request_irq(bfad->msix_tab[i].msix.vector, (irq_handler_t) bfad_msix, 0, @@ -1248,6 +1241,9 @@ bfad_install_msix_handler(struct bfad_s *bfad) free_irq(bfad->msix_tab[j].msix.vector, &bfad->msix_tab[j]); + bfad->bfad_flags &= ~BFAD_MSIX_ON; + pci_disable_msix(bfad->pcidev); + return 1; } } @@ -1265,6 +1261,7 @@ bfad_setup_intr(struct bfad_s *bfad) u32 mask = 0, i, num_bit = 0, max_bit = 0; struct msix_entry msix_entries[MAX_MSIX_ENTRY]; struct pci_dev *pdev = bfad->pcidev; + u16 reg; /* Call BFA to get the msix map for this PCI function. */ bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit); @@ -1272,8 +1269,8 @@ bfad_setup_intr(struct bfad_s *bfad) /* Set up the msix entry table */ bfad_init_msix_entry(bfad, msix_entries, mask, max_bit); - if ((bfa_asic_id_ct(pdev->device) && !msix_disable_ct) || - (!bfa_asic_id_ct(pdev->device) && !msix_disable_cb)) { + if ((bfa_asic_id_ctc(pdev->device) && !msix_disable_ct) || + (bfa_asic_id_cb(pdev->device) && !msix_disable_cb)) { error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec); if (error) { @@ -1294,6 +1291,13 @@ bfad_setup_intr(struct bfad_s *bfad) goto line_based; } + /* Disable INTX in MSI-X mode */ + pci_read_config_word(pdev, PCI_COMMAND, ®); + + if (!(reg & PCI_COMMAND_INTX_DISABLE)) + pci_write_config_word(pdev, PCI_COMMAND, + reg | PCI_COMMAND_INTX_DISABLE); + /* Save the vectors */ for (i = 0; i < bfad->nvec; i++) { bfa_trc(bfad, msix_entries[i].vector); @@ -1315,6 +1319,7 @@ line_based: /* Enable interrupt handler failed */ return 1; } + bfad->bfad_flags |= BFAD_INTX_ON; return error; } @@ -1331,7 +1336,7 @@ bfad_remove_intr(struct bfad_s *bfad) pci_disable_msix(bfad->pcidev); bfad->bfad_flags &= ~BFAD_MSIX_ON; - } else { + } else if (bfad->bfad_flags & BFAD_INTX_ON) { free_irq(bfad->pcidev->irq, bfad); } } @@ -1343,7 +1348,7 @@ int bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) { struct bfad_s *bfad; - int error = -ENODEV, retval; + int error = -ENODEV, retval, i; /* For single port cards - only claim function 0 */ if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) && @@ -1367,6 +1372,12 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) bfa_trc_init(bfad->trcmod); bfa_trc(bfad, bfad_inst); + /* AEN INIT */ + INIT_LIST_HEAD(&bfad->free_aen_q); + INIT_LIST_HEAD(&bfad->active_aen_q); + for (i = 0; i < BFA_AEN_MAX_ENTRY; i++) + list_add_tail(&bfad->aen_list[i].qe, &bfad->free_aen_q); + if (!(bfad_load_fwimg(pdev))) { kfree(bfad->trcmod); goto out_alloc_trace_failure; @@ -1501,6 +1512,14 @@ struct pci_device_id bfad_id_table[] = { .class = (PCI_CLASS_SERIAL_FIBER << 8), .class_mask = ~0, }, + { + .vendor = BFA_PCI_VENDOR_ID_BROCADE, + .device = BFA_PCI_DEVICE_ID_CT2, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = (PCI_CLASS_SERIAL_FIBER << 8), + .class_mask = ~0, + }, {0, 0}, }; @@ -1594,33 +1613,33 @@ out: static u32 * bfad_load_fwimg(struct pci_dev *pdev) { - if (pdev->device == 
BFA_PCI_DEVICE_ID_CT_FC) { - if (bfi_image_ct_fc_size == 0) - bfad_read_firmware(pdev, &bfi_image_ct_fc, - &bfi_image_ct_fc_size, BFAD_FW_FILE_CT_FC); - return bfi_image_ct_fc; - } else if (pdev->device == BFA_PCI_DEVICE_ID_CT) { - if (bfi_image_ct_cna_size == 0) - bfad_read_firmware(pdev, &bfi_image_ct_cna, - &bfi_image_ct_cna_size, BFAD_FW_FILE_CT_CNA); - return bfi_image_ct_cna; + if (pdev->device == BFA_PCI_DEVICE_ID_CT2) { + if (bfi_image_ct2_size == 0) + bfad_read_firmware(pdev, &bfi_image_ct2, + &bfi_image_ct2_size, BFAD_FW_FILE_CT2); + return bfi_image_ct2; + } else if (bfa_asic_id_ct(pdev->device)) { + if (bfi_image_ct_size == 0) + bfad_read_firmware(pdev, &bfi_image_ct, + &bfi_image_ct_size, BFAD_FW_FILE_CT); + return bfi_image_ct; } else { - if (bfi_image_cb_fc_size == 0) - bfad_read_firmware(pdev, &bfi_image_cb_fc, - &bfi_image_cb_fc_size, BFAD_FW_FILE_CB_FC); - return bfi_image_cb_fc; + if (bfi_image_cb_size == 0) + bfad_read_firmware(pdev, &bfi_image_cb, + &bfi_image_cb_size, BFAD_FW_FILE_CB); + return bfi_image_cb; } } static void bfad_free_fwimg(void) { - if (bfi_image_ct_fc_size && bfi_image_ct_fc) - vfree(bfi_image_ct_fc); - if (bfi_image_ct_cna_size && bfi_image_ct_cna) - vfree(bfi_image_ct_cna); - if (bfi_image_cb_fc_size && bfi_image_cb_fc) - vfree(bfi_image_cb_fc); + if (bfi_image_ct2_size && bfi_image_ct2) + vfree(bfi_image_ct2); + if (bfi_image_ct_size && bfi_image_ct) + vfree(bfi_image_ct); + if (bfi_image_cb_size && bfi_image_cb) + vfree(bfi_image_cb); } module_init(bfad_init); diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c index a94ea423543..9d95844ab46 100644 --- a/drivers/scsi/bfa/bfad_attr.c +++ b/drivers/scsi/bfa/bfad_attr.c @@ -218,6 +218,9 @@ bfad_im_get_host_speed(struct Scsi_Host *shost) case BFA_PORT_SPEED_10GBPS: fc_host_speed(shost) = FC_PORTSPEED_10GBIT; break; + case BFA_PORT_SPEED_16GBPS: + fc_host_speed(shost) = FC_PORTSPEED_16GBIT; + break; case BFA_PORT_SPEED_8GBPS: fc_host_speed(shost) = FC_PORTSPEED_8GBIT; break; @@ -580,6 +583,8 @@ struct fc_function_template bfad_im_fc_function_template = { .vport_create = bfad_im_vport_create, .vport_delete = bfad_im_vport_delete, .vport_disable = bfad_im_vport_disable, + .bsg_request = bfad_im_bsg_request, + .bsg_timeout = bfad_im_bsg_timeout, }; struct fc_function_template bfad_im_vport_fc_function_template = { @@ -674,8 +679,10 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr, struct bfad_s *bfad = im_port->bfad; char model[BFA_ADAPTER_MODEL_NAME_LEN]; char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN]; + int nports = 0; bfa_get_adapter_model(&bfad->bfa, model); + nports = bfa_get_nports(&bfad->bfa); if (!strcmp(model, "Brocade-425")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "Brocade 4Gbps PCIe dual port FC HBA"); @@ -684,10 +691,10 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr, "Brocade 8Gbps PCIe dual port FC HBA"); else if (!strcmp(model, "Brocade-42B")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, - "HP 4Gbps PCIe dual port FC HBA"); + "Brocade 4Gbps PCIe dual port FC HBA for HP"); else if (!strcmp(model, "Brocade-82B")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, - "HP 8Gbps PCIe dual port FC HBA"); + "Brocade 8Gbps PCIe dual port FC HBA for HP"); else if (!strcmp(model, "Brocade-1010")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "Brocade 10Gbps single port CNA"); @@ -696,7 +703,7 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr, "Brocade 10Gbps dual port CNA"); 
else if (!strcmp(model, "Brocade-1007")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, - "Brocade 10Gbps CNA"); + "Brocade 10Gbps CNA for IBM Blade Center"); else if (!strcmp(model, "Brocade-415")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "Brocade 4Gbps PCIe single port FC HBA"); @@ -705,17 +712,45 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr, "Brocade 8Gbps PCIe single port FC HBA"); else if (!strcmp(model, "Brocade-41B")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, - "HP 4Gbps PCIe single port FC HBA"); + "Brocade 4Gbps PCIe single port FC HBA for HP"); else if (!strcmp(model, "Brocade-81B")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, - "HP 8Gbps PCIe single port FC HBA"); + "Brocade 8Gbps PCIe single port FC HBA for HP"); else if (!strcmp(model, "Brocade-804")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, - "HP Bladesystem C-class 8Gbps FC HBA"); - else if (!strcmp(model, "Brocade-902")) + "Brocade 8Gbps FC HBA for HP Bladesystem C-class"); + else if (!strcmp(model, "Brocade-902") || + !strcmp(model, "Brocade-1741")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, - "Brocade 10Gbps CNA"); - else + "Brocade 10Gbps CNA for Dell M-Series Blade Servers"); + else if (strstr(model, "Brocade-1560")) { + if (nports == 1) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "Brocade 16Gbps PCIe single port FC HBA"); + else + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "Brocade 16Gbps PCIe dual port FC HBA"); + } else if (strstr(model, "Brocade-1710")) { + if (nports == 1) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "Brocade 10Gbps single port CNA"); + else + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "Brocade 10Gbps dual port CNA"); + } else if (strstr(model, "Brocade-1860")) { + if (nports == 1 && bfa_ioc_is_cna(&bfad->bfa.ioc)) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "Brocade 10Gbps single port CNA"); + else if (nports == 1 && !bfa_ioc_is_cna(&bfad->bfa.ioc)) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "Brocade 16Gbps PCIe single port FC HBA"); + else if (nports == 2 && bfa_ioc_is_cna(&bfad->bfa.ioc)) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "Brocade 10Gbps dual port CNA"); + else if (nports == 2 && !bfa_ioc_is_cna(&bfad->bfa.ioc)) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "Brocade 16Gbps PCIe dual port FC HBA"); + } else snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "Invalid Model"); diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c new file mode 100644 index 00000000000..06fc00caeb4 --- /dev/null +++ b/drivers/scsi/bfa/bfad_bsg.c @@ -0,0 +1,3235 @@ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + * + * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ + +#include <linux/uaccess.h> +#include "bfad_drv.h" +#include "bfad_im.h" +#include "bfad_bsg.h" + +BFA_TRC_FILE(LDRV, BSG); + +int +bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; + int rc = 0; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + /* If IOC is not in disabled state - return */ + if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_IOC_FAILURE; + return rc; + } + + init_completion(&bfad->enable_comp); + bfa_iocfc_enable(&bfad->bfa); + iocmd->status = BFA_STATUS_OK; + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + wait_for_completion(&bfad->enable_comp); + + return rc; +} + +int +bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; + int rc = 0; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + if (bfad->disable_active) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return -EBUSY; + } + + bfad->disable_active = BFA_TRUE; + init_completion(&bfad->disable_comp); + bfa_iocfc_disable(&bfad->bfa); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + wait_for_completion(&bfad->disable_comp); + bfad->disable_active = BFA_FALSE; + iocmd->status = BFA_STATUS_OK; + + return rc; +} + +static int +bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd) +{ + int i; + struct bfa_bsg_ioc_info_s *iocmd = (struct bfa_bsg_ioc_info_s *)cmd; + struct bfad_im_port_s *im_port; + struct bfa_port_attr_s pattr; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfa_fcport_get_attr(&bfad->bfa, &pattr); + iocmd->nwwn = pattr.nwwn; + iocmd->pwwn = pattr.pwwn; + iocmd->ioc_type = bfa_get_type(&bfad->bfa); + iocmd->mac = bfa_get_mac(&bfad->bfa); + iocmd->factory_mac = bfa_get_mfg_mac(&bfad->bfa); + bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum); + iocmd->factorynwwn = pattr.factorynwwn; + iocmd->factorypwwn = pattr.factorypwwn; + iocmd->bfad_num = bfad->inst_no; + im_port = bfad->pport.im_port; + iocmd->host = im_port->shost->host_no; + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + strcpy(iocmd->name, bfad->adapter_name); + strcpy(iocmd->port_name, bfad->port_name); + strcpy(iocmd->hwpath, bfad->pci_name); + + /* set adapter hw path */ + strcpy(iocmd->adapter_hwpath, bfad->pci_name); + i = strlen(iocmd->adapter_hwpath) - 1; + while (iocmd->adapter_hwpath[i] != '.') + i--; + iocmd->adapter_hwpath[i] = '\0'; + iocmd->status = BFA_STATUS_OK; + return 0; +} + +static int +bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_ioc_attr_s *iocmd = (struct bfa_bsg_ioc_attr_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfa_ioc_get_attr(&bfad->bfa.ioc, &iocmd->ioc_attr); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + /* fill in driver attr info */ + strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME); + strncpy(iocmd->ioc_attr.driver_attr.driver_ver, + BFAD_DRIVER_VERSION, BFA_VERSION_LEN); + strcpy(iocmd->ioc_attr.driver_attr.fw_ver, + iocmd->ioc_attr.adapter_attr.fw_ver); + strcpy(iocmd->ioc_attr.driver_attr.bios_ver, + iocmd->ioc_attr.adapter_attr.optrom_ver); + + /* copy chip rev info first otherwise it will be overwritten */ + memcpy(bfad->pci_attr.chip_rev, iocmd->ioc_attr.pci_attr.chip_rev, + sizeof(bfad->pci_attr.chip_rev)); + memcpy(&iocmd->ioc_attr.pci_attr, &bfad->pci_attr, + sizeof(struct bfa_ioc_pci_attr_s)); + + 
iocmd->status = BFA_STATUS_OK; + return 0; +} + +int +bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_ioc_stats_s *iocmd = (struct bfa_bsg_ioc_stats_s *)cmd; + + bfa_ioc_get_stats(&bfad->bfa, &iocmd->ioc_stats); + iocmd->status = BFA_STATUS_OK; + return 0; +} + +int +bfad_iocmd_ioc_get_fwstats(struct bfad_s *bfad, void *cmd, + unsigned int payload_len) +{ + struct bfa_bsg_ioc_fwstats_s *iocmd = + (struct bfa_bsg_ioc_fwstats_s *)cmd; + void *iocmd_bufptr; + unsigned long flags; + + if (bfad_chk_iocmd_sz(payload_len, + sizeof(struct bfa_bsg_ioc_fwstats_s), + sizeof(struct bfa_fw_stats_s)) != BFA_STATUS_OK) { + iocmd->status = BFA_STATUS_VERSION_FAIL; + goto out; + } + + iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_ioc_fwstats_s); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_ioc_fw_stats_get(&bfad->bfa.ioc, iocmd_bufptr); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + if (iocmd->status != BFA_STATUS_OK) { + bfa_trc(bfad, iocmd->status); + goto out; + } +out: + bfa_trc(bfad, 0x6666); + return 0; +} + +int +bfad_iocmd_ioc_reset_stats(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; + unsigned long flags; + + if (v_cmd == IOCMD_IOC_RESET_STATS) { + bfa_ioc_clear_stats(&bfad->bfa); + iocmd->status = BFA_STATUS_OK; + } else if (v_cmd == IOCMD_IOC_RESET_FWSTATS) { + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_ioc_fw_stats_clear(&bfad->bfa.ioc); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + } + + return 0; +} + +int +bfad_iocmd_ioc_set_name(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) +{ + struct bfa_bsg_ioc_name_s *iocmd = (struct bfa_bsg_ioc_name_s *) cmd; + + if (v_cmd == IOCMD_IOC_SET_ADAPTER_NAME) + strcpy(bfad->adapter_name, iocmd->name); + else if (v_cmd == IOCMD_IOC_SET_PORT_NAME) + strcpy(bfad->port_name, iocmd->name); + + iocmd->status = BFA_STATUS_OK; + return 0; +} + +int +bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd; + + iocmd->status = BFA_STATUS_OK; + bfa_iocfc_get_attr(&bfad->bfa, &iocmd->iocfc_attr); + + return 0; +} + +int +bfad_iocmd_iocfc_set_intr(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_iocfc_intr_s *iocmd = (struct bfa_bsg_iocfc_intr_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_iocfc_israttr_set(&bfad->bfa, &iocmd->attr); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + +int +bfad_iocmd_port_enable(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_port_enable(&bfad->bfa.modules.port, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) { + bfa_trc(bfad, iocmd->status); + return 0; + } + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; + return 0; +} + +int +bfad_iocmd_port_disable(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_port_disable(&bfad->bfa.modules.port, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + if 
(iocmd->status != BFA_STATUS_OK) { + bfa_trc(bfad, iocmd->status); + return 0; + } + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; + return 0; +} + +static int +bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_port_attr_s *iocmd = (struct bfa_bsg_port_attr_s *)cmd; + struct bfa_lport_attr_s port_attr; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfa_fcport_get_attr(&bfad->bfa, &iocmd->attr); + bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + if (iocmd->attr.topology != BFA_PORT_TOPOLOGY_NONE) + iocmd->attr.pid = port_attr.pid; + else + iocmd->attr.pid = 0; + + iocmd->attr.port_type = port_attr.port_type; + iocmd->attr.loopback = port_attr.loopback; + iocmd->attr.authfail = port_attr.authfail; + strncpy(iocmd->attr.port_symname.symname, + port_attr.port_cfg.sym_name.symname, + sizeof(port_attr.port_cfg.sym_name.symname)); + + iocmd->status = BFA_STATUS_OK; + return 0; +} + +int +bfad_iocmd_port_get_stats(struct bfad_s *bfad, void *cmd, + unsigned int payload_len) +{ + struct bfa_bsg_port_stats_s *iocmd = (struct bfa_bsg_port_stats_s *)cmd; + struct bfad_hal_comp fcomp; + void *iocmd_bufptr; + unsigned long flags; + + if (bfad_chk_iocmd_sz(payload_len, + sizeof(struct bfa_bsg_port_stats_s), + sizeof(union bfa_port_stats_u)) != BFA_STATUS_OK) { + iocmd->status = BFA_STATUS_VERSION_FAIL; + return 0; + } + + iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_port_stats_s); + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_port_get_stats(&bfad->bfa.modules.port, + iocmd_bufptr, bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) { + bfa_trc(bfad, iocmd->status); + goto out; + } + + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +int +bfad_iocmd_port_reset_stats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_port_clear_stats(&bfad->bfa.modules.port, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) { + bfa_trc(bfad, iocmd->status); + return 0; + } + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; + return 0; +} + +int +bfad_iocmd_set_port_cfg(struct bfad_s *bfad, void *iocmd, unsigned int v_cmd) +{ + struct bfa_bsg_port_cfg_s *cmd = (struct bfa_bsg_port_cfg_s *)iocmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + if (v_cmd == IOCMD_PORT_CFG_TOPO) + cmd->status = bfa_fcport_cfg_topology(&bfad->bfa, cmd->param); + else if (v_cmd == IOCMD_PORT_CFG_SPEED) + cmd->status = bfa_fcport_cfg_speed(&bfad->bfa, cmd->param); + else if (v_cmd == IOCMD_PORT_CFG_ALPA) + cmd->status = bfa_fcport_cfg_hardalpa(&bfad->bfa, cmd->param); + else if (v_cmd == IOCMD_PORT_CLR_ALPA) + cmd->status = bfa_fcport_clr_hardalpa(&bfad->bfa); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + +int +bfad_iocmd_port_cfg_maxfrsize(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_port_cfg_maxfrsize_s *iocmd = + (struct bfa_bsg_port_cfg_maxfrsize_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_fcport_cfg_maxfrsize(&bfad->bfa, iocmd->maxfrsize); + 
spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + +int +bfad_iocmd_port_cfg_bbsc(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) { + if (v_cmd == IOCMD_PORT_BBSC_ENABLE) + fcport->cfg.bb_scn_state = BFA_TRUE; + else if (v_cmd == IOCMD_PORT_BBSC_DISABLE) + fcport->cfg.bb_scn_state = BFA_FALSE; + } + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + iocmd->status = BFA_STATUS_OK; + return 0; +} + +static int +bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd) +{ + struct bfa_fcs_lport_s *fcs_port; + struct bfa_bsg_lport_attr_s *iocmd = (struct bfa_bsg_lport_attr_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->pwwn); + if (fcs_port == NULL) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_LWWN; + goto out; + } + + bfa_fcs_lport_get_attr(fcs_port, &iocmd->port_attr); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; +out: + return 0; +} + +int +bfad_iocmd_lport_get_stats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_fcs_lport_s *fcs_port; + struct bfa_bsg_lport_stats_s *iocmd = + (struct bfa_bsg_lport_stats_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->pwwn); + if (fcs_port == NULL) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_LWWN; + goto out; + } + + bfa_fcs_lport_get_stats(fcs_port, &iocmd->port_stats); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; +out: + return 0; +} + +int +bfad_iocmd_lport_reset_stats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_fcs_lport_s *fcs_port; + struct bfa_bsg_reset_stats_s *iocmd = + (struct bfa_bsg_reset_stats_s *)cmd; + struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa); + struct list_head *qe, *qen; + struct bfa_itnim_s *itnim; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->vpwwn); + if (fcs_port == NULL) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_LWWN; + goto out; + } + + bfa_fcs_lport_clear_stats(fcs_port); + /* clear IO stats from all active itnims */ + list_for_each_safe(qe, qen, &fcpim->itnim_q) { + itnim = (struct bfa_itnim_s *) qe; + if (itnim->rport->rport_info.lp_tag != fcs_port->lp_tag) + continue; + bfa_itnim_clear_stats(itnim); + } + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; +out: + return 0; +} + +int +bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_fcs_lport_s *fcs_port; + struct bfa_bsg_lport_iostats_s *iocmd = + (struct bfa_bsg_lport_iostats_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->pwwn); + if (fcs_port == NULL) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_LWWN; + goto out; + } + + bfa_fcpim_port_iostats(&bfad->bfa, &iocmd->iostats, + fcs_port->lp_tag); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; 
+out: + return 0; +} + +int +bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd, + unsigned int payload_len) +{ + struct bfa_bsg_lport_get_rports_s *iocmd = + (struct bfa_bsg_lport_get_rports_s *)cmd; + struct bfa_fcs_lport_s *fcs_port; + unsigned long flags; + void *iocmd_bufptr; + + if (iocmd->nrports == 0) + return -EINVAL; + + if (bfad_chk_iocmd_sz(payload_len, + sizeof(struct bfa_bsg_lport_get_rports_s), + sizeof(wwn_t) * iocmd->nrports) != BFA_STATUS_OK) { + iocmd->status = BFA_STATUS_VERSION_FAIL; + return 0; + } + + iocmd_bufptr = (char *)iocmd + + sizeof(struct bfa_bsg_lport_get_rports_s); + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->pwwn); + if (fcs_port == NULL) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + bfa_trc(bfad, 0); + iocmd->status = BFA_STATUS_UNKNOWN_LWWN; + goto out; + } + + bfa_fcs_lport_get_rports(fcs_port, (wwn_t *)iocmd_bufptr, + &iocmd->nrports); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; +out: + return 0; +} + +int +bfad_iocmd_rport_get_attr(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_rport_attr_s *iocmd = (struct bfa_bsg_rport_attr_s *)cmd; + struct bfa_fcs_lport_s *fcs_port; + struct bfa_fcs_rport_s *fcs_rport; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->pwwn); + if (fcs_port == NULL) { + bfa_trc(bfad, 0); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_LWWN; + goto out; + } + + fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn); + if (fcs_rport == NULL) { + bfa_trc(bfad, 0); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_RWWN; + goto out; + } + + bfa_fcs_rport_get_attr(fcs_rport, &iocmd->attr); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; +out: + return 0; +} + +static int +bfad_iocmd_rport_get_addr(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_rport_scsi_addr_s *iocmd = + (struct bfa_bsg_rport_scsi_addr_s *)cmd; + struct bfa_fcs_lport_s *fcs_port; + struct bfa_fcs_itnim_s *fcs_itnim; + struct bfad_itnim_s *drv_itnim; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->pwwn); + if (fcs_port == NULL) { + bfa_trc(bfad, 0); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_LWWN; + goto out; + } + + fcs_itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn); + if (fcs_itnim == NULL) { + bfa_trc(bfad, 0); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_RWWN; + goto out; + } + + drv_itnim = fcs_itnim->itnim_drv; + + if (drv_itnim && drv_itnim->im_port) + iocmd->host = drv_itnim->im_port->shost->host_no; + else { + bfa_trc(bfad, 0); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_RWWN; + goto out; + } + + iocmd->target = drv_itnim->scsi_tgt_id; + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + iocmd->bus = 0; + iocmd->lun = 0; + iocmd->status = BFA_STATUS_OK; +out: + return 0; +} + +int +bfad_iocmd_rport_get_stats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_rport_stats_s *iocmd = + (struct bfa_bsg_rport_stats_s *)cmd; + struct bfa_fcs_lport_s *fcs_port; + struct bfa_fcs_rport_s *fcs_rport; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_port = 
bfa_fcs_lookup_port(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->pwwn); + if (fcs_port == NULL) { + bfa_trc(bfad, 0); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_LWWN; + goto out; + } + + fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn); + if (fcs_rport == NULL) { + bfa_trc(bfad, 0); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_RWWN; + goto out; + } + + memcpy((void *)&iocmd->stats, (void *)&fcs_rport->stats, + sizeof(struct bfa_rport_stats_s)); + memcpy((void *)&iocmd->stats.hal_stats, + (void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats), + sizeof(struct bfa_rport_hal_stats_s)); + + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; +out: + return 0; +} + +int +bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_rport_reset_stats_s *iocmd = + (struct bfa_bsg_rport_reset_stats_s *)cmd; + struct bfa_fcs_lport_s *fcs_port; + struct bfa_fcs_rport_s *fcs_rport; + struct bfa_rport_s *rport; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->pwwn); + if (fcs_port == NULL) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_LWWN; + goto out; + } + + fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn); + if (fcs_rport == NULL) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_RWWN; + goto out; + } + + memset((char *)&fcs_rport->stats, 0, sizeof(struct bfa_rport_stats_s)); + rport = bfa_fcs_rport_get_halrport(fcs_rport); + memset(&rport->stats, 0, sizeof(rport->stats)); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; +out: + return 0; +} + +int +bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_rport_set_speed_s *iocmd = + (struct bfa_bsg_rport_set_speed_s *)cmd; + struct bfa_fcs_lport_s *fcs_port; + struct bfa_fcs_rport_s *fcs_rport; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->pwwn); + if (fcs_port == NULL) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_LWWN; + goto out; + } + + fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn); + if (fcs_rport == NULL) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_RWWN; + goto out; + } + + fcs_rport->rpf.assigned_speed = iocmd->speed; + /* Set this speed in f/w only if the RPSC speed is not available */ + if (fcs_rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN) + bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; +out: + return 0; +} + +int +bfad_iocmd_vport_get_attr(struct bfad_s *bfad, void *cmd) +{ + struct bfa_fcs_vport_s *fcs_vport; + struct bfa_bsg_vport_attr_s *iocmd = (struct bfa_bsg_vport_attr_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->vpwwn); + if (fcs_vport == NULL) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_VWWN; + goto out; + } + + bfa_fcs_vport_get_attr(fcs_vport, &iocmd->vport_attr); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; +out: + return 0; +} + +int +bfad_iocmd_vport_get_stats(struct bfad_s 
*bfad, void *cmd) +{ + struct bfa_fcs_vport_s *fcs_vport; + struct bfa_bsg_vport_stats_s *iocmd = + (struct bfa_bsg_vport_stats_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->vpwwn); + if (fcs_vport == NULL) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_VWWN; + goto out; + } + + memcpy((void *)&iocmd->vport_stats, (void *)&fcs_vport->vport_stats, + sizeof(struct bfa_vport_stats_s)); + memcpy((void *)&iocmd->vport_stats.port_stats, + (void *)&fcs_vport->lport.stats, + sizeof(struct bfa_lport_stats_s)); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; +out: + return 0; +} + +int +bfad_iocmd_vport_clr_stats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_fcs_vport_s *fcs_vport; + struct bfa_bsg_reset_stats_s *iocmd = + (struct bfa_bsg_reset_stats_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->vpwwn); + if (fcs_vport == NULL) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_VWWN; + goto out; + } + + memset(&fcs_vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s)); + memset(&fcs_vport->lport.stats, 0, sizeof(struct bfa_lport_stats_s)); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; +out: + return 0; +} + +static int +bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd, + unsigned int payload_len) +{ + struct bfa_bsg_fabric_get_lports_s *iocmd = + (struct bfa_bsg_fabric_get_lports_s *)cmd; + bfa_fcs_vf_t *fcs_vf; + uint32_t nports = iocmd->nports; + unsigned long flags; + void *iocmd_bufptr; + + if (nports == 0) { + iocmd->status = BFA_STATUS_EINVAL; + goto out; + } + + if (bfad_chk_iocmd_sz(payload_len, + sizeof(struct bfa_bsg_fabric_get_lports_s), + sizeof(wwn_t[iocmd->nports])) != BFA_STATUS_OK) { + iocmd->status = BFA_STATUS_VERSION_FAIL; + goto out; + } + + iocmd_bufptr = (char *)iocmd + + sizeof(struct bfa_bsg_fabric_get_lports_s); + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id); + if (fcs_vf == NULL) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_VFID; + goto out; + } + bfa_fcs_vf_get_ports(fcs_vf, (wwn_t *)iocmd_bufptr, &nports); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + iocmd->nports = nports; + iocmd->status = BFA_STATUS_OK; +out: + return 0; +} + +int +bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd; + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + + if (cmd == IOCMD_RATELIM_ENABLE) + fcport->cfg.ratelimit = BFA_TRUE; + else if (cmd == IOCMD_RATELIM_DISABLE) + fcport->cfg.ratelimit = BFA_FALSE; + + if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN) + fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS; + + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; + + return 0; +} + +int +bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd) +{ + struct bfa_bsg_trl_speed_s *iocmd = (struct bfa_bsg_trl_speed_s *)pcmd; + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + + /* Auto and speeds greater than 
the supported speed, are invalid */ + if ((iocmd->speed == BFA_PORT_SPEED_AUTO) || + (iocmd->speed > fcport->speed_sup)) { + iocmd->status = BFA_STATUS_UNSUPP_SPEED; + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return 0; + } + + fcport->cfg.trl_def_speed = iocmd->speed; + iocmd->status = BFA_STATUS_OK; + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + +int +bfad_iocmd_cfg_fcpim(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_fcpim_s *iocmd = (struct bfa_bsg_fcpim_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfa_fcpim_path_tov_set(&bfad->bfa, iocmd->param); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; + return 0; +} + +int +bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_fcpim_modstats_s *iocmd = + (struct bfa_bsg_fcpim_modstats_s *)cmd; + struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa); + struct list_head *qe, *qen; + struct bfa_itnim_s *itnim; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + /* accumulate IO stats from itnim */ + memset((void *)&iocmd->modstats, 0, sizeof(struct bfa_itnim_iostats_s)); + list_for_each_safe(qe, qen, &fcpim->itnim_q) { + itnim = (struct bfa_itnim_s *) qe; + bfa_fcpim_add_stats(&iocmd->modstats, &(itnim->stats)); + } + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; + return 0; +} + +int +bfad_iocmd_fcpim_clr_modstats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_fcpim_modstatsclr_s *iocmd = + (struct bfa_bsg_fcpim_modstatsclr_s *)cmd; + struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa); + struct list_head *qe, *qen; + struct bfa_itnim_s *itnim; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + list_for_each_safe(qe, qen, &fcpim->itnim_q) { + itnim = (struct bfa_itnim_s *) qe; + bfa_itnim_clear_stats(itnim); + } + memset(&fcpim->del_itn_stats, 0, + sizeof(struct bfa_fcpim_del_itn_stats_s)); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; + return 0; +} + +int +bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_fcpim_del_itn_stats_s *iocmd = + (struct bfa_bsg_fcpim_del_itn_stats_s *)cmd; + struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa); + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + memcpy((void *)&iocmd->modstats, (void *)&fcpim->del_itn_stats, + sizeof(struct bfa_fcpim_del_itn_stats_s)); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + iocmd->status = BFA_STATUS_OK; + return 0; +} + +static int +bfad_iocmd_itnim_get_attr(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_itnim_attr_s *iocmd = (struct bfa_bsg_itnim_attr_s *)cmd; + struct bfa_fcs_lport_s *fcs_port; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->lpwwn); + if (!fcs_port) + iocmd->status = BFA_STATUS_UNKNOWN_LWWN; + else + iocmd->status = bfa_fcs_itnim_attr_get(fcs_port, + iocmd->rpwwn, &iocmd->attr); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return 0; +} + +static int +bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_itnim_iostats_s *iocmd = + (struct bfa_bsg_itnim_iostats_s *)cmd; + struct bfa_fcs_lport_s *fcs_port; + struct bfa_fcs_itnim_s *itnim; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->lpwwn); + if (!fcs_port) { + 
iocmd->status = BFA_STATUS_UNKNOWN_LWWN; + bfa_trc(bfad, 0); + } else { + itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn); + if (itnim == NULL) + iocmd->status = BFA_STATUS_UNKNOWN_RWWN; + else { + iocmd->status = BFA_STATUS_OK; + memcpy((void *)&iocmd->iostats, (void *) + &(bfa_fcs_itnim_get_halitn(itnim)->stats), + sizeof(struct bfa_itnim_iostats_s)); + } + } + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return 0; +} + +static int +bfad_iocmd_itnim_reset_stats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_rport_reset_stats_s *iocmd = + (struct bfa_bsg_rport_reset_stats_s *)cmd; + struct bfa_fcs_lport_s *fcs_port; + struct bfa_fcs_itnim_s *itnim; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->pwwn); + if (!fcs_port) + iocmd->status = BFA_STATUS_UNKNOWN_LWWN; + else { + itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn); + if (itnim == NULL) + iocmd->status = BFA_STATUS_UNKNOWN_RWWN; + else { + iocmd->status = BFA_STATUS_OK; + bfa_fcs_itnim_stats_clear(fcs_port, iocmd->rpwwn); + bfa_itnim_clear_stats(bfa_fcs_itnim_get_halitn(itnim)); + } + } + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + +static int +bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_itnim_itnstats_s *iocmd = + (struct bfa_bsg_itnim_itnstats_s *)cmd; + struct bfa_fcs_lport_s *fcs_port; + struct bfa_fcs_itnim_s *itnim; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->lpwwn); + if (!fcs_port) { + iocmd->status = BFA_STATUS_UNKNOWN_LWWN; + bfa_trc(bfad, 0); + } else { + itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn); + if (itnim == NULL) + iocmd->status = BFA_STATUS_UNKNOWN_RWWN; + else { + iocmd->status = BFA_STATUS_OK; + bfa_fcs_itnim_stats_get(fcs_port, iocmd->rpwwn, + &iocmd->itnstats); + } + } + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return 0; +} + +int +bfad_iocmd_fcport_enable(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_fcport_enable(&bfad->bfa); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + +int +bfad_iocmd_fcport_disable(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_fcport_disable(&bfad->bfa); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + +int +bfad_iocmd_ioc_get_pcifn_cfg(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_pcifn_cfg_s *iocmd = (struct bfa_bsg_pcifn_cfg_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_ablk_query(&bfad->bfa.modules.ablk, + &iocmd->pcifn_cfg, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + goto out; + + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +int +bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = 
bfa_ablk_pf_create(&bfad->bfa.modules.ablk, + &iocmd->pcifn_id, iocmd->port, + iocmd->pcifn_class, iocmd->bandwidth, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + goto out; + + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +int +bfad_iocmd_pcifn_delete(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_ablk_pf_delete(&bfad->bfa.modules.ablk, + iocmd->pcifn_id, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + goto out; + + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +int +bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk, + iocmd->pcifn_id, iocmd->bandwidth, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + bfa_trc(bfad, iocmd->status); + if (iocmd->status != BFA_STATUS_OK) + goto out; + + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; + bfa_trc(bfad, iocmd->status); +out: + return 0; +} + +int +bfad_iocmd_adapter_cfg_mode(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_adapter_cfg_mode_s *iocmd = + (struct bfa_bsg_adapter_cfg_mode_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags = 0; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_ablk_adapter_config(&bfad->bfa.modules.ablk, + iocmd->cfg.mode, iocmd->cfg.max_pf, + iocmd->cfg.max_vf, bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + goto out; + + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +int +bfad_iocmd_port_cfg_mode(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_port_cfg_mode_s *iocmd = + (struct bfa_bsg_port_cfg_mode_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags = 0; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_ablk_port_config(&bfad->bfa.modules.ablk, + iocmd->instance, iocmd->cfg.mode, + iocmd->cfg.max_pf, iocmd->cfg.max_vf, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + goto out; + + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +int +bfad_iocmd_ablk_optrom(struct bfad_s *bfad, unsigned int cmd, void *pcmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + if (cmd == IOCMD_FLASH_ENABLE_OPTROM) + iocmd->status = bfa_ablk_optrom_en(&bfad->bfa.modules.ablk, + bfad_hcb_comp, &fcomp); + else + iocmd->status = bfa_ablk_optrom_dis(&bfad->bfa.modules.ablk, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + if (iocmd->status != BFA_STATUS_OK) + goto out; + + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +int 
+bfad_iocmd_faa_enable(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; + unsigned long flags; + struct bfad_hal_comp fcomp; + + init_completion(&fcomp.comp); + iocmd->status = BFA_STATUS_OK; + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_faa_enable(&bfad->bfa, bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + if (iocmd->status != BFA_STATUS_OK) + goto out; + + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +int +bfad_iocmd_faa_disable(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; + unsigned long flags; + struct bfad_hal_comp fcomp; + + init_completion(&fcomp.comp); + iocmd->status = BFA_STATUS_OK; + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_faa_disable(&bfad->bfa, bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + if (iocmd->status != BFA_STATUS_OK) + goto out; + + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +int +bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + iocmd->status = BFA_STATUS_OK; + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_faa_query(&bfad->bfa, &iocmd->faa_attr, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + if (iocmd->status != BFA_STATUS_OK) + goto out; + + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +int +bfad_iocmd_cee_attr(struct bfad_s *bfad, void *cmd, unsigned int payload_len) +{ + struct bfa_bsg_cee_attr_s *iocmd = + (struct bfa_bsg_cee_attr_s *)cmd; + void *iocmd_bufptr; + struct bfad_hal_comp cee_comp; + unsigned long flags; + + if (bfad_chk_iocmd_sz(payload_len, + sizeof(struct bfa_bsg_cee_attr_s), + sizeof(struct bfa_cee_attr_s)) != BFA_STATUS_OK) { + iocmd->status = BFA_STATUS_VERSION_FAIL; + return 0; + } + + iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_attr_s); + + cee_comp.status = 0; + init_completion(&cee_comp.comp); + mutex_lock(&bfad_mutex); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_cee_get_attr(&bfad->bfa.modules.cee, iocmd_bufptr, + bfad_hcb_comp, &cee_comp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) { + mutex_unlock(&bfad_mutex); + bfa_trc(bfad, 0x5555); + goto out; + } + wait_for_completion(&cee_comp.comp); + mutex_unlock(&bfad_mutex); +out: + return 0; +} + +int +bfad_iocmd_cee_get_stats(struct bfad_s *bfad, void *cmd, + unsigned int payload_len) +{ + struct bfa_bsg_cee_stats_s *iocmd = + (struct bfa_bsg_cee_stats_s *)cmd; + void *iocmd_bufptr; + struct bfad_hal_comp cee_comp; + unsigned long flags; + + if (bfad_chk_iocmd_sz(payload_len, + sizeof(struct bfa_bsg_cee_stats_s), + sizeof(struct bfa_cee_stats_s)) != BFA_STATUS_OK) { + iocmd->status = BFA_STATUS_VERSION_FAIL; + return 0; + } + + iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_stats_s); + + cee_comp.status = 0; + init_completion(&cee_comp.comp); + mutex_lock(&bfad_mutex); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_cee_get_stats(&bfad->bfa.modules.cee, iocmd_bufptr, + bfad_hcb_comp, &cee_comp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) { + mutex_unlock(&bfad_mutex); + bfa_trc(bfad, 
0x5555); + goto out; + } + wait_for_completion(&cee_comp.comp); + mutex_unlock(&bfad_mutex); +out: + return 0; +} + +int +bfad_iocmd_cee_reset_stats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_cee_reset_stats(&bfad->bfa.modules.cee, NULL, NULL); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + bfa_trc(bfad, 0x5555); + return 0; +} + +int +bfad_iocmd_sfp_media(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_sfp_media_s *iocmd = (struct bfa_bsg_sfp_media_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_sfp_media(BFA_SFP_MOD(&bfad->bfa), &iocmd->media, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + bfa_trc(bfad, iocmd->status); + if (iocmd->status != BFA_STATUS_SFP_NOT_READY) + goto out; + + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +int +bfad_iocmd_sfp_speed(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_sfp_speed_s *iocmd = (struct bfa_bsg_sfp_speed_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_sfp_speed(BFA_SFP_MOD(&bfad->bfa), iocmd->speed, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + bfa_trc(bfad, iocmd->status); + if (iocmd->status != BFA_STATUS_SFP_NOT_READY) + goto out; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +int +bfad_iocmd_flash_get_attr(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_flash_attr_s *iocmd = + (struct bfa_bsg_flash_attr_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_flash_get_attr(BFA_FLASH(&bfad->bfa), &iocmd->attr, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + goto out; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +int +bfad_iocmd_flash_erase_part(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_flash_erase_part(BFA_FLASH(&bfad->bfa), iocmd->type, + iocmd->instance, bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + goto out; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +int +bfad_iocmd_flash_update_part(struct bfad_s *bfad, void *cmd, + unsigned int payload_len) +{ + struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd; + void *iocmd_bufptr; + struct bfad_hal_comp fcomp; + unsigned long flags; + + if (bfad_chk_iocmd_sz(payload_len, + sizeof(struct bfa_bsg_flash_s), + iocmd->bufsz) != BFA_STATUS_OK) { + iocmd->status = BFA_STATUS_VERSION_FAIL; + return 0; + } + + iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s); + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa), + iocmd->type, iocmd->instance, iocmd_bufptr, + iocmd->bufsz, 0, 
bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + goto out; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +int +bfad_iocmd_flash_read_part(struct bfad_s *bfad, void *cmd, + unsigned int payload_len) +{ + struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd; + struct bfad_hal_comp fcomp; + void *iocmd_bufptr; + unsigned long flags; + + if (bfad_chk_iocmd_sz(payload_len, + sizeof(struct bfa_bsg_flash_s), + iocmd->bufsz) != BFA_STATUS_OK) { + iocmd->status = BFA_STATUS_VERSION_FAIL; + return 0; + } + + iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s); + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), iocmd->type, + iocmd->instance, iocmd_bufptr, iocmd->bufsz, 0, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + goto out; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +int +bfad_iocmd_diag_temp(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_diag_get_temp_s *iocmd = + (struct bfa_bsg_diag_get_temp_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_diag_tsensor_query(BFA_DIAG_MOD(&bfad->bfa), + &iocmd->result, bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + bfa_trc(bfad, iocmd->status); + if (iocmd->status != BFA_STATUS_OK) + goto out; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +int +bfad_iocmd_diag_memtest(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_diag_memtest_s *iocmd = + (struct bfa_bsg_diag_memtest_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_diag_memtest(BFA_DIAG_MOD(&bfad->bfa), + &iocmd->memtest, iocmd->pat, + &iocmd->result, bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + bfa_trc(bfad, iocmd->status); + if (iocmd->status != BFA_STATUS_OK) + goto out; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +int +bfad_iocmd_diag_loopback(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_diag_loopback_s *iocmd = + (struct bfa_bsg_diag_loopback_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_fcdiag_loopback(&bfad->bfa, iocmd->opmode, + iocmd->speed, iocmd->lpcnt, iocmd->pat, + &iocmd->result, bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + bfa_trc(bfad, iocmd->status); + if (iocmd->status != BFA_STATUS_OK) + goto out; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +int +bfad_iocmd_diag_fwping(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_diag_fwping_s *iocmd = + (struct bfa_bsg_diag_fwping_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_diag_fwping(BFA_DIAG_MOD(&bfad->bfa), iocmd->cnt, + iocmd->pattern, &iocmd->result, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + bfa_trc(bfad, iocmd->status); + if (iocmd->status != BFA_STATUS_OK) + goto out; + 
bfa_trc(bfad, 0x77771); + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +int +bfad_iocmd_diag_queuetest(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_diag_qtest_s *iocmd = (struct bfa_bsg_diag_qtest_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_fcdiag_queuetest(&bfad->bfa, iocmd->force, + iocmd->queue, &iocmd->result, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + goto out; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +int +bfad_iocmd_diag_sfp(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_sfp_show_s *iocmd = + (struct bfa_bsg_sfp_show_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_sfp_show(BFA_SFP_MOD(&bfad->bfa), &iocmd->sfp, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + bfa_trc(bfad, iocmd->status); + if (iocmd->status != BFA_STATUS_OK) + goto out; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; + bfa_trc(bfad, iocmd->status); +out: + return 0; +} + +int +bfad_iocmd_diag_led(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_diag_led_s *iocmd = (struct bfa_bsg_diag_led_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_diag_ledtest(BFA_DIAG_MOD(&bfad->bfa), + &iocmd->ledtest); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return 0; +} + +int +bfad_iocmd_diag_beacon_lport(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_diag_beacon_s *iocmd = + (struct bfa_bsg_diag_beacon_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_diag_beacon_port(BFA_DIAG_MOD(&bfad->bfa), + iocmd->beacon, iocmd->link_e2e_beacon, + iocmd->second); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return 0; +} + +int +bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_diag_lb_stat_s *iocmd = + (struct bfa_bsg_diag_lb_stat_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_fcdiag_lb_is_running(&bfad->bfa); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + bfa_trc(bfad, iocmd->status); + + return 0; +} + +int +bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_phy_attr_s *iocmd = + (struct bfa_bsg_phy_attr_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_phy_get_attr(BFA_PHY(&bfad->bfa), iocmd->instance, + &iocmd->attr, bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + goto out; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +int +bfad_iocmd_phy_get_stats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_phy_stats_s *iocmd = + (struct bfa_bsg_phy_stats_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_phy_get_stats(BFA_PHY(&bfad->bfa), iocmd->instance, + &iocmd->stats, bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + goto out; + 
wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +int +bfad_iocmd_phy_read(struct bfad_s *bfad, void *cmd, unsigned int payload_len) +{ + struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd; + struct bfad_hal_comp fcomp; + void *iocmd_bufptr; + unsigned long flags; + + if (bfad_chk_iocmd_sz(payload_len, + sizeof(struct bfa_bsg_phy_s), + iocmd->bufsz) != BFA_STATUS_OK) { + iocmd->status = BFA_STATUS_VERSION_FAIL; + return 0; + } + + iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s); + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_phy_read(BFA_PHY(&bfad->bfa), + iocmd->instance, iocmd_bufptr, iocmd->bufsz, + 0, bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + goto out; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; + if (iocmd->status != BFA_STATUS_OK) + goto out; +out: + return 0; +} + +int +bfad_iocmd_vhba_query(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_vhba_attr_s *iocmd = + (struct bfa_bsg_vhba_attr_s *)cmd; + struct bfa_vhba_attr_s *attr = &iocmd->attr; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + attr->pwwn = bfad->bfa.ioc.attr->pwwn; + attr->nwwn = bfad->bfa.ioc.attr->nwwn; + attr->plog_enabled = (bfa_boolean_t)bfad->bfa.plog->plog_enabled; + attr->io_profile = bfa_fcpim_get_io_profile(&bfad->bfa); + attr->path_tov = bfa_fcpim_path_tov_get(&bfad->bfa); + iocmd->status = BFA_STATUS_OK; + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return 0; +} + +int +bfad_iocmd_phy_update(struct bfad_s *bfad, void *cmd, unsigned int payload_len) +{ + struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd; + void *iocmd_bufptr; + struct bfad_hal_comp fcomp; + unsigned long flags; + + if (bfad_chk_iocmd_sz(payload_len, + sizeof(struct bfa_bsg_phy_s), + iocmd->bufsz) != BFA_STATUS_OK) { + iocmd->status = BFA_STATUS_VERSION_FAIL; + return 0; + } + + iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s); + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_phy_update(BFA_PHY(&bfad->bfa), + iocmd->instance, iocmd_bufptr, iocmd->bufsz, + 0, bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + goto out; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +int +bfad_iocmd_porglog_get(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd; + void *iocmd_bufptr; + + if (iocmd->bufsz < sizeof(struct bfa_plog_s)) { + bfa_trc(bfad, sizeof(struct bfa_plog_s)); + iocmd->status = BFA_STATUS_EINVAL; + goto out; + } + + iocmd->status = BFA_STATUS_OK; + iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s); + memcpy(iocmd_bufptr, (u8 *) &bfad->plog_buf, sizeof(struct bfa_plog_s)); +out: + return 0; +} + +#define BFA_DEBUG_FW_CORE_CHUNK_SZ 0x4000U /* 16K chunks for FW dump */ +int +bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd, + unsigned int payload_len) +{ + struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd; + void *iocmd_bufptr; + unsigned long flags; + + if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s), + BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) { + iocmd->status = BFA_STATUS_VERSION_FAIL; + return 0; + } + + if (iocmd->bufsz < BFA_DEBUG_FW_CORE_CHUNK_SZ || + !IS_ALIGNED(iocmd->bufsz, sizeof(u16)) || + !IS_ALIGNED(iocmd->offset, 
sizeof(u32))) { + bfa_trc(bfad, BFA_DEBUG_FW_CORE_CHUNK_SZ); + iocmd->status = BFA_STATUS_EINVAL; + goto out; + } + + iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr, + (u32 *)&iocmd->offset, &iocmd->bufsz); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); +out: + return 0; +} + +int +bfad_iocmd_debug_ctl(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; + unsigned long flags; + + if (v_cmd == IOCMD_DEBUG_FW_STATE_CLR) { + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfad->bfa.ioc.dbg_fwsave_once = BFA_TRUE; + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + } else if (v_cmd == IOCMD_DEBUG_PORTLOG_CLR) + bfad->plog_buf.head = bfad->plog_buf.tail = 0; + else if (v_cmd == IOCMD_DEBUG_START_DTRC) + bfa_trc_init(bfad->trcmod); + else if (v_cmd == IOCMD_DEBUG_STOP_DTRC) + bfa_trc_stop(bfad->trcmod); + + iocmd->status = BFA_STATUS_OK; + return 0; +} + +int +bfad_iocmd_porglog_ctl(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_portlogctl_s *iocmd = (struct bfa_bsg_portlogctl_s *)cmd; + + if (iocmd->ctl == BFA_TRUE) + bfad->plog_buf.plog_enabled = 1; + else + bfad->plog_buf.plog_enabled = 0; + + iocmd->status = BFA_STATUS_OK; + return 0; +} + +int +bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) +{ + struct bfa_bsg_fcpim_profile_s *iocmd = + (struct bfa_bsg_fcpim_profile_s *)cmd; + struct timeval tv; + unsigned long flags; + + do_gettimeofday(&tv); + spin_lock_irqsave(&bfad->bfad_lock, flags); + if (v_cmd == IOCMD_FCPIM_PROFILE_ON) + iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, tv.tv_sec); + else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF) + iocmd->status = bfa_fcpim_profile_off(&bfad->bfa); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + +static int +bfad_iocmd_itnim_get_ioprofile(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_itnim_ioprofile_s *iocmd = + (struct bfa_bsg_itnim_ioprofile_s *)cmd; + struct bfa_fcs_lport_s *fcs_port; + struct bfa_fcs_itnim_s *itnim; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, + iocmd->vf_id, iocmd->lpwwn); + if (!fcs_port) + iocmd->status = BFA_STATUS_UNKNOWN_LWWN; + else { + itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn); + if (itnim == NULL) + iocmd->status = BFA_STATUS_UNKNOWN_RWWN; + else + iocmd->status = bfa_itnim_get_ioprofile( + bfa_fcs_itnim_get_halitn(itnim), + &iocmd->ioprofile); + } + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return 0; +} + +int +bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_fcport_stats_s *iocmd = + (struct bfa_bsg_fcport_stats_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + struct bfa_cb_pending_q_s cb_qe; + + init_completion(&fcomp.comp); + bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, + &fcomp, &iocmd->stats); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) { + bfa_trc(bfad, iocmd->status); + goto out; + } + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +int +bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + 
struct bfa_cb_pending_q_s cb_qe; + + init_completion(&fcomp.comp); + bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL); + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) { + bfa_trc(bfad, iocmd->status); + goto out; + } + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +int +bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa), + BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn), + &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + goto out; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +int +bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), + BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn), + &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + goto out; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +int +bfad_iocmd_preboot_query(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_preboot_s *iocmd = (struct bfa_bsg_preboot_s *)cmd; + struct bfi_iocfc_cfgrsp_s *cfgrsp = bfad->bfa.iocfc.cfgrsp; + struct bfa_boot_pbc_s *pbcfg = &iocmd->cfg; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled; + pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns; + pbcfg->speed = cfgrsp->pbc_cfg.port_speed; + memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun)); + iocmd->status = BFA_STATUS_OK; + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + +int +bfad_iocmd_ethboot_cfg(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa), + BFA_FLASH_PART_PXECFG, + bfad->bfa.ioc.port_id, &iocmd->cfg, + sizeof(struct bfa_ethboot_cfg_s), 0, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + goto out; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +int +bfad_iocmd_ethboot_query(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), + BFA_FLASH_PART_PXECFG, + bfad->bfa.ioc.port_id, &iocmd->cfg, + sizeof(struct bfa_ethboot_cfg_s), 0, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if 
(iocmd->status != BFA_STATUS_OK) + goto out; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +int +bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); + struct bfa_fcport_trunk_s *trunk = &fcport->trunk; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + + if (v_cmd == IOCMD_TRUNK_ENABLE) { + trunk->attr.state = BFA_TRUNK_OFFLINE; + bfa_fcport_disable(&bfad->bfa); + fcport->cfg.trunked = BFA_TRUE; + } else if (v_cmd == IOCMD_TRUNK_DISABLE) { + trunk->attr.state = BFA_TRUNK_DISABLED; + bfa_fcport_disable(&bfad->bfa); + fcport->cfg.trunked = BFA_FALSE; + } + + if (!bfa_fcport_is_disabled(&bfad->bfa)) + bfa_fcport_enable(&bfad->bfa); + + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + iocmd->status = BFA_STATUS_OK; + return 0; +} + +int +bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_trunk_attr_s *iocmd = (struct bfa_bsg_trunk_attr_s *)cmd; + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); + struct bfa_fcport_trunk_s *trunk = &fcport->trunk; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + memcpy((void *)&iocmd->attr, (void *)&trunk->attr, + sizeof(struct bfa_trunk_attr_s)); + iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + iocmd->status = BFA_STATUS_OK; + return 0; +} + +int +bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) { + if (v_cmd == IOCMD_QOS_ENABLE) + fcport->cfg.qos_enabled = BFA_TRUE; + else if (v_cmd == IOCMD_QOS_DISABLE) + fcport->cfg.qos_enabled = BFA_FALSE; + } + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + iocmd->status = BFA_STATUS_OK; + return 0; +} + +int +bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_qos_attr_s *iocmd = (struct bfa_bsg_qos_attr_s *)cmd; + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->attr.state = fcport->qos_attr.state; + iocmd->attr.total_bb_cr = be32_to_cpu(fcport->qos_attr.total_bb_cr); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + iocmd->status = BFA_STATUS_OK; + return 0; +} + +int +bfad_iocmd_qos_get_vc_attr(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_qos_vc_attr_s *iocmd = + (struct bfa_bsg_qos_vc_attr_s *)cmd; + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); + struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr; + unsigned long flags; + u32 i = 0; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->attr.total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count); + iocmd->attr.shared_credit = be16_to_cpu(bfa_vc_attr->shared_credit); + iocmd->attr.elp_opmode_flags = + be32_to_cpu(bfa_vc_attr->elp_opmode_flags); + + /* Individual VC info */ + while (i < iocmd->attr.total_vc_count) { + iocmd->attr.vc_info[i].vc_credit = + bfa_vc_attr->vc_info[i].vc_credit; + iocmd->attr.vc_info[i].borrow_credit = + bfa_vc_attr->vc_info[i].borrow_credit; + iocmd->attr.vc_info[i].priority = + bfa_vc_attr->vc_info[i].priority; + i++; + } + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + 
iocmd->status = BFA_STATUS_OK; + return 0; +} + +int +bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_fcport_stats_s *iocmd = + (struct bfa_bsg_fcport_stats_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + struct bfa_cb_pending_q_s cb_qe; + + init_completion(&fcomp.comp); + bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, + &fcomp, &iocmd->stats); + + spin_lock_irqsave(&bfad->bfad_lock, flags); + WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc)); + iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) { + bfa_trc(bfad, iocmd->status); + goto out; + } + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +int +bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; + struct bfad_hal_comp fcomp; + unsigned long flags; + struct bfa_cb_pending_q_s cb_qe; + + init_completion(&fcomp.comp); + bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, + &fcomp, NULL); + + spin_lock_irqsave(&bfad->bfad_lock, flags); + WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc)); + iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) { + bfa_trc(bfad, iocmd->status); + goto out; + } + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; +out: + return 0; +} + +int +bfad_iocmd_vf_get_stats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_vf_stats_s *iocmd = + (struct bfa_bsg_vf_stats_s *)cmd; + struct bfa_fcs_fabric_s *fcs_vf; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id); + if (fcs_vf == NULL) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_VFID; + goto out; + } + memcpy((void *)&iocmd->stats, (void *)&fcs_vf->stats, + sizeof(struct bfa_vf_stats_s)); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; +out: + return 0; +} + +int +bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_vf_reset_stats_s *iocmd = + (struct bfa_bsg_vf_reset_stats_s *)cmd; + struct bfa_fcs_fabric_s *fcs_vf; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id); + if (fcs_vf == NULL) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_UNKNOWN_VFID; + goto out; + } + memset((void *)&fcs_vf->stats, 0, sizeof(struct bfa_vf_stats_s)); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + iocmd->status = BFA_STATUS_OK; +out: + return 0; +} + +int +bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) + iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE); + else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) + iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE); + else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR) + iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return 0; +} + +int +bfad_iocmd_fcpim_lunmask_query(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_fcpim_lunmask_query_s *iocmd = + (struct bfa_bsg_fcpim_lunmask_query_s *)cmd; + struct 
bfa_lunmask_cfg_s *lun_mask = &iocmd->lun_mask; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_fcpim_lunmask_query(&bfad->bfa, lun_mask); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return 0; +} + +int +bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) +{ + struct bfa_bsg_fcpim_lunmask_s *iocmd = + (struct bfa_bsg_fcpim_lunmask_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + if (v_cmd == IOCMD_FCPIM_LUNMASK_ADD) + iocmd->status = bfa_fcpim_lunmask_add(&bfad->bfa, iocmd->vf_id, + &iocmd->pwwn, iocmd->rpwwn, iocmd->lun); + else if (v_cmd == IOCMD_FCPIM_LUNMASK_DELETE) + iocmd->status = bfa_fcpim_lunmask_delete(&bfad->bfa, + iocmd->vf_id, &iocmd->pwwn, + iocmd->rpwwn, iocmd->lun); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return 0; +} + +static int +bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd, + unsigned int payload_len) +{ + int rc = -EINVAL; + + switch (cmd) { + case IOCMD_IOC_ENABLE: + rc = bfad_iocmd_ioc_enable(bfad, iocmd); + break; + case IOCMD_IOC_DISABLE: + rc = bfad_iocmd_ioc_disable(bfad, iocmd); + break; + case IOCMD_IOC_GET_INFO: + rc = bfad_iocmd_ioc_get_info(bfad, iocmd); + break; + case IOCMD_IOC_GET_ATTR: + rc = bfad_iocmd_ioc_get_attr(bfad, iocmd); + break; + case IOCMD_IOC_GET_STATS: + rc = bfad_iocmd_ioc_get_stats(bfad, iocmd); + break; + case IOCMD_IOC_GET_FWSTATS: + rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len); + break; + case IOCMD_IOC_RESET_STATS: + case IOCMD_IOC_RESET_FWSTATS: + rc = bfad_iocmd_ioc_reset_stats(bfad, iocmd, cmd); + break; + case IOCMD_IOC_SET_ADAPTER_NAME: + case IOCMD_IOC_SET_PORT_NAME: + rc = bfad_iocmd_ioc_set_name(bfad, iocmd, cmd); + break; + case IOCMD_IOCFC_GET_ATTR: + rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd); + break; + case IOCMD_IOCFC_SET_INTR: + rc = bfad_iocmd_iocfc_set_intr(bfad, iocmd); + break; + case IOCMD_PORT_ENABLE: + rc = bfad_iocmd_port_enable(bfad, iocmd); + break; + case IOCMD_PORT_DISABLE: + rc = bfad_iocmd_port_disable(bfad, iocmd); + break; + case IOCMD_PORT_GET_ATTR: + rc = bfad_iocmd_port_get_attr(bfad, iocmd); + break; + case IOCMD_PORT_GET_STATS: + rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len); + break; + case IOCMD_PORT_RESET_STATS: + rc = bfad_iocmd_port_reset_stats(bfad, iocmd); + break; + case IOCMD_PORT_CFG_TOPO: + case IOCMD_PORT_CFG_SPEED: + case IOCMD_PORT_CFG_ALPA: + case IOCMD_PORT_CLR_ALPA: + rc = bfad_iocmd_set_port_cfg(bfad, iocmd, cmd); + break; + case IOCMD_PORT_CFG_MAXFRSZ: + rc = bfad_iocmd_port_cfg_maxfrsize(bfad, iocmd); + break; + case IOCMD_PORT_BBSC_ENABLE: + case IOCMD_PORT_BBSC_DISABLE: + rc = bfad_iocmd_port_cfg_bbsc(bfad, iocmd, cmd); + break; + case IOCMD_LPORT_GET_ATTR: + rc = bfad_iocmd_lport_get_attr(bfad, iocmd); + break; + case IOCMD_LPORT_GET_STATS: + rc = bfad_iocmd_lport_get_stats(bfad, iocmd); + break; + case IOCMD_LPORT_RESET_STATS: + rc = bfad_iocmd_lport_reset_stats(bfad, iocmd); + break; + case IOCMD_LPORT_GET_IOSTATS: + rc = bfad_iocmd_lport_get_iostats(bfad, iocmd); + break; + case IOCMD_LPORT_GET_RPORTS: + rc = bfad_iocmd_lport_get_rports(bfad, iocmd, payload_len); + break; + case IOCMD_RPORT_GET_ATTR: + rc = bfad_iocmd_rport_get_attr(bfad, iocmd); + break; + case IOCMD_RPORT_GET_ADDR: + rc = bfad_iocmd_rport_get_addr(bfad, iocmd); + break; + case IOCMD_RPORT_GET_STATS: + rc = bfad_iocmd_rport_get_stats(bfad, iocmd); + break; + case IOCMD_RPORT_RESET_STATS: + rc = bfad_iocmd_rport_clr_stats(bfad, 
iocmd); + break; + case IOCMD_RPORT_SET_SPEED: + rc = bfad_iocmd_rport_set_speed(bfad, iocmd); + break; + case IOCMD_VPORT_GET_ATTR: + rc = bfad_iocmd_vport_get_attr(bfad, iocmd); + break; + case IOCMD_VPORT_GET_STATS: + rc = bfad_iocmd_vport_get_stats(bfad, iocmd); + break; + case IOCMD_VPORT_RESET_STATS: + rc = bfad_iocmd_vport_clr_stats(bfad, iocmd); + break; + case IOCMD_FABRIC_GET_LPORTS: + rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len); + break; + case IOCMD_RATELIM_ENABLE: + case IOCMD_RATELIM_DISABLE: + rc = bfad_iocmd_ratelim(bfad, cmd, iocmd); + break; + case IOCMD_RATELIM_DEF_SPEED: + rc = bfad_iocmd_ratelim_speed(bfad, cmd, iocmd); + break; + case IOCMD_FCPIM_FAILOVER: + rc = bfad_iocmd_cfg_fcpim(bfad, iocmd); + break; + case IOCMD_FCPIM_MODSTATS: + rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd); + break; + case IOCMD_FCPIM_MODSTATSCLR: + rc = bfad_iocmd_fcpim_clr_modstats(bfad, iocmd); + break; + case IOCMD_FCPIM_DEL_ITN_STATS: + rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd); + break; + case IOCMD_ITNIM_GET_ATTR: + rc = bfad_iocmd_itnim_get_attr(bfad, iocmd); + break; + case IOCMD_ITNIM_GET_IOSTATS: + rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd); + break; + case IOCMD_ITNIM_RESET_STATS: + rc = bfad_iocmd_itnim_reset_stats(bfad, iocmd); + break; + case IOCMD_ITNIM_GET_ITNSTATS: + rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd); + break; + case IOCMD_FCPORT_ENABLE: + rc = bfad_iocmd_fcport_enable(bfad, iocmd); + break; + case IOCMD_FCPORT_DISABLE: + rc = bfad_iocmd_fcport_disable(bfad, iocmd); + break; + case IOCMD_IOC_PCIFN_CFG: + rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd); + break; + case IOCMD_PCIFN_CREATE: + rc = bfad_iocmd_pcifn_create(bfad, iocmd); + break; + case IOCMD_PCIFN_DELETE: + rc = bfad_iocmd_pcifn_delete(bfad, iocmd); + break; + case IOCMD_PCIFN_BW: + rc = bfad_iocmd_pcifn_bw(bfad, iocmd); + break; + case IOCMD_ADAPTER_CFG_MODE: + rc = bfad_iocmd_adapter_cfg_mode(bfad, iocmd); + break; + case IOCMD_PORT_CFG_MODE: + rc = bfad_iocmd_port_cfg_mode(bfad, iocmd); + break; + case IOCMD_FLASH_ENABLE_OPTROM: + case IOCMD_FLASH_DISABLE_OPTROM: + rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd); + break; + case IOCMD_FAA_ENABLE: + rc = bfad_iocmd_faa_enable(bfad, iocmd); + break; + case IOCMD_FAA_DISABLE: + rc = bfad_iocmd_faa_disable(bfad, iocmd); + break; + case IOCMD_FAA_QUERY: + rc = bfad_iocmd_faa_query(bfad, iocmd); + break; + case IOCMD_CEE_GET_ATTR: + rc = bfad_iocmd_cee_attr(bfad, iocmd, payload_len); + break; + case IOCMD_CEE_GET_STATS: + rc = bfad_iocmd_cee_get_stats(bfad, iocmd, payload_len); + break; + case IOCMD_CEE_RESET_STATS: + rc = bfad_iocmd_cee_reset_stats(bfad, iocmd); + break; + case IOCMD_SFP_MEDIA: + rc = bfad_iocmd_sfp_media(bfad, iocmd); + break; + case IOCMD_SFP_SPEED: + rc = bfad_iocmd_sfp_speed(bfad, iocmd); + break; + case IOCMD_FLASH_GET_ATTR: + rc = bfad_iocmd_flash_get_attr(bfad, iocmd); + break; + case IOCMD_FLASH_ERASE_PART: + rc = bfad_iocmd_flash_erase_part(bfad, iocmd); + break; + case IOCMD_FLASH_UPDATE_PART: + rc = bfad_iocmd_flash_update_part(bfad, iocmd, payload_len); + break; + case IOCMD_FLASH_READ_PART: + rc = bfad_iocmd_flash_read_part(bfad, iocmd, payload_len); + break; + case IOCMD_DIAG_TEMP: + rc = bfad_iocmd_diag_temp(bfad, iocmd); + break; + case IOCMD_DIAG_MEMTEST: + rc = bfad_iocmd_diag_memtest(bfad, iocmd); + break; + case IOCMD_DIAG_LOOPBACK: + rc = bfad_iocmd_diag_loopback(bfad, iocmd); + break; + case IOCMD_DIAG_FWPING: + rc = bfad_iocmd_diag_fwping(bfad, iocmd); + break; + case 
IOCMD_DIAG_QUEUETEST: + rc = bfad_iocmd_diag_queuetest(bfad, iocmd); + break; + case IOCMD_DIAG_SFP: + rc = bfad_iocmd_diag_sfp(bfad, iocmd); + break; + case IOCMD_DIAG_LED: + rc = bfad_iocmd_diag_led(bfad, iocmd); + break; + case IOCMD_DIAG_BEACON_LPORT: + rc = bfad_iocmd_diag_beacon_lport(bfad, iocmd); + break; + case IOCMD_DIAG_LB_STAT: + rc = bfad_iocmd_diag_lb_stat(bfad, iocmd); + break; + case IOCMD_PHY_GET_ATTR: + rc = bfad_iocmd_phy_get_attr(bfad, iocmd); + break; + case IOCMD_PHY_GET_STATS: + rc = bfad_iocmd_phy_get_stats(bfad, iocmd); + break; + case IOCMD_PHY_UPDATE_FW: + rc = bfad_iocmd_phy_update(bfad, iocmd, payload_len); + break; + case IOCMD_PHY_READ_FW: + rc = bfad_iocmd_phy_read(bfad, iocmd, payload_len); + break; + case IOCMD_VHBA_QUERY: + rc = bfad_iocmd_vhba_query(bfad, iocmd); + break; + case IOCMD_DEBUG_PORTLOG: + rc = bfad_iocmd_porglog_get(bfad, iocmd); + break; + case IOCMD_DEBUG_FW_CORE: + rc = bfad_iocmd_debug_fw_core(bfad, iocmd, payload_len); + break; + case IOCMD_DEBUG_FW_STATE_CLR: + case IOCMD_DEBUG_PORTLOG_CLR: + case IOCMD_DEBUG_START_DTRC: + case IOCMD_DEBUG_STOP_DTRC: + rc = bfad_iocmd_debug_ctl(bfad, iocmd, cmd); + break; + case IOCMD_DEBUG_PORTLOG_CTL: + rc = bfad_iocmd_porglog_ctl(bfad, iocmd); + break; + case IOCMD_FCPIM_PROFILE_ON: + case IOCMD_FCPIM_PROFILE_OFF: + rc = bfad_iocmd_fcpim_cfg_profile(bfad, iocmd, cmd); + break; + case IOCMD_ITNIM_GET_IOPROFILE: + rc = bfad_iocmd_itnim_get_ioprofile(bfad, iocmd); + break; + case IOCMD_FCPORT_GET_STATS: + rc = bfad_iocmd_fcport_get_stats(bfad, iocmd); + break; + case IOCMD_FCPORT_RESET_STATS: + rc = bfad_iocmd_fcport_reset_stats(bfad, iocmd); + break; + case IOCMD_BOOT_CFG: + rc = bfad_iocmd_boot_cfg(bfad, iocmd); + break; + case IOCMD_BOOT_QUERY: + rc = bfad_iocmd_boot_query(bfad, iocmd); + break; + case IOCMD_PREBOOT_QUERY: + rc = bfad_iocmd_preboot_query(bfad, iocmd); + break; + case IOCMD_ETHBOOT_CFG: + rc = bfad_iocmd_ethboot_cfg(bfad, iocmd); + break; + case IOCMD_ETHBOOT_QUERY: + rc = bfad_iocmd_ethboot_query(bfad, iocmd); + break; + case IOCMD_TRUNK_ENABLE: + case IOCMD_TRUNK_DISABLE: + rc = bfad_iocmd_cfg_trunk(bfad, iocmd, cmd); + break; + case IOCMD_TRUNK_GET_ATTR: + rc = bfad_iocmd_trunk_get_attr(bfad, iocmd); + break; + case IOCMD_QOS_ENABLE: + case IOCMD_QOS_DISABLE: + rc = bfad_iocmd_qos(bfad, iocmd, cmd); + break; + case IOCMD_QOS_GET_ATTR: + rc = bfad_iocmd_qos_get_attr(bfad, iocmd); + break; + case IOCMD_QOS_GET_VC_ATTR: + rc = bfad_iocmd_qos_get_vc_attr(bfad, iocmd); + break; + case IOCMD_QOS_GET_STATS: + rc = bfad_iocmd_qos_get_stats(bfad, iocmd); + break; + case IOCMD_QOS_RESET_STATS: + rc = bfad_iocmd_qos_reset_stats(bfad, iocmd); + break; + case IOCMD_VF_GET_STATS: + rc = bfad_iocmd_vf_get_stats(bfad, iocmd); + break; + case IOCMD_VF_RESET_STATS: + rc = bfad_iocmd_vf_clr_stats(bfad, iocmd); + break; + case IOCMD_FCPIM_LUNMASK_ENABLE: + case IOCMD_FCPIM_LUNMASK_DISABLE: + case IOCMD_FCPIM_LUNMASK_CLEAR: + rc = bfad_iocmd_lunmask(bfad, iocmd, cmd); + break; + case IOCMD_FCPIM_LUNMASK_QUERY: + rc = bfad_iocmd_fcpim_lunmask_query(bfad, iocmd); + break; + case IOCMD_FCPIM_LUNMASK_ADD: + case IOCMD_FCPIM_LUNMASK_DELETE: + rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd); + break; + default: + rc = -EINVAL; + break; + } + return rc; +} + +static int +bfad_im_bsg_vendor_request(struct fc_bsg_job *job) +{ + uint32_t vendor_cmd = job->request->rqst_data.h_vendor.vendor_cmd[0]; + struct bfad_im_port_s *im_port = + (struct bfad_im_port_s *) job->shost->hostdata[0]; + struct bfad_s *bfad = 
im_port->bfad; + void *payload_kbuf; + int rc = -EINVAL; + + /* Allocate a temp buffer to hold the passed in user space command */ + payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL); + if (!payload_kbuf) { + rc = -ENOMEM; + goto out; + } + + /* Copy the sg_list passed in to a linear buffer: holds the cmnd data */ + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, payload_kbuf, + job->request_payload.payload_len); + + /* Invoke IOCMD handler - to handle all the vendor command requests */ + rc = bfad_iocmd_handler(bfad, vendor_cmd, payload_kbuf, + job->request_payload.payload_len); + if (rc != BFA_STATUS_OK) + goto error; + + /* Copy the response data to the job->reply_payload sg_list */ + sg_copy_from_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, + payload_kbuf, + job->reply_payload.payload_len); + + /* free the command buffer */ + kfree(payload_kbuf); + + /* Fill the BSG job reply data */ + job->reply_len = job->reply_payload.payload_len; + job->reply->reply_payload_rcv_len = job->reply_payload.payload_len; + job->reply->result = rc; + + job->job_done(job); + return rc; +error: + /* free the command buffer */ + kfree(payload_kbuf); +out: + job->reply->result = rc; + job->reply_len = sizeof(uint32_t); + job->reply->reply_payload_rcv_len = 0; + return rc; +} + +/* FC passthru call backs */ +u64 +bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid) +{ + struct bfad_fcxp *drv_fcxp = bfad_fcxp; + struct bfa_sge_s *sge; + u64 addr; + + sge = drv_fcxp->req_sge + sgeid; + addr = (u64)(size_t) sge->sg_addr; + return addr; +} + +u32 +bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int sgeid) +{ + struct bfad_fcxp *drv_fcxp = bfad_fcxp; + struct bfa_sge_s *sge; + + sge = drv_fcxp->req_sge + sgeid; + return sge->sg_len; +} + +u64 +bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid) +{ + struct bfad_fcxp *drv_fcxp = bfad_fcxp; + struct bfa_sge_s *sge; + u64 addr; + + sge = drv_fcxp->rsp_sge + sgeid; + addr = (u64)(size_t) sge->sg_addr; + return addr; +} + +u32 +bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid) +{ + struct bfad_fcxp *drv_fcxp = bfad_fcxp; + struct bfa_sge_s *sge; + + sge = drv_fcxp->rsp_sge + sgeid; + return sge->sg_len; +} + +void +bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg, + bfa_status_t req_status, u32 rsp_len, u32 resid_len, + struct fchs_s *rsp_fchs) +{ + struct bfad_fcxp *drv_fcxp = bfad_fcxp; + + drv_fcxp->req_status = req_status; + drv_fcxp->rsp_len = rsp_len; + + /* bfa_fcxp will be automatically freed by BFA */ + drv_fcxp->bfa_fcxp = NULL; + complete(&drv_fcxp->comp); +} + +struct bfad_buf_info * +bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf, + uint32_t payload_len, uint32_t *num_sgles) +{ + struct bfad_buf_info *buf_base, *buf_info; + struct bfa_sge_s *sg_table; + int sge_num = 1; + + buf_base = kzalloc((sizeof(struct bfad_buf_info) + + sizeof(struct bfa_sge_s)) * sge_num, GFP_KERNEL); + if (!buf_base) + return NULL; + + sg_table = (struct bfa_sge_s *) (((uint8_t *)buf_base) + + (sizeof(struct bfad_buf_info) * sge_num)); + + /* Allocate dma coherent memory */ + buf_info = buf_base; + buf_info->size = payload_len; + buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev, buf_info->size, + &buf_info->phys, GFP_KERNEL); + if (!buf_info->virt) + goto out_free_mem; + + /* copy the linear bsg buffer to buf_info */ + memset(buf_info->virt, 0, buf_info->size); + memcpy(buf_info->virt, payload_kbuf, buf_info->size); + + /* + * Setup SG table + */ + sg_table->sg_len = 
buf_info->size; + sg_table->sg_addr = (void *)(size_t) buf_info->phys; + + *num_sgles = sge_num; + + return buf_base; + +out_free_mem: + kfree(buf_base); + return NULL; +} + +void +bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base, + uint32_t num_sgles) +{ + int i; + struct bfad_buf_info *buf_info = buf_base; + + if (buf_base) { + for (i = 0; i < num_sgles; buf_info++, i++) { + if (buf_info->virt != NULL) + dma_free_coherent(&bfad->pcidev->dev, + buf_info->size, buf_info->virt, + buf_info->phys); + } + kfree(buf_base); + } +} + +int +bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp, + bfa_bsg_fcpt_t *bsg_fcpt) +{ + struct bfa_fcxp_s *hal_fcxp; + struct bfad_s *bfad = drv_fcxp->port->bfad; + unsigned long flags; + uint8_t lp_tag; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + + /* Allocate bfa_fcxp structure */ + hal_fcxp = bfa_fcxp_alloc(drv_fcxp, &bfad->bfa, + drv_fcxp->num_req_sgles, + drv_fcxp->num_rsp_sgles, + bfad_fcxp_get_req_sgaddr_cb, + bfad_fcxp_get_req_sglen_cb, + bfad_fcxp_get_rsp_sgaddr_cb, + bfad_fcxp_get_rsp_sglen_cb); + if (!hal_fcxp) { + bfa_trc(bfad, 0); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return BFA_STATUS_ENOMEM; + } + + drv_fcxp->bfa_fcxp = hal_fcxp; + + lp_tag = bfa_lps_get_tag_from_pid(&bfad->bfa, bsg_fcpt->fchs.s_id); + + bfa_fcxp_send(hal_fcxp, drv_fcxp->bfa_rport, bsg_fcpt->vf_id, lp_tag, + bsg_fcpt->cts, bsg_fcpt->cos, + job->request_payload.payload_len, + &bsg_fcpt->fchs, bfad_send_fcpt_cb, bfad, + job->reply_payload.payload_len, bsg_fcpt->tsecs); + + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return BFA_STATUS_OK; +} + +int +bfad_im_bsg_els_ct_request(struct fc_bsg_job *job) +{ + struct bfa_bsg_data *bsg_data; + struct bfad_im_port_s *im_port = + (struct bfad_im_port_s *) job->shost->hostdata[0]; + struct bfad_s *bfad = im_port->bfad; + bfa_bsg_fcpt_t *bsg_fcpt; + struct bfad_fcxp *drv_fcxp; + struct bfa_fcs_lport_s *fcs_port; + struct bfa_fcs_rport_s *fcs_rport; + uint32_t command_type = job->request->msgcode; + unsigned long flags; + struct bfad_buf_info *rsp_buf_info; + void *req_kbuf = NULL, *rsp_kbuf = NULL; + int rc = -EINVAL; + + job->reply_len = sizeof(uint32_t); /* At least uint32_t reply_len */ + job->reply->reply_payload_rcv_len = 0; + + /* Get the payload passed in from userspace */ + bsg_data = (struct bfa_bsg_data *) (((char *)job->request) + + sizeof(struct fc_bsg_request)); + if (bsg_data == NULL) + goto out; + + /* + * Allocate buffer for bsg_fcpt and do a copy_from_user op for payload + * buffer of size bsg_data->payload_len + */ + bsg_fcpt = (struct bfa_bsg_fcpt_s *) + kzalloc(bsg_data->payload_len, GFP_KERNEL); + if (!bsg_fcpt) + goto out; + + if (copy_from_user((uint8_t *)bsg_fcpt, bsg_data->payload, + bsg_data->payload_len)) { + kfree(bsg_fcpt); + goto out; + } + + drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL); + if (drv_fcxp == NULL) { + rc = -ENOMEM; + goto out; + } + + spin_lock_irqsave(&bfad->bfad_lock, flags); + fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, bsg_fcpt->vf_id, + bsg_fcpt->lpwwn); + if (fcs_port == NULL) { + bsg_fcpt->status = BFA_STATUS_UNKNOWN_LWWN; + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + goto out_free_mem; + } + + /* Check if the port is online before sending FC Passthru cmd */ + if (!bfa_fcs_lport_is_online(fcs_port)) { + bsg_fcpt->status = BFA_STATUS_PORT_OFFLINE; + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + goto out_free_mem; + } + + drv_fcxp->port = fcs_port->bfad_port; + + if (drv_fcxp->port->bfad == 0) +
drv_fcxp->port->bfad = bfad; + + /* Fetch the bfa_rport - if nexus needed */ + if (command_type == FC_BSG_HST_ELS_NOLOGIN || + command_type == FC_BSG_HST_CT) { + /* BSG HST commands: no nexus needed */ + drv_fcxp->bfa_rport = NULL; + + } else if (command_type == FC_BSG_RPT_ELS || + command_type == FC_BSG_RPT_CT) { + /* BSG RPT commands: nexus needed */ + fcs_rport = bfa_fcs_lport_get_rport_by_pwwn(fcs_port, + bsg_fcpt->dpwwn); + if (fcs_rport == NULL) { + bsg_fcpt->status = BFA_STATUS_UNKNOWN_RWWN; + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + goto out_free_mem; + } + + drv_fcxp->bfa_rport = fcs_rport->bfa_rport; + + } else { /* Unknown BSG msgcode; return -EINVAL */ + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + goto out_free_mem; + } + + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + /* allocate memory for req / rsp buffers */ + req_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL); + if (!req_kbuf) { + printk(KERN_INFO "bfa %s: fcpt request buffer alloc failed\n", + bfad->pci_name); + rc = -ENOMEM; + goto out_free_mem; + } + + rsp_kbuf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL); + if (!rsp_kbuf) { + printk(KERN_INFO "bfa %s: fcpt response buffer alloc failed\n", + bfad->pci_name); + rc = -ENOMEM; + goto out_free_mem; + } + + /* map req sg - copy the sg_list passed in to the linear buffer */ + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, req_kbuf, + job->request_payload.payload_len); + + drv_fcxp->reqbuf_info = bfad_fcxp_map_sg(bfad, req_kbuf, + job->request_payload.payload_len, + &drv_fcxp->num_req_sgles); + if (!drv_fcxp->reqbuf_info) { + printk(KERN_INFO "bfa %s: fcpt request fcxp_map_sg failed\n", + bfad->pci_name); + rc = -ENOMEM; + goto out_free_mem; + } + + drv_fcxp->req_sge = (struct bfa_sge_s *) + (((uint8_t *)drv_fcxp->reqbuf_info) + + (sizeof(struct bfad_buf_info) * + drv_fcxp->num_req_sgles)); + + /* map rsp sg */ + drv_fcxp->rspbuf_info = bfad_fcxp_map_sg(bfad, rsp_kbuf, + job->reply_payload.payload_len, + &drv_fcxp->num_rsp_sgles); + if (!drv_fcxp->rspbuf_info) { + printk(KERN_INFO "bfa %s: fcpt response fcxp_map_sg failed\n", + bfad->pci_name); + rc = -ENOMEM; + goto out_free_mem; + } + + rsp_buf_info = (struct bfad_buf_info *)drv_fcxp->rspbuf_info; + drv_fcxp->rsp_sge = (struct bfa_sge_s *) + (((uint8_t *)drv_fcxp->rspbuf_info) + + (sizeof(struct bfad_buf_info) * + drv_fcxp->num_rsp_sgles)); + + /* fcxp send */ + init_completion(&drv_fcxp->comp); + rc = bfad_fcxp_bsg_send(job, drv_fcxp, bsg_fcpt); + if (rc == BFA_STATUS_OK) { + wait_for_completion(&drv_fcxp->comp); + bsg_fcpt->status = drv_fcxp->req_status; + } else { + bsg_fcpt->status = rc; + goto out_free_mem; + } + + /* fill the job->reply data */ + if (drv_fcxp->req_status == BFA_STATUS_OK) { + job->reply_len = drv_fcxp->rsp_len; + job->reply->reply_payload_rcv_len = drv_fcxp->rsp_len; + job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; + } else { + job->reply->reply_payload_rcv_len = + sizeof(struct fc_bsg_ctels_reply); + job->reply_len = sizeof(uint32_t); + job->reply->reply_data.ctels_reply.status = + FC_CTELS_STATUS_REJECT; + } + + /* Copy the response data to the reply_payload sg list */ + sg_copy_from_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, + (uint8_t *)rsp_buf_info->virt, + job->reply_payload.payload_len); + +out_free_mem: + bfad_fcxp_free_mem(bfad, drv_fcxp->rspbuf_info, + drv_fcxp->num_rsp_sgles); + bfad_fcxp_free_mem(bfad, drv_fcxp->reqbuf_info, + drv_fcxp->num_req_sgles); + kfree(req_kbuf); + 
kfree(rsp_kbuf); + + /* Need a copy to user op */ + if (copy_to_user(bsg_data->payload, (void *) bsg_fcpt, + bsg_data->payload_len)) + rc = -EIO; + + kfree(bsg_fcpt); + kfree(drv_fcxp); +out: + job->reply->result = rc; + + if (rc == BFA_STATUS_OK) + job->job_done(job); + + return rc; +} + +int +bfad_im_bsg_request(struct fc_bsg_job *job) +{ + uint32_t rc = BFA_STATUS_OK; + + switch (job->request->msgcode) { + case FC_BSG_HST_VENDOR: + /* Process BSG HST Vendor requests */ + rc = bfad_im_bsg_vendor_request(job); + break; + case FC_BSG_HST_ELS_NOLOGIN: + case FC_BSG_RPT_ELS: + case FC_BSG_HST_CT: + case FC_BSG_RPT_CT: + /* Process BSG ELS/CT commands */ + rc = bfad_im_bsg_els_ct_request(job); + break; + default: + job->reply->result = rc = -EINVAL; + job->reply->reply_payload_rcv_len = 0; + break; + } + + return rc; +} + +int +bfad_im_bsg_timeout(struct fc_bsg_job *job) +{ + /* Don't complete the BSG job request - return -EAGAIN + * to reset bsg job timeout : for ELS/CT pass thru we + * already have timer to track the request. + */ + return -EAGAIN; +} diff --git a/drivers/scsi/bfa/bfad_bsg.h b/drivers/scsi/bfa/bfad_bsg.h new file mode 100644 index 00000000000..e859adb9aa9 --- /dev/null +++ b/drivers/scsi/bfa/bfad_bsg.h @@ -0,0 +1,746 @@ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + * + * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +#ifndef BFAD_BSG_H +#define BFAD_BSG_H + +#include "bfa_defs.h" +#include "bfa_defs_fcs.h" + +/* Definitions of vendor unique structures and command codes passed in + * using FC_BSG_HST_VENDOR message code. 
+ */ +enum { + IOCMD_IOC_ENABLE = 0x1, + IOCMD_IOC_DISABLE, + IOCMD_IOC_GET_ATTR, + IOCMD_IOC_GET_INFO, + IOCMD_IOC_GET_STATS, + IOCMD_IOC_GET_FWSTATS, + IOCMD_IOC_RESET_STATS, + IOCMD_IOC_RESET_FWSTATS, + IOCMD_IOC_SET_ADAPTER_NAME, + IOCMD_IOC_SET_PORT_NAME, + IOCMD_IOCFC_GET_ATTR, + IOCMD_IOCFC_SET_INTR, + IOCMD_PORT_ENABLE, + IOCMD_PORT_DISABLE, + IOCMD_PORT_GET_ATTR, + IOCMD_PORT_GET_STATS, + IOCMD_PORT_RESET_STATS, + IOCMD_PORT_CFG_TOPO, + IOCMD_PORT_CFG_SPEED, + IOCMD_PORT_CFG_ALPA, + IOCMD_PORT_CFG_MAXFRSZ, + IOCMD_PORT_CLR_ALPA, + IOCMD_PORT_BBSC_ENABLE, + IOCMD_PORT_BBSC_DISABLE, + IOCMD_LPORT_GET_ATTR, + IOCMD_LPORT_GET_RPORTS, + IOCMD_LPORT_GET_STATS, + IOCMD_LPORT_RESET_STATS, + IOCMD_LPORT_GET_IOSTATS, + IOCMD_RPORT_GET_ATTR, + IOCMD_RPORT_GET_ADDR, + IOCMD_RPORT_GET_STATS, + IOCMD_RPORT_RESET_STATS, + IOCMD_RPORT_SET_SPEED, + IOCMD_VPORT_GET_ATTR, + IOCMD_VPORT_GET_STATS, + IOCMD_VPORT_RESET_STATS, + IOCMD_FABRIC_GET_LPORTS, + IOCMD_RATELIM_ENABLE, + IOCMD_RATELIM_DISABLE, + IOCMD_RATELIM_DEF_SPEED, + IOCMD_FCPIM_FAILOVER, + IOCMD_FCPIM_MODSTATS, + IOCMD_FCPIM_MODSTATSCLR, + IOCMD_FCPIM_DEL_ITN_STATS, + IOCMD_ITNIM_GET_ATTR, + IOCMD_ITNIM_GET_IOSTATS, + IOCMD_ITNIM_RESET_STATS, + IOCMD_ITNIM_GET_ITNSTATS, + IOCMD_IOC_PCIFN_CFG, + IOCMD_FCPORT_ENABLE, + IOCMD_FCPORT_DISABLE, + IOCMD_PCIFN_CREATE, + IOCMD_PCIFN_DELETE, + IOCMD_PCIFN_BW, + IOCMD_ADAPTER_CFG_MODE, + IOCMD_PORT_CFG_MODE, + IOCMD_FLASH_ENABLE_OPTROM, + IOCMD_FLASH_DISABLE_OPTROM, + IOCMD_FAA_ENABLE, + IOCMD_FAA_DISABLE, + IOCMD_FAA_QUERY, + IOCMD_CEE_GET_ATTR, + IOCMD_CEE_GET_STATS, + IOCMD_CEE_RESET_STATS, + IOCMD_SFP_MEDIA, + IOCMD_SFP_SPEED, + IOCMD_FLASH_GET_ATTR, + IOCMD_FLASH_ERASE_PART, + IOCMD_FLASH_UPDATE_PART, + IOCMD_FLASH_READ_PART, + IOCMD_DIAG_TEMP, + IOCMD_DIAG_MEMTEST, + IOCMD_DIAG_LOOPBACK, + IOCMD_DIAG_FWPING, + IOCMD_DIAG_QUEUETEST, + IOCMD_DIAG_SFP, + IOCMD_DIAG_LED, + IOCMD_DIAG_BEACON_LPORT, + IOCMD_DIAG_LB_STAT, + IOCMD_PHY_GET_ATTR, + IOCMD_PHY_GET_STATS, + IOCMD_PHY_UPDATE_FW, + IOCMD_PHY_READ_FW, + IOCMD_VHBA_QUERY, + IOCMD_DEBUG_PORTLOG, + IOCMD_DEBUG_FW_CORE, + IOCMD_DEBUG_FW_STATE_CLR, + IOCMD_DEBUG_PORTLOG_CLR, + IOCMD_DEBUG_START_DTRC, + IOCMD_DEBUG_STOP_DTRC, + IOCMD_DEBUG_PORTLOG_CTL, + IOCMD_FCPIM_PROFILE_ON, + IOCMD_FCPIM_PROFILE_OFF, + IOCMD_ITNIM_GET_IOPROFILE, + IOCMD_FCPORT_GET_STATS, + IOCMD_FCPORT_RESET_STATS, + IOCMD_BOOT_CFG, + IOCMD_BOOT_QUERY, + IOCMD_PREBOOT_QUERY, + IOCMD_ETHBOOT_CFG, + IOCMD_ETHBOOT_QUERY, + IOCMD_TRUNK_ENABLE, + IOCMD_TRUNK_DISABLE, + IOCMD_TRUNK_GET_ATTR, + IOCMD_QOS_ENABLE, + IOCMD_QOS_DISABLE, + IOCMD_QOS_GET_ATTR, + IOCMD_QOS_GET_VC_ATTR, + IOCMD_QOS_GET_STATS, + IOCMD_QOS_RESET_STATS, + IOCMD_VF_GET_STATS, + IOCMD_VF_RESET_STATS, + IOCMD_FCPIM_LUNMASK_ENABLE, + IOCMD_FCPIM_LUNMASK_DISABLE, + IOCMD_FCPIM_LUNMASK_CLEAR, + IOCMD_FCPIM_LUNMASK_QUERY, + IOCMD_FCPIM_LUNMASK_ADD, + IOCMD_FCPIM_LUNMASK_DELETE, +}; + +struct bfa_bsg_gen_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; +}; + +struct bfa_bsg_portlogctl_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + bfa_boolean_t ctl; + int inst_no; +}; + +struct bfa_bsg_fcpim_profile_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; +}; + +struct bfa_bsg_itnim_ioprofile_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + wwn_t lpwwn; + wwn_t rpwwn; + struct bfa_itnim_ioprofile_s ioprofile; +}; + +struct bfa_bsg_fcport_stats_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + union bfa_fcport_stats_u stats; +}; + +struct bfa_bsg_ioc_name_s { + bfa_status_t status; + u16 
bfad_num; + u16 rsvd; + char name[BFA_ADAPTER_SYM_NAME_LEN]; +}; + +struct bfa_bsg_ioc_info_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + char serialnum[64]; + char hwpath[BFA_STRING_32]; + char adapter_hwpath[BFA_STRING_32]; + char guid[BFA_ADAPTER_SYM_NAME_LEN*2]; + char name[BFA_ADAPTER_SYM_NAME_LEN]; + char port_name[BFA_ADAPTER_SYM_NAME_LEN]; + char eth_name[BFA_ADAPTER_SYM_NAME_LEN]; + wwn_t pwwn; + wwn_t nwwn; + wwn_t factorypwwn; + wwn_t factorynwwn; + mac_t mac; + mac_t factory_mac; /* Factory mac address */ + mac_t current_mac; /* Currently assigned mac address */ + enum bfa_ioc_type_e ioc_type; + u16 pvid; /* Port vlan id */ + u16 rsvd1; + u32 host; + u32 bandwidth; /* For PF support */ + u32 rsvd2; +}; + +struct bfa_bsg_ioc_attr_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_ioc_attr_s ioc_attr; +}; + +struct bfa_bsg_ioc_stats_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_ioc_stats_s ioc_stats; +}; + +struct bfa_bsg_ioc_fwstats_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + u32 buf_size; + u32 rsvd1; + u64 buf_ptr; +}; + +struct bfa_bsg_iocfc_attr_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_iocfc_attr_s iocfc_attr; +}; + +struct bfa_bsg_iocfc_intr_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_iocfc_intr_attr_s attr; +}; + +struct bfa_bsg_port_attr_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_port_attr_s attr; +}; + +struct bfa_bsg_port_cfg_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + u32 param; + u32 rsvd1; +}; + +struct bfa_bsg_port_cfg_maxfrsize_s { + bfa_status_t status; + u16 bfad_num; + u16 maxfrsize; +}; + +struct bfa_bsg_port_stats_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + u32 buf_size; + u32 rsvd1; + u64 buf_ptr; +}; + +struct bfa_bsg_lport_attr_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + wwn_t pwwn; + struct bfa_lport_attr_s port_attr; +}; + +struct bfa_bsg_lport_stats_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + wwn_t pwwn; + struct bfa_lport_stats_s port_stats; +}; + +struct bfa_bsg_lport_iostats_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + wwn_t pwwn; + struct bfa_itnim_iostats_s iostats; +}; + +struct bfa_bsg_lport_get_rports_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + wwn_t pwwn; + u64 rbuf_ptr; + u32 nrports; + u32 rsvd; +}; + +struct bfa_bsg_rport_attr_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + wwn_t pwwn; + wwn_t rpwwn; + struct bfa_rport_attr_s attr; +}; + +struct bfa_bsg_rport_stats_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + wwn_t pwwn; + wwn_t rpwwn; + struct bfa_rport_stats_s stats; +}; + +struct bfa_bsg_rport_scsi_addr_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + wwn_t pwwn; + wwn_t rpwwn; + u32 host; + u32 bus; + u32 target; + u32 lun; +}; + +struct bfa_bsg_rport_reset_stats_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + wwn_t pwwn; + wwn_t rpwwn; +}; + +struct bfa_bsg_rport_set_speed_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + enum bfa_port_speed speed; + u32 rsvd; + wwn_t pwwn; + wwn_t rpwwn; +}; + +struct bfa_bsg_vport_attr_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + wwn_t vpwwn; + struct bfa_vport_attr_s vport_attr; +}; + +struct bfa_bsg_vport_stats_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + wwn_t vpwwn; + struct bfa_vport_stats_s vport_stats; +}; + +struct bfa_bsg_reset_stats_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + wwn_t vpwwn; 
+}; + +struct bfa_bsg_fabric_get_lports_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + u64 buf_ptr; + u32 nports; + u32 rsvd; +}; + +struct bfa_bsg_trl_speed_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + enum bfa_port_speed speed; +}; + +struct bfa_bsg_fcpim_s { + bfa_status_t status; + u16 bfad_num; + u16 param; +}; + +struct bfa_bsg_fcpim_modstats_s { + bfa_status_t status; + u16 bfad_num; + struct bfa_itnim_iostats_s modstats; +}; + +struct bfa_bsg_fcpim_del_itn_stats_s { + bfa_status_t status; + u16 bfad_num; + struct bfa_fcpim_del_itn_stats_s modstats; +}; + +struct bfa_bsg_fcpim_modstatsclr_s { + bfa_status_t status; + u16 bfad_num; +}; + +struct bfa_bsg_itnim_attr_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + wwn_t lpwwn; + wwn_t rpwwn; + struct bfa_itnim_attr_s attr; +}; + +struct bfa_bsg_itnim_iostats_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + wwn_t lpwwn; + wwn_t rpwwn; + struct bfa_itnim_iostats_s iostats; +}; + +struct bfa_bsg_itnim_itnstats_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + wwn_t lpwwn; + wwn_t rpwwn; + struct bfa_itnim_stats_s itnstats; +}; + +struct bfa_bsg_pcifn_cfg_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_ablk_cfg_s pcifn_cfg; +}; + +struct bfa_bsg_pcifn_s { + bfa_status_t status; + u16 bfad_num; + u16 pcifn_id; + u32 bandwidth; + u8 port; + enum bfi_pcifn_class pcifn_class; + u8 rsvd[1]; +}; + +struct bfa_bsg_adapter_cfg_mode_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_adapter_cfg_mode_s cfg; +}; + +struct bfa_bsg_port_cfg_mode_s { + bfa_status_t status; + u16 bfad_num; + u16 instance; + struct bfa_port_cfg_mode_s cfg; +}; + +struct bfa_bsg_faa_attr_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_faa_attr_s faa_attr; +}; + +struct bfa_bsg_cee_attr_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + u32 buf_size; + u32 rsvd1; + u64 buf_ptr; +}; + +struct bfa_bsg_cee_stats_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + u32 buf_size; + u32 rsvd1; + u64 buf_ptr; +}; + +struct bfa_bsg_sfp_media_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + enum bfa_defs_sfp_media_e media; +}; + +struct bfa_bsg_sfp_speed_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + enum bfa_port_speed speed; +}; + +struct bfa_bsg_flash_attr_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_flash_attr_s attr; +}; + +struct bfa_bsg_flash_s { + bfa_status_t status; + u16 bfad_num; + u8 instance; + u8 rsvd; + enum bfa_flash_part_type type; + int bufsz; + u64 buf_ptr; +}; + +struct bfa_bsg_diag_get_temp_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_diag_results_tempsensor_s result; +}; + +struct bfa_bsg_diag_memtest_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd[3]; + u32 pat; + struct bfa_diag_memtest_result result; + struct bfa_diag_memtest_s memtest; +}; + +struct bfa_bsg_diag_loopback_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + enum bfa_port_opmode opmode; + enum bfa_port_speed speed; + u32 lpcnt; + u32 pat; + struct bfa_diag_loopback_result_s result; +}; + +struct bfa_bsg_diag_fwping_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + u32 cnt; + u32 pattern; + struct bfa_diag_results_fwping result; +}; + +struct bfa_bsg_diag_qtest_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + u32 force; + u32 queue; + struct bfa_diag_qtest_result_s result; +}; + +struct bfa_bsg_sfp_show_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct sfp_mem_s sfp; +}; + 
+struct bfa_bsg_diag_led_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_diag_ledtest_s ledtest; +}; + +struct bfa_bsg_diag_beacon_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + bfa_boolean_t beacon; + bfa_boolean_t link_e2e_beacon; + u32 second; +}; + +struct bfa_bsg_diag_lb_stat_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; +}; + +struct bfa_bsg_phy_attr_s { + bfa_status_t status; + u16 bfad_num; + u16 instance; + struct bfa_phy_attr_s attr; +}; + +struct bfa_bsg_phy_s { + bfa_status_t status; + u16 bfad_num; + u16 instance; + u64 bufsz; + u64 buf_ptr; +}; + +struct bfa_bsg_debug_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + u32 bufsz; + int inst_no; + u64 buf_ptr; + u64 offset; +}; + +struct bfa_bsg_phy_stats_s { + bfa_status_t status; + u16 bfad_num; + u16 instance; + struct bfa_phy_stats_s stats; +}; + +struct bfa_bsg_vhba_attr_s { + bfa_status_t status; + u16 bfad_num; + u16 pcifn_id; + struct bfa_vhba_attr_s attr; +}; + +struct bfa_bsg_boot_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_boot_cfg_s cfg; +}; + +struct bfa_bsg_preboot_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_boot_pbc_s cfg; +}; + +struct bfa_bsg_ethboot_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_ethboot_cfg_s cfg; +}; + +struct bfa_bsg_trunk_attr_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_trunk_attr_s attr; +}; + +struct bfa_bsg_qos_attr_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_qos_attr_s attr; +}; + +struct bfa_bsg_qos_vc_attr_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_qos_vc_attr_s attr; +}; + +struct bfa_bsg_vf_stats_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + struct bfa_vf_stats_s stats; +}; + +struct bfa_bsg_vf_reset_stats_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; +}; + +struct bfa_bsg_fcpim_lunmask_query_s { + bfa_status_t status; + u16 bfad_num; + struct bfa_lunmask_cfg_s lun_mask; +}; + +struct bfa_bsg_fcpim_lunmask_s { + bfa_status_t status; + u16 bfad_num; + u16 vf_id; + wwn_t pwwn; + wwn_t rpwwn; + struct scsi_lun lun; +}; + +struct bfa_bsg_fcpt_s { + bfa_status_t status; + u16 vf_id; + wwn_t lpwwn; + wwn_t dpwwn; + u32 tsecs; + int cts; + enum fc_cos cos; + struct fchs_s fchs; +}; +#define bfa_bsg_fcpt_t struct bfa_bsg_fcpt_s + +struct bfa_bsg_data { + int payload_len; + void *payload; +}; + +#define bfad_chk_iocmd_sz(__payload_len, __hdrsz, __bufsz) \ + (((__payload_len) != ((__hdrsz) + (__bufsz))) ? \ + BFA_STATUS_FAILED : BFA_STATUS_OK) + +#endif /* BFAD_BSG_H */ diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c index 48be0c54f2d..b412e0300dd 100644 --- a/drivers/scsi/bfa/bfad_debugfs.c +++ b/drivers/scsi/bfa/bfad_debugfs.c @@ -214,10 +214,10 @@ bfad_debugfs_read(struct file *file, char __user *buf, #define BFA_REG_CT_ADDRSZ (0x40000) #define BFA_REG_CB_ADDRSZ (0x20000) -#define BFA_REG_ADDRSZ(__bfa) \ - ((bfa_ioc_devid(&(__bfa)->ioc) == BFA_PCI_DEVICE_ID_CT) ? \ - BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ) -#define BFA_REG_ADDRMSK(__bfa) ((u32)(BFA_REG_ADDRSZ(__bfa) - 1)) +#define BFA_REG_ADDRSZ(__ioc) \ + ((u32)(bfa_asic_id_ctc(bfa_ioc_devid(__ioc)) ? 
\ + BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ)) +#define BFA_REG_ADDRMSK(__ioc) (BFA_REG_ADDRSZ(__ioc) - 1) static bfa_status_t bfad_reg_offset_check(struct bfa_s *bfa, u32 offset, u32 len) @@ -236,7 +236,7 @@ bfad_reg_offset_check(struct bfa_s *bfa, u32 offset, u32 len) return BFA_STATUS_EINVAL; } else { /* CB register space 64KB */ - if ((offset + (len<<2)) > BFA_REG_ADDRMSK(bfa)) + if ((offset + (len<<2)) > BFA_REG_ADDRMSK(&bfa->ioc)) return BFA_STATUS_EINVAL; } return BFA_STATUS_OK; @@ -317,7 +317,7 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf, bfad->reglen = len << 2; rb = bfa_ioc_bar0(ioc); - addr &= BFA_REG_ADDRMSK(bfa); + addr &= BFA_REG_ADDRMSK(ioc); /* offset and len sanity check */ rc = bfad_reg_offset_check(bfa, addr, len); @@ -380,7 +380,7 @@ bfad_debugfs_write_regwr(struct file *file, const char __user *buf, } kfree(kern_buf); - addr &= BFA_REG_ADDRMSK(bfa); /* offset only 17 bit and word align */ + addr &= BFA_REG_ADDRMSK(ioc); /* offset only 17 bit and word align */ /* offset and len sanity check */ rc = bfad_reg_offset_check(bfa, addr, 1); diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h index 7f9ea90254c..bda999ad9f5 100644 --- a/drivers/scsi/bfa/bfad_drv.h +++ b/drivers/scsi/bfa/bfad_drv.h @@ -43,6 +43,7 @@ #include <scsi/scsi_tcq.h> #include <scsi/scsi_transport_fc.h> #include <scsi/scsi_transport.h> +#include <scsi/scsi_bsg_fc.h> #include "bfa_modules.h" #include "bfa_fcs.h" @@ -55,7 +56,7 @@ #ifdef BFA_DRIVER_VERSION #define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION #else -#define BFAD_DRIVER_VERSION "2.3.2.3" +#define BFAD_DRIVER_VERSION "3.0.2.2" #endif #define BFAD_PROTO_NAME FCPI_NAME @@ -79,7 +80,7 @@ #define BFAD_HAL_INIT_FAIL 0x00000100 #define BFAD_FC4_PROBE_DONE 0x00000200 #define BFAD_PORT_DELETE 0x00000001 - +#define BFAD_INTX_ON 0x00000400 /* * BFAD related definition */ @@ -92,6 +93,8 @@ */ #define BFAD_LUN_QUEUE_DEPTH 32 #define BFAD_IO_MAX_SGE SG_ALL +#define BFAD_MIN_SECTORS 128 /* 64k */ +#define BFAD_MAX_SECTORS 0xFFFF /* 32 MB */ #define bfad_isr_t irq_handler_t @@ -110,6 +113,7 @@ struct bfad_msix_s { enum { BFA_TRC_LDRV_BFAD = 1, BFA_TRC_LDRV_IM = 2, + BFA_TRC_LDRV_BSG = 3, }; enum bfad_port_pvb_type { @@ -189,8 +193,10 @@ struct bfad_s { struct bfa_pcidev_s hal_pcidev; struct bfa_ioc_pci_attr_s pci_attr; void __iomem *pci_bar0_kva; + void __iomem *pci_bar2_kva; struct completion comp; struct completion suspend; + struct completion enable_comp; struct completion disable_comp; bfa_boolean_t disable_active; struct bfad_port_s pport; /* physical port of the BFAD */ @@ -218,6 +224,10 @@ struct bfad_s { char *regdata; u32 reglen; struct dentry *bfad_dentry_files[5]; + struct list_head free_aen_q; + struct list_head active_aen_q; + struct bfa_aen_entry_s aen_list[BFA_AEN_MAX_ENTRY]; + spinlock_t bfad_aen_spinlock; }; /* BFAD state machine events */ @@ -273,21 +283,6 @@ struct bfad_hal_comp { struct completion comp; }; -/* - * Macro to obtain the immediate lower power - * of two for the integer. - */ -#define nextLowerInt(x) \ -do { \ - int __i; \ - (*x)--; \ - for (__i = 1; __i < (sizeof(int)*8); __i <<= 1) \ - (*x) = (*x) | (*x) >> __i; \ - (*x)++; \ - (*x) = (*x) >> 1; \ -} while (0) - - #define BFA_LOG(level, bfad, mask, fmt, arg...) 
\ do { \ if (((mask) == 4) || (level[1] <= '4')) \ @@ -354,6 +349,7 @@ extern int msix_disable_ct; extern int fdmi_enable; extern int supported_fc4s; extern int pcie_max_read_reqsz; +extern int max_xfer_size; extern int bfa_debugfs_enable; extern struct mutex bfad_mutex; diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c index c2b36179e8e..01312381639 100644 --- a/drivers/scsi/bfa/bfad_im.c +++ b/drivers/scsi/bfa/bfad_im.c @@ -175,21 +175,11 @@ bfad_im_info(struct Scsi_Host *shost) struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; - struct bfa_s *bfa = &bfad->bfa; - struct bfa_ioc_s *ioc = &bfa->ioc; - char model[BFA_ADAPTER_MODEL_NAME_LEN]; - - bfa_get_adapter_model(bfa, model); memset(bfa_buf, 0, sizeof(bfa_buf)); - if (ioc->ctdev && !ioc->fcmode) - snprintf(bfa_buf, sizeof(bfa_buf), - "Brocade FCOE Adapter, " "model: %s hwpath: %s driver: %s", - model, bfad->pci_name, BFAD_DRIVER_VERSION); - else - snprintf(bfa_buf, sizeof(bfa_buf), - "Brocade FC Adapter, " "model: %s hwpath: %s driver: %s", - model, bfad->pci_name, BFAD_DRIVER_VERSION); + snprintf(bfa_buf, sizeof(bfa_buf), + "Brocade FC/FCOE Adapter, " "hwpath: %s driver: %s", + bfad->pci_name, BFAD_DRIVER_VERSION); return bfa_buf; } @@ -572,9 +562,6 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port, goto out_fc_rel; } - /* setup host fixed attribute if the lk supports */ - bfad_fc_host_init(im_port); - return 0; out_fc_rel: @@ -669,6 +656,31 @@ bfad_im_port_clean(struct bfad_im_port_s *im_port) spin_unlock_irqrestore(&bfad->bfad_lock, flags); } +static void bfad_aen_im_notify_handler(struct work_struct *work) +{ + struct bfad_im_s *im = + container_of(work, struct bfad_im_s, aen_im_notify_work); + struct bfa_aen_entry_s *aen_entry; + struct bfad_s *bfad = im->bfad; + struct Scsi_Host *shost = bfad->pport.im_port->shost; + void *event_data; + unsigned long flags; + + while (!list_empty(&bfad->active_aen_q)) { + spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags); + bfa_q_deq(&bfad->active_aen_q, &aen_entry); + spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags); + event_data = (char *)aen_entry + sizeof(struct list_head); + fc_host_post_vendor_event(shost, fc_get_event_number(), + sizeof(struct bfa_aen_entry_s) - + sizeof(struct list_head), + (char *)event_data, BFAD_NL_VENDOR_ID); + spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags); + list_add_tail(&aen_entry->qe, &bfad->free_aen_q); + spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags); + } +} + bfa_status_t bfad_im_probe(struct bfad_s *bfad) { @@ -689,6 +701,7 @@ bfad_im_probe(struct bfad_s *bfad) rc = BFA_STATUS_FAILED; } + INIT_WORK(&im->aen_im_notify_work, bfad_aen_im_notify_handler); ext: return rc; } @@ -713,6 +726,9 @@ bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad) else sht = &bfad_im_vport_template; + if (max_xfer_size != BFAD_MAX_SECTORS >> 1) + sht->max_sectors = max_xfer_size << 1; + sht->sg_tablesize = bfad->cfg_data.io_max_sge; return scsi_host_alloc(sht, sizeof(unsigned long)); @@ -790,7 +806,8 @@ struct scsi_host_template bfad_im_scsi_host_template = { .cmd_per_lun = 3, .use_clustering = ENABLE_CLUSTERING, .shost_attrs = bfad_im_host_attrs, - .max_sectors = 0xFFFF, + .max_sectors = BFAD_MAX_SECTORS, + .vendor_id = BFA_PCI_VENDOR_ID_BROCADE, }; struct scsi_host_template bfad_im_vport_template = { @@ -811,7 +828,7 @@ struct scsi_host_template bfad_im_vport_template = { .cmd_per_lun = 3, .use_clustering = ENABLE_CLUSTERING, 
.shost_attrs = bfad_im_vport_attrs, - .max_sectors = 0xFFFF, + .max_sectors = BFAD_MAX_SECTORS, }; bfa_status_t @@ -925,7 +942,10 @@ bfad_im_supported_speeds(struct bfa_s *bfa) return 0; bfa_ioc_get_attr(&bfa->ioc, ioc_attr); - if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) { + if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_16GBPS) + supported_speed |= FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT | + FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT; + else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) { if (ioc_attr->adapter_attr.is_mezz) { supported_speed |= FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h index c296c896851..004b6cf848d 100644 --- a/drivers/scsi/bfa/bfad_im.h +++ b/drivers/scsi/bfa/bfad_im.h @@ -115,8 +115,30 @@ struct bfad_im_s { struct bfad_s *bfad; struct workqueue_struct *drv_workq; char drv_workq_name[KOBJ_NAME_LEN]; + struct work_struct aen_im_notify_work; }; +#define bfad_get_aen_entry(_drv, _entry) do { \ + unsigned long _flags; \ + spin_lock_irqsave(&(_drv)->bfad_aen_spinlock, _flags); \ + bfa_q_deq(&(_drv)->free_aen_q, &(_entry)); \ + if (_entry) \ + list_add_tail(&(_entry)->qe, &(_drv)->active_aen_q); \ + spin_unlock_irqrestore(&(_drv)->bfad_aen_spinlock, _flags); \ +} while (0) + +/* post fc_host vendor event */ +#define bfad_im_post_vendor_event(_entry, _drv, _cnt, _cat, _evt) do { \ + do_gettimeofday(&(_entry)->aen_tv); \ + (_entry)->bfad_num = (_drv)->inst_no; \ + (_entry)->seq_num = (_cnt); \ + (_entry)->aen_category = (_cat); \ + (_entry)->aen_type = (_evt); \ + if ((_drv)->bfad_flags & BFAD_FC4_PROBE_DONE) \ + queue_work((_drv)->im->drv_workq, \ + &(_drv)->im->aen_im_notify_work); \ +} while (0) + struct Scsi_Host *bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *); bfa_status_t bfad_thread_workq(struct bfad_s *bfad); @@ -141,4 +163,7 @@ extern struct device_attribute *bfad_im_vport_attrs[]; irqreturn_t bfad_intx(int irq, void *dev_id); +int bfad_im_bsg_request(struct fc_bsg_job *job); +int bfad_im_bsg_timeout(struct fc_bsg_job *job); + #endif diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h index 72b69a0c3b5..b2ba0b2e91b 100644 --- a/drivers/scsi/bfa/bfi.h +++ b/drivers/scsi/bfa/bfi.h @@ -23,17 +23,29 @@ #pragma pack(1) +/* Per dma segment max size */ +#define BFI_MEM_DMA_SEG_SZ (131072) + +/* Get number of dma segments required */ +#define BFI_MEM_DMA_NSEGS(_num_reqs, _req_sz) \ + ((u16)(((((_num_reqs) * (_req_sz)) + BFI_MEM_DMA_SEG_SZ - 1) & \ + ~(BFI_MEM_DMA_SEG_SZ - 1)) / BFI_MEM_DMA_SEG_SZ)) + +/* Get num dma reqs - that fit in a segment */ +#define BFI_MEM_NREQS_SEG(_rqsz) (BFI_MEM_DMA_SEG_SZ / (_rqsz)) + +/* Get segment num from tag */ +#define BFI_MEM_SEG_FROM_TAG(_tag, _rqsz) ((_tag) / BFI_MEM_NREQS_SEG(_rqsz)) + +/* Get dma req offset in a segment */ +#define BFI_MEM_SEG_REQ_OFFSET(_tag, _sz) \ + ((_tag) - (BFI_MEM_SEG_FROM_TAG(_tag, _sz) * BFI_MEM_NREQS_SEG(_sz))) + /* * BFI FW image type */ #define BFI_FLASH_CHUNK_SZ 256 /* Flash chunk size */ #define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32)) -enum { - BFI_IMAGE_CB_FC, - BFI_IMAGE_CT_FC, - BFI_IMAGE_CT_CNA, - BFI_IMAGE_MAX, -}; /* * Msg header common to all msgs @@ -43,17 +55,20 @@ struct bfi_mhdr_s { u8 msg_id; /* msg opcode with in the class */ union { struct { - u8 rsvd; - u8 lpu_id; /* msg destination */ + u8 qid; + u8 fn_lpu; /* msg destination */ } h2i; u16 i2htok; /* token in msgs to host */ } mtag; }; -#define bfi_h2i_set(_mh, _mc, _op, _lpuid) do { \ 
+#define bfi_fn_lpu(__fn, __lpu) ((__fn) << 1 | (__lpu)) +#define bfi_mhdr_2_fn(_mh) ((_mh)->mtag.h2i.fn_lpu >> 1) + +#define bfi_h2i_set(_mh, _mc, _op, _fn_lpu) do { \ (_mh).msg_class = (_mc); \ (_mh).msg_id = (_op); \ - (_mh).mtag.h2i.lpu_id = (_lpuid); \ + (_mh).mtag.h2i.fn_lpu = (_fn_lpu); \ } while (0) #define bfi_i2h_set(_mh, _mc, _op, _i2htok) do { \ @@ -101,7 +116,7 @@ union bfi_addr_u { }; /* - * Scatter Gather Element + * Scatter Gather Element used for fast-path IO requests */ struct bfi_sge_s { #ifdef __BIG_ENDIAN @@ -116,6 +131,14 @@ struct bfi_sge_s { union bfi_addr_u sga; }; +/** + * Generic DMA addr-len pair. + */ +struct bfi_alen_s { + union bfi_addr_u al_addr; /* DMA addr of buffer */ + u32 al_len; /* length of buffer */ +}; + /* * Scatter Gather Page */ @@ -127,6 +150,12 @@ struct bfi_sgpg_s { u32 rsvd[BFI_SGPG_RSVD_WD_LEN]; }; +/* FCP module definitions */ +#define BFI_IO_MAX (2000) +#define BFI_IOIM_SNSLEN (256) +#define BFI_IOIM_SNSBUF_SEGS \ + BFI_MEM_DMA_NSEGS(BFI_IO_MAX, BFI_IOIM_SNSLEN) + /* * Large Message structure - 128 Bytes size Msgs */ @@ -149,18 +178,29 @@ struct bfi_mbmsg_s { }; /* + * Supported PCI function class codes (personality) + */ +enum bfi_pcifn_class { + BFI_PCIFN_CLASS_FC = 0x0c04, + BFI_PCIFN_CLASS_ETH = 0x0200, +}; + +/* * Message Classes */ enum bfi_mclass { BFI_MC_IOC = 1, /* IO Controller (IOC) */ + BFI_MC_DIAG = 2, /* Diagnostic Msgs */ + BFI_MC_FLASH = 3, /* Flash message class */ + BFI_MC_CEE = 4, /* CEE */ BFI_MC_FCPORT = 5, /* FC port */ BFI_MC_IOCFC = 6, /* FC - IO Controller (IOC) */ - BFI_MC_LL = 7, /* Link Layer */ + BFI_MC_ABLK = 7, /* ASIC block configuration */ BFI_MC_UF = 8, /* Unsolicited frame receive */ BFI_MC_FCXP = 9, /* FC Transport */ BFI_MC_LPS = 10, /* lport fc login services */ BFI_MC_RPORT = 11, /* Remote port */ - BFI_MC_ITNIM = 12, /* I-T nexus (Initiator mode) */ + BFI_MC_ITN = 12, /* I-T nexus (Initiator mode) */ BFI_MC_IOIM_READ = 13, /* read IO (Initiator mode) */ BFI_MC_IOIM_WRITE = 14, /* write IO (Initiator mode) */ BFI_MC_IOIM_IO = 15, /* IO (Initiator mode) */ @@ -168,6 +208,8 @@ enum bfi_mclass { BFI_MC_IOIM_IOCOM = 17, /* good IO completion */ BFI_MC_TSKIM = 18, /* Initiator Task management */ BFI_MC_PORT = 21, /* Physical port */ + BFI_MC_SFP = 22, /* SFP module */ + BFI_MC_PHY = 25, /* External PHY message class */ BFI_MC_MAX = 32 }; @@ -175,23 +217,28 @@ enum bfi_mclass { #define BFI_IOC_MAX_CQS_ASIC 8 #define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */ -#define BFI_BOOT_TYPE_OFF 8 -#define BFI_BOOT_LOADER_OFF 12 - -#define BFI_BOOT_TYPE_NORMAL 0 -#define BFI_BOOT_TYPE_FLASH 1 -#define BFI_BOOT_TYPE_MEMTEST 2 - -#define BFI_BOOT_LOADER_OS 0 -#define BFI_BOOT_LOADER_BIOS 1 -#define BFI_BOOT_LOADER_UEFI 2 - /* *---------------------------------------------------------------------- * IOC *---------------------------------------------------------------------- */ +/* + * Different asic generations + */ +enum bfi_asic_gen { + BFI_ASIC_GEN_CB = 1, /* crossbow 8G FC */ + BFI_ASIC_GEN_CT = 2, /* catapult 8G FC or 10G CNA */ + BFI_ASIC_GEN_CT2 = 3, /* catapult-2 16G FC or 10G CNA */ +}; + +enum bfi_asic_mode { + BFI_ASIC_MODE_FC = 1, /* FC up to 8G speed */ + BFI_ASIC_MODE_FC16 = 2, /* FC up to 16G speed */ + BFI_ASIC_MODE_ETH = 3, /* Ethernet ports */ + BFI_ASIC_MODE_COMBO = 4, /* FC 16G and Ethernet 10G port */ +}; + enum bfi_ioc_h2i_msgs { BFI_IOC_H2I_ENABLE_REQ = 1, BFI_IOC_H2I_DISABLE_REQ = 2, @@ -204,8 +251,8 @@ enum bfi_ioc_i2h_msgs { BFI_IOC_I2H_ENABLE_REPLY = BFA_I2HM(1), BFI_IOC_I2H_DISABLE_REPLY = 
BFA_I2HM(2), + BFI_IOC_I2H_GETATTR_REPLY = BFA_I2HM(3), - BFI_IOC_I2H_READY_EVENT = BFA_I2HM(4), - BFI_IOC_I2H_HBEAT = BFA_I2HM(5), + BFI_IOC_I2H_HBEAT = BFA_I2HM(4), + BFI_IOC_I2H_ACQ_ADDR_REPLY = BFA_I2HM(5), }; /* @@ -220,7 +267,8 @@ struct bfi_ioc_attr_s { wwn_t mfg_pwwn; /* Mfg port wwn */ wwn_t mfg_nwwn; /* Mfg node wwn */ mac_t mfg_mac; /* Mfg mac */ - u16 rsvd_a; + u8 port_mode; /* bfi_port_mode */ + u8 rsvd_a; wwn_t pwwn; wwn_t nwwn; mac_t mac; /* PBC or Mfg mac */ @@ -272,21 +320,33 @@ struct bfi_ioc_getattr_reply_s { #define BFI_IOC_FW_SIGNATURE (0xbfadbfad) #define BFI_IOC_MD5SUM_SZ 4 struct bfi_ioc_image_hdr_s { - u32 signature; /* constant signature */ - u32 rsvd_a; - u32 exec; /* exec vector */ - u32 param; /* parameters */ + u32 signature; /* constant signature */ + u8 asic_gen; /* asic generation */ + u8 asic_mode; + u8 port0_mode; /* device mode for port 0 */ + u8 port1_mode; /* device mode for port 1 */ + u32 exec; /* exec vector */ + u32 bootenv; /* firmware boot env */ u32 rsvd_b[4]; u32 md5sum[BFI_IOC_MD5SUM_SZ]; }; -/* - * BFI_IOC_I2H_READY_EVENT message - */ -struct bfi_ioc_rdy_event_s { - struct bfi_mhdr_s mh; /* common msg header */ - u8 init_status; /* init event status */ - u8 rsvd[3]; +#define BFI_FWBOOT_DEVMODE_OFF 4 +#define BFI_FWBOOT_TYPE_OFF 8 +#define BFI_FWBOOT_ENV_OFF 12 +#define BFI_FWBOOT_DEVMODE(__asic_gen, __asic_mode, __p0_mode, __p1_mode) \ + (((u32)(__asic_gen)) << 24 | \ + ((u32)(__asic_mode)) << 16 | \ + ((u32)(__p0_mode)) << 8 | \ + ((u32)(__p1_mode))) + +#define BFI_FWBOOT_TYPE_NORMAL 0 +#define BFI_FWBOOT_TYPE_MEMTEST 2 +#define BFI_FWBOOT_ENV_OS 0 + +enum bfi_port_mode { + BFI_PORT_MODE_FC = 1, + BFI_PORT_MODE_ETH = 2, }; struct bfi_ioc_hbeat_s { @@ -345,8 +405,8 @@ enum { */ struct bfi_ioc_ctrl_req_s { struct bfi_mhdr_s mh; - u8 ioc_class; - u8 rsvd[3]; + u16 clscode; + u16 rsvd; u32 tv_sec; }; #define bfi_ioc_enable_req_t struct bfi_ioc_ctrl_req_s; @@ -358,7 +418,9 @@ struct bfi_ioc_ctrl_req_s { struct bfi_ioc_ctrl_reply_s { struct bfi_mhdr_s mh; /* Common msg header */ u8 status; /* enable/disable status */ - u8 rsvd[3]; + u8 port_mode; /* bfa_mode_s */ + u8 cap_bm; /* capability bit mask */ + u8 rsvd; }; #define bfi_ioc_enable_reply_t struct bfi_ioc_ctrl_reply_s; #define bfi_ioc_disable_reply_t struct bfi_ioc_ctrl_reply_s; @@ -380,7 +442,7 @@ union bfi_ioc_h2i_msg_u { */ union bfi_ioc_i2h_msg_u { struct bfi_mhdr_s mh; - struct bfi_ioc_rdy_event_s rdy_event; + struct bfi_ioc_ctrl_reply_s fw_event; u32 mboxmsg[BFI_IOC_MSGSZ]; }; @@ -393,6 +455,7 @@ union bfi_ioc_i2h_msg_u { #define BFI_PBC_MAX_BLUNS 8 #define BFI_PBC_MAX_VPORTS 16 +#define BFI_PBC_PORT_DISABLED 2 /* * PBC boot lun configuration @@ -574,6 +637,516 @@ union bfi_port_i2h_msg_u { struct bfi_port_generic_rsp_s clearstats_rsp; }; +/* + *---------------------------------------------------------------------- + * ABLK + *---------------------------------------------------------------------- + */ +enum bfi_ablk_h2i_msgs_e { + BFI_ABLK_H2I_QUERY = 1, + BFI_ABLK_H2I_ADPT_CONFIG = 2, + BFI_ABLK_H2I_PORT_CONFIG = 3, + BFI_ABLK_H2I_PF_CREATE = 4, + BFI_ABLK_H2I_PF_DELETE = 5, + BFI_ABLK_H2I_PF_UPDATE = 6, + BFI_ABLK_H2I_OPTROM_ENABLE = 7, + BFI_ABLK_H2I_OPTROM_DISABLE = 8, +}; + +enum bfi_ablk_i2h_msgs_e { + BFI_ABLK_I2H_QUERY = BFA_I2HM(BFI_ABLK_H2I_QUERY), + BFI_ABLK_I2H_ADPT_CONFIG = BFA_I2HM(BFI_ABLK_H2I_ADPT_CONFIG), + BFI_ABLK_I2H_PORT_CONFIG = BFA_I2HM(BFI_ABLK_H2I_PORT_CONFIG), + BFI_ABLK_I2H_PF_CREATE = BFA_I2HM(BFI_ABLK_H2I_PF_CREATE), + BFI_ABLK_I2H_PF_DELETE = 
BFA_I2HM(BFI_ABLK_H2I_PF_DELETE), + BFI_ABLK_I2H_PF_UPDATE = BFA_I2HM(BFI_ABLK_H2I_PF_UPDATE), + BFI_ABLK_I2H_OPTROM_ENABLE = BFA_I2HM(BFI_ABLK_H2I_OPTROM_ENABLE), + BFI_ABLK_I2H_OPTROM_DISABLE = BFA_I2HM(BFI_ABLK_H2I_OPTROM_DISABLE), +}; + +/* BFI_ABLK_H2I_QUERY */ +struct bfi_ablk_h2i_query_s { + struct bfi_mhdr_s mh; + union bfi_addr_u addr; +}; + +/* BFI_ABLK_H2I_ADPT_CONFIG, BFI_ABLK_H2I_PORT_CONFIG */ +struct bfi_ablk_h2i_cfg_req_s { + struct bfi_mhdr_s mh; + u8 mode; + u8 port; + u8 max_pf; + u8 max_vf; +}; + +/* + * BFI_ABLK_H2I_PF_CREATE, BFI_ABLK_H2I_PF_DELETE, + */ +struct bfi_ablk_h2i_pf_req_s { + struct bfi_mhdr_s mh; + u8 pcifn; + u8 port; + u16 pers; + u32 bw; +}; + +/* BFI_ABLK_H2I_OPTROM_ENABLE, BFI_ABLK_H2I_OPTROM_DISABLE */ +struct bfi_ablk_h2i_optrom_s { + struct bfi_mhdr_s mh; +}; + +/* + * BFI_ABLK_I2H_QUERY + * BFI_ABLK_I2H_PORT_CONFIG + * BFI_ABLK_I2H_PF_CREATE + * BFI_ABLK_I2H_PF_DELETE + * BFI_ABLK_I2H_PF_UPDATE + * BFI_ABLK_I2H_OPTROM_ENABLE + * BFI_ABLK_I2H_OPTROM_DISABLE + */ +struct bfi_ablk_i2h_rsp_s { + struct bfi_mhdr_s mh; + u8 status; + u8 pcifn; + u8 port_mode; +}; + + +/* + * CEE module specific messages + */ + +/* Mailbox commands from host to firmware */ +enum bfi_cee_h2i_msgs_e { + BFI_CEE_H2I_GET_CFG_REQ = 1, + BFI_CEE_H2I_RESET_STATS = 2, + BFI_CEE_H2I_GET_STATS_REQ = 3, +}; + +enum bfi_cee_i2h_msgs_e { + BFI_CEE_I2H_GET_CFG_RSP = BFA_I2HM(1), + BFI_CEE_I2H_RESET_STATS_RSP = BFA_I2HM(2), + BFI_CEE_I2H_GET_STATS_RSP = BFA_I2HM(3), +}; + +/* + * H2I command structure for resetting the stats + */ +struct bfi_cee_reset_stats_s { + struct bfi_mhdr_s mh; +}; + +/* + * Get configuration command from host + */ +struct bfi_cee_get_req_s { + struct bfi_mhdr_s mh; + union bfi_addr_u dma_addr; +}; + +/* + * Reply message from firmware + */ +struct bfi_cee_get_rsp_s { + struct bfi_mhdr_s mh; + u8 cmd_status; + u8 rsvd[3]; +}; + +/* + * Reply message from firmware + */ +struct bfi_cee_stats_rsp_s { + struct bfi_mhdr_s mh; + u8 cmd_status; + u8 rsvd[3]; +}; + +/* Mailbox message structures from firmware to host */ +union bfi_cee_i2h_msg_u { + struct bfi_mhdr_s mh; + struct bfi_cee_get_rsp_s get_rsp; + struct bfi_cee_stats_rsp_s stats_rsp; +}; + +/* + * SFP related + */ + +enum bfi_sfp_h2i_e { + BFI_SFP_H2I_SHOW = 1, + BFI_SFP_H2I_SCN = 2, +}; + +enum bfi_sfp_i2h_e { + BFI_SFP_I2H_SHOW = BFA_I2HM(BFI_SFP_H2I_SHOW), + BFI_SFP_I2H_SCN = BFA_I2HM(BFI_SFP_H2I_SCN), +}; + +/* + * SFP state change notification + */ +struct bfi_sfp_scn_s { + struct bfi_mhdr_s mhr; /* host msg header */ + u8 event; + u8 sfpid; + u8 pomlvl; /* pom level: normal/warning/alarm */ + u8 is_elb; /* e-loopback */ +}; + +/* + * SFP state + */ +enum bfa_sfp_stat_e { + BFA_SFP_STATE_INIT = 0, /* SFP state is uninit */ + BFA_SFP_STATE_REMOVED = 1, /* SFP is removed */ + BFA_SFP_STATE_INSERTED = 2, /* SFP is inserted */ + BFA_SFP_STATE_VALID = 3, /* SFP is valid */ + BFA_SFP_STATE_UNSUPPORT = 4, /* SFP is unsupported */ + BFA_SFP_STATE_FAILED = 5, /* SFP i2c read fail */ +}; + +/* + * SFP memory access type + */ +enum bfi_sfp_mem_e { + BFI_SFP_MEM_ALL = 0x1, /* access all data field */ + BFI_SFP_MEM_DIAGEXT = 0x2, /* access diag ext data field only */ +}; + +struct bfi_sfp_req_s { + struct bfi_mhdr_s mh; + u8 memtype; + u8 rsvd[3]; + struct bfi_alen_s alen; +}; + +struct bfi_sfp_rsp_s { + struct bfi_mhdr_s mh; + u8 status; + u8 state; + u8 rsvd[2]; +}; + +/* + * FLASH module specific + */ +enum bfi_flash_h2i_msgs { + BFI_FLASH_H2I_QUERY_REQ = 1, + BFI_FLASH_H2I_ERASE_REQ = 2, + BFI_FLASH_H2I_WRITE_REQ = 
3, + BFI_FLASH_H2I_READ_REQ = 4, + BFI_FLASH_H2I_BOOT_VER_REQ = 5, +}; + +enum bfi_flash_i2h_msgs { + BFI_FLASH_I2H_QUERY_RSP = BFA_I2HM(1), + BFI_FLASH_I2H_ERASE_RSP = BFA_I2HM(2), + BFI_FLASH_I2H_WRITE_RSP = BFA_I2HM(3), + BFI_FLASH_I2H_READ_RSP = BFA_I2HM(4), + BFI_FLASH_I2H_BOOT_VER_RSP = BFA_I2HM(5), + BFI_FLASH_I2H_EVENT = BFA_I2HM(127), +}; + +/* + * Flash query request + */ +struct bfi_flash_query_req_s { + struct bfi_mhdr_s mh; /* Common msg header */ + struct bfi_alen_s alen; +}; + +/* + * Flash erase request + */ +struct bfi_flash_erase_req_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u32 type; /* partition type */ + u8 instance; /* partition instance */ + u8 rsv[3]; +}; + +/* + * Flash write request + */ +struct bfi_flash_write_req_s { + struct bfi_mhdr_s mh; /* Common msg header */ + struct bfi_alen_s alen; + u32 type; /* partition type */ + u8 instance; /* partition instance */ + u8 last; + u8 rsv[2]; + u32 offset; + u32 length; +}; + +/* + * Flash read request + */ +struct bfi_flash_read_req_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u32 type; /* partition type */ + u8 instance; /* partition instance */ + u8 rsv[3]; + u32 offset; + u32 length; + struct bfi_alen_s alen; +}; + +/* + * Flash query response + */ +struct bfi_flash_query_rsp_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u32 status; +}; + +/* + * Flash read response + */ +struct bfi_flash_read_rsp_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u32 type; /* partition type */ + u8 instance; /* partition instance */ + u8 rsv[3]; + u32 status; + u32 length; +}; + +/* + * Flash write response + */ +struct bfi_flash_write_rsp_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u32 type; /* partition type */ + u8 instance; /* partition instance */ + u8 rsv[3]; + u32 status; + u32 length; +}; + +/* + * Flash erase response + */ +struct bfi_flash_erase_rsp_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u32 type; /* partition type */ + u8 instance; /* partition instance */ + u8 rsv[3]; + u32 status; +}; + +/* + * Flash event notification + */ +struct bfi_flash_event_s { + struct bfi_mhdr_s mh; /* Common msg header */ + bfa_status_t status; + u32 param; +}; + +/* + *---------------------------------------------------------------------- + * DIAG + *---------------------------------------------------------------------- + */ +enum bfi_diag_h2i { + BFI_DIAG_H2I_PORTBEACON = 1, + BFI_DIAG_H2I_LOOPBACK = 2, + BFI_DIAG_H2I_FWPING = 3, + BFI_DIAG_H2I_TEMPSENSOR = 4, + BFI_DIAG_H2I_LEDTEST = 5, + BFI_DIAG_H2I_QTEST = 6, +}; + +enum bfi_diag_i2h { + BFI_DIAG_I2H_PORTBEACON = BFA_I2HM(BFI_DIAG_H2I_PORTBEACON), + BFI_DIAG_I2H_LOOPBACK = BFA_I2HM(BFI_DIAG_H2I_LOOPBACK), + BFI_DIAG_I2H_FWPING = BFA_I2HM(BFI_DIAG_H2I_FWPING), + BFI_DIAG_I2H_TEMPSENSOR = BFA_I2HM(BFI_DIAG_H2I_TEMPSENSOR), + BFI_DIAG_I2H_LEDTEST = BFA_I2HM(BFI_DIAG_H2I_LEDTEST), + BFI_DIAG_I2H_QTEST = BFA_I2HM(BFI_DIAG_H2I_QTEST), +}; + +#define BFI_DIAG_MAX_SGES 2 +#define BFI_DIAG_DMA_BUF_SZ (2 * 1024) +#define BFI_BOOT_MEMTEST_RES_ADDR 0x900 +#define BFI_BOOT_MEMTEST_RES_SIG 0xA0A1A2A3 + +struct bfi_diag_lb_req_s { + struct bfi_mhdr_s mh; + u32 loopcnt; + u32 pattern; + u8 lb_mode; /*!< bfa_port_opmode_t */ + u8 speed; /*!< bfa_port_speed_t */ + u8 rsvd[2]; +}; + +struct bfi_diag_lb_rsp_s { + struct bfi_mhdr_s mh; /* 4 bytes */ + struct bfa_diag_loopback_result_s res; /* 16 bytes */ +}; + +struct bfi_diag_fwping_req_s { + struct bfi_mhdr_s mh; /* 4 bytes */ + struct bfi_alen_s alen; /* 12 bytes */ + u32 data; /* user input data 
pattern */ + u32 count; /* user input dma count */ + u8 qtag; /* track CPE vc */ + u8 rsv[3]; +}; + +struct bfi_diag_fwping_rsp_s { + struct bfi_mhdr_s mh; /* 4 bytes */ + u32 data; /* user input data pattern */ + u8 qtag; /* track CPE vc */ + u8 dma_status; /* dma status */ + u8 rsv[2]; +}; + +/* + * Temperature Sensor + */ +struct bfi_diag_ts_req_s { + struct bfi_mhdr_s mh; /* 4 bytes */ + u16 temp; /* 10-bit A/D value */ + u16 brd_temp; /* 9-bit board temp */ + u8 status; + u8 ts_junc; /* show junction tempsensor */ + u8 ts_brd; /* show board tempsensor */ + u8 rsv; +}; +#define bfi_diag_ts_rsp_t struct bfi_diag_ts_req_s + +struct bfi_diag_ledtest_req_s { + struct bfi_mhdr_s mh; /* 4 bytes */ + u8 cmd; + u8 color; + u8 portid; + u8 led; /* bitmap of LEDs to be tested */ + u16 freq; /* no. of blinks every 10 secs */ + u8 rsv[2]; +}; + +/* notify host led operation is done */ +struct bfi_diag_ledtest_rsp_s { + struct bfi_mhdr_s mh; /* 4 bytes */ +}; + +struct bfi_diag_portbeacon_req_s { + struct bfi_mhdr_s mh; /* 4 bytes */ + u32 period; /* beaconing period */ + u8 beacon; /* 1: beacon on */ + u8 rsvd[3]; +}; + +/* notify host the beacon is off */ +struct bfi_diag_portbeacon_rsp_s { + struct bfi_mhdr_s mh; /* 4 bytes */ +}; + +struct bfi_diag_qtest_req_s { + struct bfi_mhdr_s mh; /* 4 bytes */ + u32 data[BFI_LMSG_PL_WSZ]; /* fill up tcm prefetch area */ +}; +#define bfi_diag_qtest_rsp_t struct bfi_diag_qtest_req_s + +/* + * PHY module specific + */ +enum bfi_phy_h2i_msgs_e { + BFI_PHY_H2I_QUERY_REQ = 1, + BFI_PHY_H2I_STATS_REQ = 2, + BFI_PHY_H2I_WRITE_REQ = 3, + BFI_PHY_H2I_READ_REQ = 4, +}; + +enum bfi_phy_i2h_msgs_e { + BFI_PHY_I2H_QUERY_RSP = BFA_I2HM(1), + BFI_PHY_I2H_STATS_RSP = BFA_I2HM(2), + BFI_PHY_I2H_WRITE_RSP = BFA_I2HM(3), + BFI_PHY_I2H_READ_RSP = BFA_I2HM(4), +}; + +/* + * External PHY query request + */ +struct bfi_phy_query_req_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u8 instance; + u8 rsv[3]; + struct bfi_alen_s alen; +}; + +/* + * External PHY stats request + */ +struct bfi_phy_stats_req_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u8 instance; + u8 rsv[3]; + struct bfi_alen_s alen; +}; + +/* + * External PHY write request + */ +struct bfi_phy_write_req_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u8 instance; + u8 last; + u8 rsv[2]; + u32 offset; + u32 length; + struct bfi_alen_s alen; +}; + +/* + * External PHY read request + */ +struct bfi_phy_read_req_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u8 instance; + u8 rsv[3]; + u32 offset; + u32 length; + struct bfi_alen_s alen; +}; + +/* + * External PHY query response + */ +struct bfi_phy_query_rsp_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u32 status; +}; + +/* + * External PHY stats response + */ +struct bfi_phy_stats_rsp_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u32 status; +}; + +/* + * External PHY read response + */ +struct bfi_phy_read_rsp_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u32 status; + u32 length; +}; + +/* + * External PHY write response + */ +struct bfi_phy_write_rsp_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u32 status; + u32 length; +}; + #pragma pack() #endif /* __BFI_H__ */ diff --git a/drivers/scsi/bfa/bfi_cbreg.h b/drivers/scsi/bfa/bfi_cbreg.h deleted file mode 100644 index 39ad42b66b5..00000000000 --- a/drivers/scsi/bfa/bfi_cbreg.h +++ /dev/null @@ -1,305 +0,0 @@ - -/* - * bfi_cbreg.h crossbow host block register definitions - * - * !!! Do not edit. Auto generated. !!! 
- */ - -#ifndef __BFI_CBREG_H__ -#define __BFI_CBREG_H__ - - -#define HOSTFN0_INT_STATUS 0x00014000 -#define __HOSTFN0_INT_STATUS_LVL_MK 0x00f00000 -#define __HOSTFN0_INT_STATUS_LVL_SH 20 -#define __HOSTFN0_INT_STATUS_LVL(_v) ((_v) << __HOSTFN0_INT_STATUS_LVL_SH) -#define __HOSTFN0_INT_STATUS_P 0x000fffff -#define HOSTFN0_INT_MSK 0x00014004 -#define HOST_PAGE_NUM_FN0 0x00014008 -#define __HOST_PAGE_NUM_FN 0x000001ff -#define HOSTFN1_INT_STATUS 0x00014100 -#define __HOSTFN1_INT_STAT_LVL_MK 0x00f00000 -#define __HOSTFN1_INT_STAT_LVL_SH 20 -#define __HOSTFN1_INT_STAT_LVL(_v) ((_v) << __HOSTFN1_INT_STAT_LVL_SH) -#define __HOSTFN1_INT_STAT_P 0x000fffff -#define HOSTFN1_INT_MSK 0x00014104 -#define HOST_PAGE_NUM_FN1 0x00014108 -#define APP_PLL_400_CTL_REG 0x00014204 -#define __P_400_PLL_LOCK 0x80000000 -#define __APP_PLL_400_SRAM_USE_100MHZ 0x00100000 -#define __APP_PLL_400_RESET_TIMER_MK 0x000e0000 -#define __APP_PLL_400_RESET_TIMER_SH 17 -#define __APP_PLL_400_RESET_TIMER(_v) ((_v) << __APP_PLL_400_RESET_TIMER_SH) -#define __APP_PLL_400_LOGIC_SOFT_RESET 0x00010000 -#define __APP_PLL_400_CNTLMT0_1_MK 0x0000c000 -#define __APP_PLL_400_CNTLMT0_1_SH 14 -#define __APP_PLL_400_CNTLMT0_1(_v) ((_v) << __APP_PLL_400_CNTLMT0_1_SH) -#define __APP_PLL_400_JITLMT0_1_MK 0x00003000 -#define __APP_PLL_400_JITLMT0_1_SH 12 -#define __APP_PLL_400_JITLMT0_1(_v) ((_v) << __APP_PLL_400_JITLMT0_1_SH) -#define __APP_PLL_400_HREF 0x00000800 -#define __APP_PLL_400_HDIV 0x00000400 -#define __APP_PLL_400_P0_1_MK 0x00000300 -#define __APP_PLL_400_P0_1_SH 8 -#define __APP_PLL_400_P0_1(_v) ((_v) << __APP_PLL_400_P0_1_SH) -#define __APP_PLL_400_Z0_2_MK 0x000000e0 -#define __APP_PLL_400_Z0_2_SH 5 -#define __APP_PLL_400_Z0_2(_v) ((_v) << __APP_PLL_400_Z0_2_SH) -#define __APP_PLL_400_RSEL200500 0x00000010 -#define __APP_PLL_400_ENARST 0x00000008 -#define __APP_PLL_400_BYPASS 0x00000004 -#define __APP_PLL_400_LRESETN 0x00000002 -#define __APP_PLL_400_ENABLE 0x00000001 -#define APP_PLL_212_CTL_REG 0x00014208 -#define __P_212_PLL_LOCK 0x80000000 -#define __APP_PLL_212_RESET_TIMER_MK 0x000e0000 -#define __APP_PLL_212_RESET_TIMER_SH 17 -#define __APP_PLL_212_RESET_TIMER(_v) ((_v) << __APP_PLL_212_RESET_TIMER_SH) -#define __APP_PLL_212_LOGIC_SOFT_RESET 0x00010000 -#define __APP_PLL_212_CNTLMT0_1_MK 0x0000c000 -#define __APP_PLL_212_CNTLMT0_1_SH 14 -#define __APP_PLL_212_CNTLMT0_1(_v) ((_v) << __APP_PLL_212_CNTLMT0_1_SH) -#define __APP_PLL_212_JITLMT0_1_MK 0x00003000 -#define __APP_PLL_212_JITLMT0_1_SH 12 -#define __APP_PLL_212_JITLMT0_1(_v) ((_v) << __APP_PLL_212_JITLMT0_1_SH) -#define __APP_PLL_212_HREF 0x00000800 -#define __APP_PLL_212_HDIV 0x00000400 -#define __APP_PLL_212_P0_1_MK 0x00000300 -#define __APP_PLL_212_P0_1_SH 8 -#define __APP_PLL_212_P0_1(_v) ((_v) << __APP_PLL_212_P0_1_SH) -#define __APP_PLL_212_Z0_2_MK 0x000000e0 -#define __APP_PLL_212_Z0_2_SH 5 -#define __APP_PLL_212_Z0_2(_v) ((_v) << __APP_PLL_212_Z0_2_SH) -#define __APP_PLL_212_RSEL200500 0x00000010 -#define __APP_PLL_212_ENARST 0x00000008 -#define __APP_PLL_212_BYPASS 0x00000004 -#define __APP_PLL_212_LRESETN 0x00000002 -#define __APP_PLL_212_ENABLE 0x00000001 -#define HOST_SEM0_REG 0x00014230 -#define __HOST_SEMAPHORE 0x00000001 -#define HOST_SEM1_REG 0x00014234 -#define HOST_SEM2_REG 0x00014238 -#define HOST_SEM3_REG 0x0001423c -#define HOST_SEM0_INFO_REG 0x00014240 -#define HOST_SEM1_INFO_REG 0x00014244 -#define HOST_SEM2_INFO_REG 0x00014248 -#define HOST_SEM3_INFO_REG 0x0001424c -#define HOSTFN0_LPU0_CMD_STAT 0x00019000 -#define __HOSTFN0_LPU0_MBOX_INFO_MK 
0xfffffffe -#define __HOSTFN0_LPU0_MBOX_INFO_SH 1 -#define __HOSTFN0_LPU0_MBOX_INFO(_v) ((_v) << __HOSTFN0_LPU0_MBOX_INFO_SH) -#define __HOSTFN0_LPU0_MBOX_CMD_STATUS 0x00000001 -#define LPU0_HOSTFN0_CMD_STAT 0x00019008 -#define __LPU0_HOSTFN0_MBOX_INFO_MK 0xfffffffe -#define __LPU0_HOSTFN0_MBOX_INFO_SH 1 -#define __LPU0_HOSTFN0_MBOX_INFO(_v) ((_v) << __LPU0_HOSTFN0_MBOX_INFO_SH) -#define __LPU0_HOSTFN0_MBOX_CMD_STATUS 0x00000001 -#define HOSTFN1_LPU1_CMD_STAT 0x00019014 -#define __HOSTFN1_LPU1_MBOX_INFO_MK 0xfffffffe -#define __HOSTFN1_LPU1_MBOX_INFO_SH 1 -#define __HOSTFN1_LPU1_MBOX_INFO(_v) ((_v) << __HOSTFN1_LPU1_MBOX_INFO_SH) -#define __HOSTFN1_LPU1_MBOX_CMD_STATUS 0x00000001 -#define LPU1_HOSTFN1_CMD_STAT 0x0001901c -#define __LPU1_HOSTFN1_MBOX_INFO_MK 0xfffffffe -#define __LPU1_HOSTFN1_MBOX_INFO_SH 1 -#define __LPU1_HOSTFN1_MBOX_INFO(_v) ((_v) << __LPU1_HOSTFN1_MBOX_INFO_SH) -#define __LPU1_HOSTFN1_MBOX_CMD_STATUS 0x00000001 -#define CPE_Q0_DEPTH 0x00010014 -#define CPE_Q0_PI 0x0001001c -#define CPE_Q0_CI 0x00010020 -#define CPE_Q1_DEPTH 0x00010034 -#define CPE_Q1_PI 0x0001003c -#define CPE_Q1_CI 0x00010040 -#define CPE_Q2_DEPTH 0x00010054 -#define CPE_Q2_PI 0x0001005c -#define CPE_Q2_CI 0x00010060 -#define CPE_Q3_DEPTH 0x00010074 -#define CPE_Q3_PI 0x0001007c -#define CPE_Q3_CI 0x00010080 -#define CPE_Q4_DEPTH 0x00010094 -#define CPE_Q4_PI 0x0001009c -#define CPE_Q4_CI 0x000100a0 -#define CPE_Q5_DEPTH 0x000100b4 -#define CPE_Q5_PI 0x000100bc -#define CPE_Q5_CI 0x000100c0 -#define CPE_Q6_DEPTH 0x000100d4 -#define CPE_Q6_PI 0x000100dc -#define CPE_Q6_CI 0x000100e0 -#define CPE_Q7_DEPTH 0x000100f4 -#define CPE_Q7_PI 0x000100fc -#define CPE_Q7_CI 0x00010100 -#define RME_Q0_DEPTH 0x00011014 -#define RME_Q0_PI 0x0001101c -#define RME_Q0_CI 0x00011020 -#define RME_Q1_DEPTH 0x00011034 -#define RME_Q1_PI 0x0001103c -#define RME_Q1_CI 0x00011040 -#define RME_Q2_DEPTH 0x00011054 -#define RME_Q2_PI 0x0001105c -#define RME_Q2_CI 0x00011060 -#define RME_Q3_DEPTH 0x00011074 -#define RME_Q3_PI 0x0001107c -#define RME_Q3_CI 0x00011080 -#define RME_Q4_DEPTH 0x00011094 -#define RME_Q4_PI 0x0001109c -#define RME_Q4_CI 0x000110a0 -#define RME_Q5_DEPTH 0x000110b4 -#define RME_Q5_PI 0x000110bc -#define RME_Q5_CI 0x000110c0 -#define RME_Q6_DEPTH 0x000110d4 -#define RME_Q6_PI 0x000110dc -#define RME_Q6_CI 0x000110e0 -#define RME_Q7_DEPTH 0x000110f4 -#define RME_Q7_PI 0x000110fc -#define RME_Q7_CI 0x00011100 -#define PSS_CTL_REG 0x00018800 -#define __PSS_I2C_CLK_DIV_MK 0x00030000 -#define __PSS_I2C_CLK_DIV_SH 16 -#define __PSS_I2C_CLK_DIV(_v) ((_v) << __PSS_I2C_CLK_DIV_SH) -#define __PSS_LMEM_INIT_DONE 0x00001000 -#define __PSS_LMEM_RESET 0x00000200 -#define __PSS_LMEM_INIT_EN 0x00000100 -#define __PSS_LPU1_RESET 0x00000002 -#define __PSS_LPU0_RESET 0x00000001 -#define PSS_ERR_STATUS_REG 0x00018810 -#define __PSS_LMEM1_CORR_ERR 0x00000800 -#define __PSS_LMEM0_CORR_ERR 0x00000400 -#define __PSS_LMEM1_UNCORR_ERR 0x00000200 -#define __PSS_LMEM0_UNCORR_ERR 0x00000100 -#define __PSS_BAL_PERR 0x00000080 -#define __PSS_DIP_IF_ERR 0x00000040 -#define __PSS_IOH_IF_ERR 0x00000020 -#define __PSS_TDS_IF_ERR 0x00000010 -#define __PSS_RDS_IF_ERR 0x00000008 -#define __PSS_SGM_IF_ERR 0x00000004 -#define __PSS_LPU1_RAM_ERR 0x00000002 -#define __PSS_LPU0_RAM_ERR 0x00000001 -#define ERR_SET_REG 0x00018818 -#define __PSS_ERR_STATUS_SET 0x00000fff - - -/* - * These definitions are either in error/missing in spec. Its auto-generated - * from hard coded values in regparse.pl. 
- */ -#define __EMPHPOST_AT_4G_MK_FIX 0x0000001c -#define __EMPHPOST_AT_4G_SH_FIX 0x00000002 -#define __EMPHPRE_AT_4G_FIX 0x00000003 -#define __SFP_TXRATE_EN_FIX 0x00000100 -#define __SFP_RXRATE_EN_FIX 0x00000080 - - -/* - * These register definitions are auto-generated from hard coded values - * in regparse.pl. - */ -#define HOSTFN0_LPU_MBOX0_0 0x00019200 -#define HOSTFN1_LPU_MBOX0_8 0x00019260 -#define LPU_HOSTFN0_MBOX0_0 0x00019280 -#define LPU_HOSTFN1_MBOX0_8 0x000192e0 - - -/* - * These register mapping definitions are auto-generated from mapping tables - * in regparse.pl. - */ -#define BFA_IOC0_HBEAT_REG HOST_SEM0_INFO_REG -#define BFA_IOC0_STATE_REG HOST_SEM1_INFO_REG -#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG -#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG -#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG -#define BFA_IOC_FAIL_SYNC HOST_SEM5_INFO_REG - -#define CPE_Q_DEPTH(__n) \ - (CPE_Q0_DEPTH + (__n) * (CPE_Q1_DEPTH - CPE_Q0_DEPTH)) -#define CPE_Q_PI(__n) \ - (CPE_Q0_PI + (__n) * (CPE_Q1_PI - CPE_Q0_PI)) -#define CPE_Q_CI(__n) \ - (CPE_Q0_CI + (__n) * (CPE_Q1_CI - CPE_Q0_CI)) -#define RME_Q_DEPTH(__n) \ - (RME_Q0_DEPTH + (__n) * (RME_Q1_DEPTH - RME_Q0_DEPTH)) -#define RME_Q_PI(__n) \ - (RME_Q0_PI + (__n) * (RME_Q1_PI - RME_Q0_PI)) -#define RME_Q_CI(__n) \ - (RME_Q0_CI + (__n) * (RME_Q1_CI - RME_Q0_CI)) - -#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q)) -#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q)) -#define CPE_Q_MASK(__q) ((__q) & 0x3) -#define RME_Q_MASK(__q) ((__q) & 0x3) - - -/* - * PCI MSI-X vector defines - */ -enum { - BFA_MSIX_CPE_Q0 = 0, - BFA_MSIX_CPE_Q1 = 1, - BFA_MSIX_CPE_Q2 = 2, - BFA_MSIX_CPE_Q3 = 3, - BFA_MSIX_CPE_Q4 = 4, - BFA_MSIX_CPE_Q5 = 5, - BFA_MSIX_CPE_Q6 = 6, - BFA_MSIX_CPE_Q7 = 7, - BFA_MSIX_RME_Q0 = 8, - BFA_MSIX_RME_Q1 = 9, - BFA_MSIX_RME_Q2 = 10, - BFA_MSIX_RME_Q3 = 11, - BFA_MSIX_RME_Q4 = 12, - BFA_MSIX_RME_Q5 = 13, - BFA_MSIX_RME_Q6 = 14, - BFA_MSIX_RME_Q7 = 15, - BFA_MSIX_ERR_EMC = 16, - BFA_MSIX_ERR_LPU0 = 17, - BFA_MSIX_ERR_LPU1 = 18, - BFA_MSIX_ERR_PSS = 19, - BFA_MSIX_MBOX_LPU0 = 20, - BFA_MSIX_MBOX_LPU1 = 21, - BFA_MSIX_CB_MAX = 22, -}; - -/* - * And corresponding host interrupt status bit field defines - */ -#define __HFN_INT_CPE_Q0 0x00000001U -#define __HFN_INT_CPE_Q1 0x00000002U -#define __HFN_INT_CPE_Q2 0x00000004U -#define __HFN_INT_CPE_Q3 0x00000008U -#define __HFN_INT_CPE_Q4 0x00000010U -#define __HFN_INT_CPE_Q5 0x00000020U -#define __HFN_INT_CPE_Q6 0x00000040U -#define __HFN_INT_CPE_Q7 0x00000080U -#define __HFN_INT_RME_Q0 0x00000100U -#define __HFN_INT_RME_Q1 0x00000200U -#define __HFN_INT_RME_Q2 0x00000400U -#define __HFN_INT_RME_Q3 0x00000800U -#define __HFN_INT_RME_Q4 0x00001000U -#define __HFN_INT_RME_Q5 0x00002000U -#define __HFN_INT_RME_Q6 0x00004000U -#define __HFN_INT_RME_Q7 0x00008000U -#define __HFN_INT_ERR_EMC 0x00010000U -#define __HFN_INT_ERR_LPU0 0x00020000U -#define __HFN_INT_ERR_LPU1 0x00040000U -#define __HFN_INT_ERR_PSS 0x00080000U -#define __HFN_INT_MBOX_LPU0 0x00100000U -#define __HFN_INT_MBOX_LPU1 0x00200000U -#define __HFN_INT_MBOX1_LPU0 0x00400000U -#define __HFN_INT_MBOX1_LPU1 0x00800000U -#define __HFN_INT_CPE_MASK 0x000000ffU -#define __HFN_INT_RME_MASK 0x0000ff00U - - -/* - * crossbow memory map. 
- */ -#define PSS_SMEM_PAGE_START 0x8000 -#define PSS_SMEM_PGNUM(_pg0, _ma) ((_pg0) + ((_ma) >> 15)) -#define PSS_SMEM_PGOFF(_ma) ((_ma) & 0x7fff) - -/* - * End of crossbow memory map - */ - - -#endif /* __BFI_CBREG_H__ */ - diff --git a/drivers/scsi/bfa/bfi_ctreg.h b/drivers/scsi/bfa/bfi_ctreg.h deleted file mode 100644 index fc4ce4a5a18..00000000000 --- a/drivers/scsi/bfa/bfi_ctreg.h +++ /dev/null @@ -1,636 +0,0 @@ - -/* - * bfi_ctreg.h catapult host block register definitions - * - * !!! Do not edit. Auto generated. !!! - */ - -#ifndef __BFI_CTREG_H__ -#define __BFI_CTREG_H__ - - -#define HOSTFN0_LPU_MBOX0_0 0x00019200 -#define HOSTFN1_LPU_MBOX0_8 0x00019260 -#define LPU_HOSTFN0_MBOX0_0 0x00019280 -#define LPU_HOSTFN1_MBOX0_8 0x000192e0 -#define HOSTFN2_LPU_MBOX0_0 0x00019400 -#define HOSTFN3_LPU_MBOX0_8 0x00019460 -#define LPU_HOSTFN2_MBOX0_0 0x00019480 -#define LPU_HOSTFN3_MBOX0_8 0x000194e0 -#define HOSTFN0_INT_STATUS 0x00014000 -#define __HOSTFN0_HALT_OCCURRED 0x01000000 -#define __HOSTFN0_INT_STATUS_LVL_MK 0x00f00000 -#define __HOSTFN0_INT_STATUS_LVL_SH 20 -#define __HOSTFN0_INT_STATUS_LVL(_v) ((_v) << __HOSTFN0_INT_STATUS_LVL_SH) -#define __HOSTFN0_INT_STATUS_P_MK 0x000f0000 -#define __HOSTFN0_INT_STATUS_P_SH 16 -#define __HOSTFN0_INT_STATUS_P(_v) ((_v) << __HOSTFN0_INT_STATUS_P_SH) -#define __HOSTFN0_INT_STATUS_F 0x0000ffff -#define HOSTFN0_INT_MSK 0x00014004 -#define HOST_PAGE_NUM_FN0 0x00014008 -#define __HOST_PAGE_NUM_FN 0x000001ff -#define HOST_MSIX_ERR_INDEX_FN0 0x0001400c -#define __MSIX_ERR_INDEX_FN 0x000001ff -#define HOSTFN1_INT_STATUS 0x00014100 -#define __HOSTFN1_HALT_OCCURRED 0x01000000 -#define __HOSTFN1_INT_STATUS_LVL_MK 0x00f00000 -#define __HOSTFN1_INT_STATUS_LVL_SH 20 -#define __HOSTFN1_INT_STATUS_LVL(_v) ((_v) << __HOSTFN1_INT_STATUS_LVL_SH) -#define __HOSTFN1_INT_STATUS_P_MK 0x000f0000 -#define __HOSTFN1_INT_STATUS_P_SH 16 -#define __HOSTFN1_INT_STATUS_P(_v) ((_v) << __HOSTFN1_INT_STATUS_P_SH) -#define __HOSTFN1_INT_STATUS_F 0x0000ffff -#define HOSTFN1_INT_MSK 0x00014104 -#define HOST_PAGE_NUM_FN1 0x00014108 -#define HOST_MSIX_ERR_INDEX_FN1 0x0001410c -#define APP_PLL_425_CTL_REG 0x00014204 -#define __P_425_PLL_LOCK 0x80000000 -#define __APP_PLL_425_SRAM_USE_100MHZ 0x00100000 -#define __APP_PLL_425_RESET_TIMER_MK 0x000e0000 -#define __APP_PLL_425_RESET_TIMER_SH 17 -#define __APP_PLL_425_RESET_TIMER(_v) ((_v) << __APP_PLL_425_RESET_TIMER_SH) -#define __APP_PLL_425_LOGIC_SOFT_RESET 0x00010000 -#define __APP_PLL_425_CNTLMT0_1_MK 0x0000c000 -#define __APP_PLL_425_CNTLMT0_1_SH 14 -#define __APP_PLL_425_CNTLMT0_1(_v) ((_v) << __APP_PLL_425_CNTLMT0_1_SH) -#define __APP_PLL_425_JITLMT0_1_MK 0x00003000 -#define __APP_PLL_425_JITLMT0_1_SH 12 -#define __APP_PLL_425_JITLMT0_1(_v) ((_v) << __APP_PLL_425_JITLMT0_1_SH) -#define __APP_PLL_425_HREF 0x00000800 -#define __APP_PLL_425_HDIV 0x00000400 -#define __APP_PLL_425_P0_1_MK 0x00000300 -#define __APP_PLL_425_P0_1_SH 8 -#define __APP_PLL_425_P0_1(_v) ((_v) << __APP_PLL_425_P0_1_SH) -#define __APP_PLL_425_Z0_2_MK 0x000000e0 -#define __APP_PLL_425_Z0_2_SH 5 -#define __APP_PLL_425_Z0_2(_v) ((_v) << __APP_PLL_425_Z0_2_SH) -#define __APP_PLL_425_RSEL200500 0x00000010 -#define __APP_PLL_425_ENARST 0x00000008 -#define __APP_PLL_425_BYPASS 0x00000004 -#define __APP_PLL_425_LRESETN 0x00000002 -#define __APP_PLL_425_ENABLE 0x00000001 -#define APP_PLL_312_CTL_REG 0x00014208 -#define __P_312_PLL_LOCK 0x80000000 -#define __ENABLE_MAC_AHB_1 0x00800000 -#define __ENABLE_MAC_AHB_0 0x00400000 -#define __ENABLE_MAC_1 0x00200000 -#define 
__ENABLE_MAC_0 0x00100000 -#define __APP_PLL_312_RESET_TIMER_MK 0x000e0000 -#define __APP_PLL_312_RESET_TIMER_SH 17 -#define __APP_PLL_312_RESET_TIMER(_v) ((_v) << __APP_PLL_312_RESET_TIMER_SH) -#define __APP_PLL_312_LOGIC_SOFT_RESET 0x00010000 -#define __APP_PLL_312_CNTLMT0_1_MK 0x0000c000 -#define __APP_PLL_312_CNTLMT0_1_SH 14 -#define __APP_PLL_312_CNTLMT0_1(_v) ((_v) << __APP_PLL_312_CNTLMT0_1_SH) -#define __APP_PLL_312_JITLMT0_1_MK 0x00003000 -#define __APP_PLL_312_JITLMT0_1_SH 12 -#define __APP_PLL_312_JITLMT0_1(_v) ((_v) << __APP_PLL_312_JITLMT0_1_SH) -#define __APP_PLL_312_HREF 0x00000800 -#define __APP_PLL_312_HDIV 0x00000400 -#define __APP_PLL_312_P0_1_MK 0x00000300 -#define __APP_PLL_312_P0_1_SH 8 -#define __APP_PLL_312_P0_1(_v) ((_v) << __APP_PLL_312_P0_1_SH) -#define __APP_PLL_312_Z0_2_MK 0x000000e0 -#define __APP_PLL_312_Z0_2_SH 5 -#define __APP_PLL_312_Z0_2(_v) ((_v) << __APP_PLL_312_Z0_2_SH) -#define __APP_PLL_312_RSEL200500 0x00000010 -#define __APP_PLL_312_ENARST 0x00000008 -#define __APP_PLL_312_BYPASS 0x00000004 -#define __APP_PLL_312_LRESETN 0x00000002 -#define __APP_PLL_312_ENABLE 0x00000001 -#define MBIST_CTL_REG 0x00014220 -#define __EDRAM_BISTR_START 0x00000004 -#define __MBIST_RESET 0x00000002 -#define __MBIST_START 0x00000001 -#define MBIST_STAT_REG 0x00014224 -#define __EDRAM_BISTR_STATUS 0x00000008 -#define __EDRAM_BISTR_DONE 0x00000004 -#define __MEM_BIT_STATUS 0x00000002 -#define __MBIST_DONE 0x00000001 -#define HOST_SEM0_REG 0x00014230 -#define __HOST_SEMAPHORE 0x00000001 -#define HOST_SEM1_REG 0x00014234 -#define HOST_SEM2_REG 0x00014238 -#define HOST_SEM3_REG 0x0001423c -#define HOST_SEM0_INFO_REG 0x00014240 -#define HOST_SEM1_INFO_REG 0x00014244 -#define HOST_SEM2_INFO_REG 0x00014248 -#define HOST_SEM3_INFO_REG 0x0001424c -#define ETH_MAC_SER_REG 0x00014288 -#define __APP_EMS_CKBUFAMPIN 0x00000020 -#define __APP_EMS_REFCLKSEL 0x00000010 -#define __APP_EMS_CMLCKSEL 0x00000008 -#define __APP_EMS_REFCKBUFEN2 0x00000004 -#define __APP_EMS_REFCKBUFEN1 0x00000002 -#define __APP_EMS_CHANNEL_SEL 0x00000001 -#define HOSTFN2_INT_STATUS 0x00014300 -#define __HOSTFN2_HALT_OCCURRED 0x01000000 -#define __HOSTFN2_INT_STATUS_LVL_MK 0x00f00000 -#define __HOSTFN2_INT_STATUS_LVL_SH 20 -#define __HOSTFN2_INT_STATUS_LVL(_v) ((_v) << __HOSTFN2_INT_STATUS_LVL_SH) -#define __HOSTFN2_INT_STATUS_P_MK 0x000f0000 -#define __HOSTFN2_INT_STATUS_P_SH 16 -#define __HOSTFN2_INT_STATUS_P(_v) ((_v) << __HOSTFN2_INT_STATUS_P_SH) -#define __HOSTFN2_INT_STATUS_F 0x0000ffff -#define HOSTFN2_INT_MSK 0x00014304 -#define HOST_PAGE_NUM_FN2 0x00014308 -#define HOST_MSIX_ERR_INDEX_FN2 0x0001430c -#define HOSTFN3_INT_STATUS 0x00014400 -#define __HALT_OCCURRED 0x01000000 -#define __HOSTFN3_INT_STATUS_LVL_MK 0x00f00000 -#define __HOSTFN3_INT_STATUS_LVL_SH 20 -#define __HOSTFN3_INT_STATUS_LVL(_v) ((_v) << __HOSTFN3_INT_STATUS_LVL_SH) -#define __HOSTFN3_INT_STATUS_P_MK 0x000f0000 -#define __HOSTFN3_INT_STATUS_P_SH 16 -#define __HOSTFN3_INT_STATUS_P(_v) ((_v) << __HOSTFN3_INT_STATUS_P_SH) -#define __HOSTFN3_INT_STATUS_F 0x0000ffff -#define HOSTFN3_INT_MSK 0x00014404 -#define HOST_PAGE_NUM_FN3 0x00014408 -#define HOST_MSIX_ERR_INDEX_FN3 0x0001440c -#define FNC_ID_REG 0x00014600 -#define __FUNCTION_NUMBER 0x00000007 -#define FNC_PERS_REG 0x00014604 -#define __F3_FUNCTION_ACTIVE 0x80000000 -#define __F3_FUNCTION_MODE 0x40000000 -#define __F3_PORT_MAP_MK 0x30000000 -#define __F3_PORT_MAP_SH 28 -#define __F3_PORT_MAP(_v) ((_v) << __F3_PORT_MAP_SH) -#define __F3_VM_MODE 0x08000000 -#define __F3_INTX_STATUS_MK 
0x07000000 -#define __F3_INTX_STATUS_SH 24 -#define __F3_INTX_STATUS(_v) ((_v) << __F3_INTX_STATUS_SH) -#define __F2_FUNCTION_ACTIVE 0x00800000 -#define __F2_FUNCTION_MODE 0x00400000 -#define __F2_PORT_MAP_MK 0x00300000 -#define __F2_PORT_MAP_SH 20 -#define __F2_PORT_MAP(_v) ((_v) << __F2_PORT_MAP_SH) -#define __F2_VM_MODE 0x00080000 -#define __F2_INTX_STATUS_MK 0x00070000 -#define __F2_INTX_STATUS_SH 16 -#define __F2_INTX_STATUS(_v) ((_v) << __F2_INTX_STATUS_SH) -#define __F1_FUNCTION_ACTIVE 0x00008000 -#define __F1_FUNCTION_MODE 0x00004000 -#define __F1_PORT_MAP_MK 0x00003000 -#define __F1_PORT_MAP_SH 12 -#define __F1_PORT_MAP(_v) ((_v) << __F1_PORT_MAP_SH) -#define __F1_VM_MODE 0x00000800 -#define __F1_INTX_STATUS_MK 0x00000700 -#define __F1_INTX_STATUS_SH 8 -#define __F1_INTX_STATUS(_v) ((_v) << __F1_INTX_STATUS_SH) -#define __F0_FUNCTION_ACTIVE 0x00000080 -#define __F0_FUNCTION_MODE 0x00000040 -#define __F0_PORT_MAP_MK 0x00000030 -#define __F0_PORT_MAP_SH 4 -#define __F0_PORT_MAP(_v) ((_v) << __F0_PORT_MAP_SH) -#define __F0_VM_MODE 0x00000008 -#define __F0_INTX_STATUS 0x00000007 -enum { - __F0_INTX_STATUS_MSIX = 0x0, - __F0_INTX_STATUS_INTA = 0x1, - __F0_INTX_STATUS_INTB = 0x2, - __F0_INTX_STATUS_INTC = 0x3, - __F0_INTX_STATUS_INTD = 0x4, -}; -#define OP_MODE 0x0001460c -#define __APP_ETH_CLK_LOWSPEED 0x00000004 -#define __GLOBAL_CORECLK_HALFSPEED 0x00000002 -#define __GLOBAL_FCOE_MODE 0x00000001 -#define HOST_SEM4_REG 0x00014610 -#define HOST_SEM5_REG 0x00014614 -#define HOST_SEM6_REG 0x00014618 -#define HOST_SEM7_REG 0x0001461c -#define HOST_SEM4_INFO_REG 0x00014620 -#define HOST_SEM5_INFO_REG 0x00014624 -#define HOST_SEM6_INFO_REG 0x00014628 -#define HOST_SEM7_INFO_REG 0x0001462c -#define HOSTFN0_LPU0_MBOX0_CMD_STAT 0x00019000 -#define __HOSTFN0_LPU0_MBOX0_INFO_MK 0xfffffffe -#define __HOSTFN0_LPU0_MBOX0_INFO_SH 1 -#define __HOSTFN0_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN0_LPU0_MBOX0_INFO_SH) -#define __HOSTFN0_LPU0_MBOX0_CMD_STATUS 0x00000001 -#define HOSTFN0_LPU1_MBOX0_CMD_STAT 0x00019004 -#define __HOSTFN0_LPU1_MBOX0_INFO_MK 0xfffffffe -#define __HOSTFN0_LPU1_MBOX0_INFO_SH 1 -#define __HOSTFN0_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN0_LPU1_MBOX0_INFO_SH) -#define __HOSTFN0_LPU1_MBOX0_CMD_STATUS 0x00000001 -#define LPU0_HOSTFN0_MBOX0_CMD_STAT 0x00019008 -#define __LPU0_HOSTFN0_MBOX0_INFO_MK 0xfffffffe -#define __LPU0_HOSTFN0_MBOX0_INFO_SH 1 -#define __LPU0_HOSTFN0_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN0_MBOX0_INFO_SH) -#define __LPU0_HOSTFN0_MBOX0_CMD_STATUS 0x00000001 -#define LPU1_HOSTFN0_MBOX0_CMD_STAT 0x0001900c -#define __LPU1_HOSTFN0_MBOX0_INFO_MK 0xfffffffe -#define __LPU1_HOSTFN0_MBOX0_INFO_SH 1 -#define __LPU1_HOSTFN0_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN0_MBOX0_INFO_SH) -#define __LPU1_HOSTFN0_MBOX0_CMD_STATUS 0x00000001 -#define HOSTFN1_LPU0_MBOX0_CMD_STAT 0x00019010 -#define __HOSTFN1_LPU0_MBOX0_INFO_MK 0xfffffffe -#define __HOSTFN1_LPU0_MBOX0_INFO_SH 1 -#define __HOSTFN1_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN1_LPU0_MBOX0_INFO_SH) -#define __HOSTFN1_LPU0_MBOX0_CMD_STATUS 0x00000001 -#define HOSTFN1_LPU1_MBOX0_CMD_STAT 0x00019014 -#define __HOSTFN1_LPU1_MBOX0_INFO_MK 0xfffffffe -#define __HOSTFN1_LPU1_MBOX0_INFO_SH 1 -#define __HOSTFN1_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN1_LPU1_MBOX0_INFO_SH) -#define __HOSTFN1_LPU1_MBOX0_CMD_STATUS 0x00000001 -#define LPU0_HOSTFN1_MBOX0_CMD_STAT 0x00019018 -#define __LPU0_HOSTFN1_MBOX0_INFO_MK 0xfffffffe -#define __LPU0_HOSTFN1_MBOX0_INFO_SH 1 -#define __LPU0_HOSTFN1_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN1_MBOX0_INFO_SH) -#define 
__LPU0_HOSTFN1_MBOX0_CMD_STATUS 0x00000001 -#define LPU1_HOSTFN1_MBOX0_CMD_STAT 0x0001901c -#define __LPU1_HOSTFN1_MBOX0_INFO_MK 0xfffffffe -#define __LPU1_HOSTFN1_MBOX0_INFO_SH 1 -#define __LPU1_HOSTFN1_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN1_MBOX0_INFO_SH) -#define __LPU1_HOSTFN1_MBOX0_CMD_STATUS 0x00000001 -#define HOSTFN2_LPU0_MBOX0_CMD_STAT 0x00019150 -#define __HOSTFN2_LPU0_MBOX0_INFO_MK 0xfffffffe -#define __HOSTFN2_LPU0_MBOX0_INFO_SH 1 -#define __HOSTFN2_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN2_LPU0_MBOX0_INFO_SH) -#define __HOSTFN2_LPU0_MBOX0_CMD_STATUS 0x00000001 -#define HOSTFN2_LPU1_MBOX0_CMD_STAT 0x00019154 -#define __HOSTFN2_LPU1_MBOX0_INFO_MK 0xfffffffe -#define __HOSTFN2_LPU1_MBOX0_INFO_SH 1 -#define __HOSTFN2_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN2_LPU1_MBOX0_INFO_SH) -#define __HOSTFN2_LPU1_MBOX0BOX0_CMD_STATUS 0x00000001 -#define LPU0_HOSTFN2_MBOX0_CMD_STAT 0x00019158 -#define __LPU0_HOSTFN2_MBOX0_INFO_MK 0xfffffffe -#define __LPU0_HOSTFN2_MBOX0_INFO_SH 1 -#define __LPU0_HOSTFN2_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN2_MBOX0_INFO_SH) -#define __LPU0_HOSTFN2_MBOX0_CMD_STATUS 0x00000001 -#define LPU1_HOSTFN2_MBOX0_CMD_STAT 0x0001915c -#define __LPU1_HOSTFN2_MBOX0_INFO_MK 0xfffffffe -#define __LPU1_HOSTFN2_MBOX0_INFO_SH 1 -#define __LPU1_HOSTFN2_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN2_MBOX0_INFO_SH) -#define __LPU1_HOSTFN2_MBOX0_CMD_STATUS 0x00000001 -#define HOSTFN3_LPU0_MBOX0_CMD_STAT 0x00019160 -#define __HOSTFN3_LPU0_MBOX0_INFO_MK 0xfffffffe -#define __HOSTFN3_LPU0_MBOX0_INFO_SH 1 -#define __HOSTFN3_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN3_LPU0_MBOX0_INFO_SH) -#define __HOSTFN3_LPU0_MBOX0_CMD_STATUS 0x00000001 -#define HOSTFN3_LPU1_MBOX0_CMD_STAT 0x00019164 -#define __HOSTFN3_LPU1_MBOX0_INFO_MK 0xfffffffe -#define __HOSTFN3_LPU1_MBOX0_INFO_SH 1 -#define __HOSTFN3_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN3_LPU1_MBOX0_INFO_SH) -#define __HOSTFN3_LPU1_MBOX0_CMD_STATUS 0x00000001 -#define LPU0_HOSTFN3_MBOX0_CMD_STAT 0x00019168 -#define __LPU0_HOSTFN3_MBOX0_INFO_MK 0xfffffffe -#define __LPU0_HOSTFN3_MBOX0_INFO_SH 1 -#define __LPU0_HOSTFN3_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN3_MBOX0_INFO_SH) -#define __LPU0_HOSTFN3_MBOX0_CMD_STATUS 0x00000001 -#define LPU1_HOSTFN3_MBOX0_CMD_STAT 0x0001916c -#define __LPU1_HOSTFN3_MBOX0_INFO_MK 0xfffffffe -#define __LPU1_HOSTFN3_MBOX0_INFO_SH 1 -#define __LPU1_HOSTFN3_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN3_MBOX0_INFO_SH) -#define __LPU1_HOSTFN3_MBOX0_CMD_STATUS 0x00000001 -#define FW_INIT_HALT_P0 0x000191ac -#define __FW_INIT_HALT_P 0x00000001 -#define FW_INIT_HALT_P1 0x000191bc -#define CPE_PI_PTR_Q0 0x00038000 -#define __CPE_PI_UNUSED_MK 0xffff0000 -#define __CPE_PI_UNUSED_SH 16 -#define __CPE_PI_UNUSED(_v) ((_v) << __CPE_PI_UNUSED_SH) -#define __CPE_PI_PTR 0x0000ffff -#define CPE_PI_PTR_Q1 0x00038040 -#define CPE_CI_PTR_Q0 0x00038004 -#define __CPE_CI_UNUSED_MK 0xffff0000 -#define __CPE_CI_UNUSED_SH 16 -#define __CPE_CI_UNUSED(_v) ((_v) << __CPE_CI_UNUSED_SH) -#define __CPE_CI_PTR 0x0000ffff -#define CPE_CI_PTR_Q1 0x00038044 -#define CPE_DEPTH_Q0 0x00038008 -#define __CPE_DEPTH_UNUSED_MK 0xf8000000 -#define __CPE_DEPTH_UNUSED_SH 27 -#define __CPE_DEPTH_UNUSED(_v) ((_v) << __CPE_DEPTH_UNUSED_SH) -#define __CPE_MSIX_VEC_INDEX_MK 0x07ff0000 -#define __CPE_MSIX_VEC_INDEX_SH 16 -#define __CPE_MSIX_VEC_INDEX(_v) ((_v) << __CPE_MSIX_VEC_INDEX_SH) -#define __CPE_DEPTH 0x0000ffff -#define CPE_DEPTH_Q1 0x00038048 -#define CPE_QCTRL_Q0 0x0003800c -#define __CPE_CTRL_UNUSED30_MK 0xfc000000 -#define __CPE_CTRL_UNUSED30_SH 26 -#define __CPE_CTRL_UNUSED30(_v) ((_v) 
<< __CPE_CTRL_UNUSED30_SH) -#define __CPE_FUNC_INT_CTRL_MK 0x03000000 -#define __CPE_FUNC_INT_CTRL_SH 24 -#define __CPE_FUNC_INT_CTRL(_v) ((_v) << __CPE_FUNC_INT_CTRL_SH) -enum { - __CPE_FUNC_INT_CTRL_DISABLE = 0x0, - __CPE_FUNC_INT_CTRL_F2NF = 0x1, - __CPE_FUNC_INT_CTRL_3QUART = 0x2, - __CPE_FUNC_INT_CTRL_HALF = 0x3, -}; -#define __CPE_CTRL_UNUSED20_MK 0x00f00000 -#define __CPE_CTRL_UNUSED20_SH 20 -#define __CPE_CTRL_UNUSED20(_v) ((_v) << __CPE_CTRL_UNUSED20_SH) -#define __CPE_SCI_TH_MK 0x000f0000 -#define __CPE_SCI_TH_SH 16 -#define __CPE_SCI_TH(_v) ((_v) << __CPE_SCI_TH_SH) -#define __CPE_CTRL_UNUSED10_MK 0x0000c000 -#define __CPE_CTRL_UNUSED10_SH 14 -#define __CPE_CTRL_UNUSED10(_v) ((_v) << __CPE_CTRL_UNUSED10_SH) -#define __CPE_ACK_PENDING 0x00002000 -#define __CPE_CTRL_UNUSED40_MK 0x00001c00 -#define __CPE_CTRL_UNUSED40_SH 10 -#define __CPE_CTRL_UNUSED40(_v) ((_v) << __CPE_CTRL_UNUSED40_SH) -#define __CPE_PCIEID_MK 0x00000300 -#define __CPE_PCIEID_SH 8 -#define __CPE_PCIEID(_v) ((_v) << __CPE_PCIEID_SH) -#define __CPE_CTRL_UNUSED00_MK 0x000000fe -#define __CPE_CTRL_UNUSED00_SH 1 -#define __CPE_CTRL_UNUSED00(_v) ((_v) << __CPE_CTRL_UNUSED00_SH) -#define __CPE_ESIZE 0x00000001 -#define CPE_QCTRL_Q1 0x0003804c -#define __CPE_CTRL_UNUSED31_MK 0xfc000000 -#define __CPE_CTRL_UNUSED31_SH 26 -#define __CPE_CTRL_UNUSED31(_v) ((_v) << __CPE_CTRL_UNUSED31_SH) -#define __CPE_CTRL_UNUSED21_MK 0x00f00000 -#define __CPE_CTRL_UNUSED21_SH 20 -#define __CPE_CTRL_UNUSED21(_v) ((_v) << __CPE_CTRL_UNUSED21_SH) -#define __CPE_CTRL_UNUSED11_MK 0x0000c000 -#define __CPE_CTRL_UNUSED11_SH 14 -#define __CPE_CTRL_UNUSED11(_v) ((_v) << __CPE_CTRL_UNUSED11_SH) -#define __CPE_CTRL_UNUSED41_MK 0x00001c00 -#define __CPE_CTRL_UNUSED41_SH 10 -#define __CPE_CTRL_UNUSED41(_v) ((_v) << __CPE_CTRL_UNUSED41_SH) -#define __CPE_CTRL_UNUSED01_MK 0x000000fe -#define __CPE_CTRL_UNUSED01_SH 1 -#define __CPE_CTRL_UNUSED01(_v) ((_v) << __CPE_CTRL_UNUSED01_SH) -#define RME_PI_PTR_Q0 0x00038020 -#define __LATENCY_TIME_STAMP_MK 0xffff0000 -#define __LATENCY_TIME_STAMP_SH 16 -#define __LATENCY_TIME_STAMP(_v) ((_v) << __LATENCY_TIME_STAMP_SH) -#define __RME_PI_PTR 0x0000ffff -#define RME_PI_PTR_Q1 0x00038060 -#define RME_CI_PTR_Q0 0x00038024 -#define __DELAY_TIME_STAMP_MK 0xffff0000 -#define __DELAY_TIME_STAMP_SH 16 -#define __DELAY_TIME_STAMP(_v) ((_v) << __DELAY_TIME_STAMP_SH) -#define __RME_CI_PTR 0x0000ffff -#define RME_CI_PTR_Q1 0x00038064 -#define RME_DEPTH_Q0 0x00038028 -#define __RME_DEPTH_UNUSED_MK 0xf8000000 -#define __RME_DEPTH_UNUSED_SH 27 -#define __RME_DEPTH_UNUSED(_v) ((_v) << __RME_DEPTH_UNUSED_SH) -#define __RME_MSIX_VEC_INDEX_MK 0x07ff0000 -#define __RME_MSIX_VEC_INDEX_SH 16 -#define __RME_MSIX_VEC_INDEX(_v) ((_v) << __RME_MSIX_VEC_INDEX_SH) -#define __RME_DEPTH 0x0000ffff -#define RME_DEPTH_Q1 0x00038068 -#define RME_QCTRL_Q0 0x0003802c -#define __RME_INT_LATENCY_TIMER_MK 0xff000000 -#define __RME_INT_LATENCY_TIMER_SH 24 -#define __RME_INT_LATENCY_TIMER(_v) ((_v) << __RME_INT_LATENCY_TIMER_SH) -#define __RME_INT_DELAY_TIMER_MK 0x00ff0000 -#define __RME_INT_DELAY_TIMER_SH 16 -#define __RME_INT_DELAY_TIMER(_v) ((_v) << __RME_INT_DELAY_TIMER_SH) -#define __RME_INT_DELAY_DISABLE 0x00008000 -#define __RME_DLY_DELAY_DISABLE 0x00004000 -#define __RME_ACK_PENDING 0x00002000 -#define __RME_FULL_INTERRUPT_DISABLE 0x00001000 -#define __RME_CTRL_UNUSED10_MK 0x00000c00 -#define __RME_CTRL_UNUSED10_SH 10 -#define __RME_CTRL_UNUSED10(_v) ((_v) << __RME_CTRL_UNUSED10_SH) -#define __RME_PCIEID_MK 0x00000300 -#define __RME_PCIEID_SH 8 
-#define __RME_PCIEID(_v) ((_v) << __RME_PCIEID_SH) -#define __RME_CTRL_UNUSED00_MK 0x000000fe -#define __RME_CTRL_UNUSED00_SH 1 -#define __RME_CTRL_UNUSED00(_v) ((_v) << __RME_CTRL_UNUSED00_SH) -#define __RME_ESIZE 0x00000001 -#define RME_QCTRL_Q1 0x0003806c -#define __RME_CTRL_UNUSED11_MK 0x00000c00 -#define __RME_CTRL_UNUSED11_SH 10 -#define __RME_CTRL_UNUSED11(_v) ((_v) << __RME_CTRL_UNUSED11_SH) -#define __RME_CTRL_UNUSED01_MK 0x000000fe -#define __RME_CTRL_UNUSED01_SH 1 -#define __RME_CTRL_UNUSED01(_v) ((_v) << __RME_CTRL_UNUSED01_SH) -#define PSS_CTL_REG 0x00018800 -#define __PSS_I2C_CLK_DIV_MK 0x007f0000 -#define __PSS_I2C_CLK_DIV_SH 16 -#define __PSS_I2C_CLK_DIV(_v) ((_v) << __PSS_I2C_CLK_DIV_SH) -#define __PSS_LMEM_INIT_DONE 0x00001000 -#define __PSS_LMEM_RESET 0x00000200 -#define __PSS_LMEM_INIT_EN 0x00000100 -#define __PSS_LPU1_RESET 0x00000002 -#define __PSS_LPU0_RESET 0x00000001 -#define PSS_ERR_STATUS_REG 0x00018810 -#define __PSS_LPU1_TCM_READ_ERR 0x00200000 -#define __PSS_LPU0_TCM_READ_ERR 0x00100000 -#define __PSS_LMEM5_CORR_ERR 0x00080000 -#define __PSS_LMEM4_CORR_ERR 0x00040000 -#define __PSS_LMEM3_CORR_ERR 0x00020000 -#define __PSS_LMEM2_CORR_ERR 0x00010000 -#define __PSS_LMEM1_CORR_ERR 0x00008000 -#define __PSS_LMEM0_CORR_ERR 0x00004000 -#define __PSS_LMEM5_UNCORR_ERR 0x00002000 -#define __PSS_LMEM4_UNCORR_ERR 0x00001000 -#define __PSS_LMEM3_UNCORR_ERR 0x00000800 -#define __PSS_LMEM2_UNCORR_ERR 0x00000400 -#define __PSS_LMEM1_UNCORR_ERR 0x00000200 -#define __PSS_LMEM0_UNCORR_ERR 0x00000100 -#define __PSS_BAL_PERR 0x00000080 -#define __PSS_DIP_IF_ERR 0x00000040 -#define __PSS_IOH_IF_ERR 0x00000020 -#define __PSS_TDS_IF_ERR 0x00000010 -#define __PSS_RDS_IF_ERR 0x00000008 -#define __PSS_SGM_IF_ERR 0x00000004 -#define __PSS_LPU1_RAM_ERR 0x00000002 -#define __PSS_LPU0_RAM_ERR 0x00000001 -#define ERR_SET_REG 0x00018818 -#define __PSS_ERR_STATUS_SET 0x003fffff -#define PMM_1T_RESET_REG_P0 0x0002381c -#define __PMM_1T_RESET_P 0x00000001 -#define PMM_1T_RESET_REG_P1 0x00023c1c -#define HQM_QSET0_RXQ_DRBL_P0 0x00038000 -#define __RXQ0_ADD_VECTORS_P 0x80000000 -#define __RXQ0_STOP_P 0x40000000 -#define __RXQ0_PRD_PTR_P 0x0000ffff -#define HQM_QSET1_RXQ_DRBL_P0 0x00038080 -#define __RXQ1_ADD_VECTORS_P 0x80000000 -#define __RXQ1_STOP_P 0x40000000 -#define __RXQ1_PRD_PTR_P 0x0000ffff -#define HQM_QSET0_RXQ_DRBL_P1 0x0003c000 -#define HQM_QSET1_RXQ_DRBL_P1 0x0003c080 -#define HQM_QSET0_TXQ_DRBL_P0 0x00038020 -#define __TXQ0_ADD_VECTORS_P 0x80000000 -#define __TXQ0_STOP_P 0x40000000 -#define __TXQ0_PRD_PTR_P 0x0000ffff -#define HQM_QSET1_TXQ_DRBL_P0 0x000380a0 -#define __TXQ1_ADD_VECTORS_P 0x80000000 -#define __TXQ1_STOP_P 0x40000000 -#define __TXQ1_PRD_PTR_P 0x0000ffff -#define HQM_QSET0_TXQ_DRBL_P1 0x0003c020 -#define HQM_QSET1_TXQ_DRBL_P1 0x0003c0a0 -#define HQM_QSET0_IB_DRBL_1_P0 0x00038040 -#define __IB1_0_ACK_P 0x80000000 -#define __IB1_0_DISABLE_P 0x40000000 -#define __IB1_0_COALESCING_CFG_P_MK 0x00ff0000 -#define __IB1_0_COALESCING_CFG_P_SH 16 -#define __IB1_0_COALESCING_CFG_P(_v) ((_v) << __IB1_0_COALESCING_CFG_P_SH) -#define __IB1_0_NUM_OF_ACKED_EVENTS_P 0x0000ffff -#define HQM_QSET1_IB_DRBL_1_P0 0x000380c0 -#define __IB1_1_ACK_P 0x80000000 -#define __IB1_1_DISABLE_P 0x40000000 -#define __IB1_1_COALESCING_CFG_P_MK 0x00ff0000 -#define __IB1_1_COALESCING_CFG_P_SH 16 -#define __IB1_1_COALESCING_CFG_P(_v) ((_v) << __IB1_1_COALESCING_CFG_P_SH) -#define __IB1_1_NUM_OF_ACKED_EVENTS_P 0x0000ffff -#define HQM_QSET0_IB_DRBL_1_P1 0x0003c040 -#define HQM_QSET1_IB_DRBL_1_P1 0x0003c0c0 
-#define HQM_QSET0_IB_DRBL_2_P0 0x00038060 -#define __IB2_0_ACK_P 0x80000000 -#define __IB2_0_DISABLE_P 0x40000000 -#define __IB2_0_COALESCING_CFG_P_MK 0x00ff0000 -#define __IB2_0_COALESCING_CFG_P_SH 16 -#define __IB2_0_COALESCING_CFG_P(_v) ((_v) << __IB2_0_COALESCING_CFG_P_SH) -#define __IB2_0_NUM_OF_ACKED_EVENTS_P 0x0000ffff -#define HQM_QSET1_IB_DRBL_2_P0 0x000380e0 -#define __IB2_1_ACK_P 0x80000000 -#define __IB2_1_DISABLE_P 0x40000000 -#define __IB2_1_COALESCING_CFG_P_MK 0x00ff0000 -#define __IB2_1_COALESCING_CFG_P_SH 16 -#define __IB2_1_COALESCING_CFG_P(_v) ((_v) << __IB2_1_COALESCING_CFG_P_SH) -#define __IB2_1_NUM_OF_ACKED_EVENTS_P 0x0000ffff -#define HQM_QSET0_IB_DRBL_2_P1 0x0003c060 -#define HQM_QSET1_IB_DRBL_2_P1 0x0003c0e0 - - -/* - * These definitions are either in error/missing in spec. Its auto-generated - * from hard coded values in regparse.pl. - */ -#define __EMPHPOST_AT_4G_MK_FIX 0x0000001c -#define __EMPHPOST_AT_4G_SH_FIX 0x00000002 -#define __EMPHPRE_AT_4G_FIX 0x00000003 -#define __SFP_TXRATE_EN_FIX 0x00000100 -#define __SFP_RXRATE_EN_FIX 0x00000080 - - -/* - * These register definitions are auto-generated from hard coded values - * in regparse.pl. - */ - - -/* - * These register mapping definitions are auto-generated from mapping tables - * in regparse.pl. - */ -#define BFA_IOC0_HBEAT_REG HOST_SEM0_INFO_REG -#define BFA_IOC0_STATE_REG HOST_SEM1_INFO_REG -#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG -#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG -#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG -#define BFA_IOC_FAIL_SYNC HOST_SEM5_INFO_REG - -#define CPE_DEPTH_Q(__n) \ - (CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0)) -#define CPE_QCTRL_Q(__n) \ - (CPE_QCTRL_Q0 + (__n) * (CPE_QCTRL_Q1 - CPE_QCTRL_Q0)) -#define CPE_PI_PTR_Q(__n) \ - (CPE_PI_PTR_Q0 + (__n) * (CPE_PI_PTR_Q1 - CPE_PI_PTR_Q0)) -#define CPE_CI_PTR_Q(__n) \ - (CPE_CI_PTR_Q0 + (__n) * (CPE_CI_PTR_Q1 - CPE_CI_PTR_Q0)) -#define RME_DEPTH_Q(__n) \ - (RME_DEPTH_Q0 + (__n) * (RME_DEPTH_Q1 - RME_DEPTH_Q0)) -#define RME_QCTRL_Q(__n) \ - (RME_QCTRL_Q0 + (__n) * (RME_QCTRL_Q1 - RME_QCTRL_Q0)) -#define RME_PI_PTR_Q(__n) \ - (RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0)) -#define RME_CI_PTR_Q(__n) \ - (RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0)) -#define HQM_QSET_RXQ_DRBL_P0(__n) \ - (HQM_QSET0_RXQ_DRBL_P0 + (__n) * \ - (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0)) -#define HQM_QSET_TXQ_DRBL_P0(__n) \ - (HQM_QSET0_TXQ_DRBL_P0 + (__n) * \ - (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0)) -#define HQM_QSET_IB_DRBL_1_P0(__n) \ - (HQM_QSET0_IB_DRBL_1_P0 + (__n) * \ - (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0)) -#define HQM_QSET_IB_DRBL_2_P0(__n) \ - (HQM_QSET0_IB_DRBL_2_P0 + (__n) * \ - (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0)) -#define HQM_QSET_RXQ_DRBL_P1(__n) \ - (HQM_QSET0_RXQ_DRBL_P1 + (__n) * \ - (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1)) -#define HQM_QSET_TXQ_DRBL_P1(__n) \ - (HQM_QSET0_TXQ_DRBL_P1 + (__n) * \ - (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1)) -#define HQM_QSET_IB_DRBL_1_P1(__n) \ - (HQM_QSET0_IB_DRBL_1_P1 + (__n) * \ - (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1)) -#define HQM_QSET_IB_DRBL_2_P1(__n) \ - (HQM_QSET0_IB_DRBL_2_P1 + (__n) * \ - (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1)) - -#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q)) -#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q)) -#define CPE_Q_MASK(__q) ((__q) & 0x3) -#define RME_Q_MASK(__q) ((__q) & 0x3) - - -/* - * PCI MSI-X vector defines - */ -enum { - BFA_MSIX_CPE_Q0 = 
0, - BFA_MSIX_CPE_Q1 = 1, - BFA_MSIX_CPE_Q2 = 2, - BFA_MSIX_CPE_Q3 = 3, - BFA_MSIX_RME_Q0 = 4, - BFA_MSIX_RME_Q1 = 5, - BFA_MSIX_RME_Q2 = 6, - BFA_MSIX_RME_Q3 = 7, - BFA_MSIX_LPU_ERR = 8, - BFA_MSIX_CT_MAX = 9, -}; - -/* - * And corresponding host interrupt status bit field defines - */ -#define __HFN_INT_CPE_Q0 0x00000001U -#define __HFN_INT_CPE_Q1 0x00000002U -#define __HFN_INT_CPE_Q2 0x00000004U -#define __HFN_INT_CPE_Q3 0x00000008U -#define __HFN_INT_CPE_Q4 0x00000010U -#define __HFN_INT_CPE_Q5 0x00000020U -#define __HFN_INT_CPE_Q6 0x00000040U -#define __HFN_INT_CPE_Q7 0x00000080U -#define __HFN_INT_RME_Q0 0x00000100U -#define __HFN_INT_RME_Q1 0x00000200U -#define __HFN_INT_RME_Q2 0x00000400U -#define __HFN_INT_RME_Q3 0x00000800U -#define __HFN_INT_RME_Q4 0x00001000U -#define __HFN_INT_RME_Q5 0x00002000U -#define __HFN_INT_RME_Q6 0x00004000U -#define __HFN_INT_RME_Q7 0x00008000U -#define __HFN_INT_ERR_EMC 0x00010000U -#define __HFN_INT_ERR_LPU0 0x00020000U -#define __HFN_INT_ERR_LPU1 0x00040000U -#define __HFN_INT_ERR_PSS 0x00080000U -#define __HFN_INT_MBOX_LPU0 0x00100000U -#define __HFN_INT_MBOX_LPU1 0x00200000U -#define __HFN_INT_MBOX1_LPU0 0x00400000U -#define __HFN_INT_MBOX1_LPU1 0x00800000U -#define __HFN_INT_LL_HALT 0x01000000U -#define __HFN_INT_CPE_MASK 0x000000ffU -#define __HFN_INT_RME_MASK 0x0000ff00U - - -/* - * catapult memory map. - */ -#define LL_PGN_HQM0 0x0096 -#define LL_PGN_HQM1 0x0097 -#define PSS_SMEM_PAGE_START 0x8000 -#define PSS_SMEM_PGNUM(_pg0, _ma) ((_pg0) + ((_ma) >> 15)) -#define PSS_SMEM_PGOFF(_ma) ((_ma) & 0x7fff) - -/* - * End of catapult memory map - */ - - -#endif /* __BFI_CTREG_H__ */ diff --git a/drivers/scsi/bfa/bfi_ms.h b/drivers/scsi/bfa/bfi_ms.h index 19e888a5755..0d9f1fb50db 100644 --- a/drivers/scsi/bfa/bfi_ms.h +++ b/drivers/scsi/bfa/bfi_ms.h @@ -28,11 +28,17 @@ enum bfi_iocfc_h2i_msgs { BFI_IOCFC_H2I_CFG_REQ = 1, BFI_IOCFC_H2I_SET_INTR_REQ = 2, BFI_IOCFC_H2I_UPDATEQ_REQ = 3, + BFI_IOCFC_H2I_FAA_ENABLE_REQ = 4, + BFI_IOCFC_H2I_FAA_DISABLE_REQ = 5, + BFI_IOCFC_H2I_FAA_QUERY_REQ = 6, }; enum bfi_iocfc_i2h_msgs { BFI_IOCFC_I2H_CFG_REPLY = BFA_I2HM(1), BFI_IOCFC_I2H_UPDATEQ_RSP = BFA_I2HM(3), + BFI_IOCFC_I2H_FAA_ENABLE_RSP = BFA_I2HM(4), + BFI_IOCFC_I2H_FAA_DISABLE_RSP = BFA_I2HM(5), + BFI_IOCFC_I2H_FAA_QUERY_RSP = BFA_I2HM(6), }; struct bfi_iocfc_cfg_s { @@ -40,6 +46,12 @@ struct bfi_iocfc_cfg_s { u8 sense_buf_len; /* SCSI sense length */ u16 rsvd_1; u32 endian_sig; /* endian signature of host */ + u8 rsvd_2; + u8 single_msix_vec; + u8 rsvd[2]; + __be16 num_ioim_reqs; + __be16 num_fwtio_reqs; + /* * Request and response circular queue base addresses, size and @@ -54,7 +66,8 @@ struct bfi_iocfc_cfg_s { union bfi_addr_u stats_addr; /* DMA-able address for stats */ union bfi_addr_u cfgrsp_addr; /* config response dma address */ - union bfi_addr_u ioim_snsbase; /* IO sense buffer base address */ + union bfi_addr_u ioim_snsbase[BFI_IOIM_SNSBUF_SEGS]; + /* IO sense buf base addr segments */ struct bfa_iocfc_intr_attr_s intr_attr; /* IOC interrupt attributes */ }; @@ -68,11 +81,25 @@ struct bfi_iocfc_bootwwns { u8 rsvd[7]; }; +/** + * Queue configuration response from firmware + */ +struct bfi_iocfc_qreg_s { + u32 cpe_q_ci_off[BFI_IOC_MAX_CQS]; + u32 cpe_q_pi_off[BFI_IOC_MAX_CQS]; + u32 cpe_qctl_off[BFI_IOC_MAX_CQS]; + u32 rme_q_ci_off[BFI_IOC_MAX_CQS]; + u32 rme_q_pi_off[BFI_IOC_MAX_CQS]; + u32 rme_qctl_off[BFI_IOC_MAX_CQS]; + u8 hw_qid[BFI_IOC_MAX_CQS]; +}; + struct bfi_iocfc_cfgrsp_s { struct bfa_iocfc_fwcfg_s fwcfg; struct bfa_iocfc_intr_attr_s 
intr_attr; struct bfi_iocfc_bootwwns bootwwns; struct bfi_pbc_s pbc_cfg; + struct bfi_iocfc_qreg_s qreg; }; /* @@ -150,6 +177,37 @@ union bfi_iocfc_i2h_msg_u { u32 mboxmsg[BFI_IOC_MSGSZ]; }; +/* + * BFI_IOCFC_H2I_FAA_ENABLE_REQ BFI_IOCFC_H2I_FAA_DISABLE_REQ message + */ +struct bfi_faa_en_dis_s { + struct bfi_mhdr_s mh; /* common msg header */ +}; + +/* + * BFI_IOCFC_H2I_FAA_QUERY_REQ message + */ +struct bfi_faa_query_s { + struct bfi_mhdr_s mh; /* common msg header */ + u8 faa_status; /* FAA status */ + u8 addr_source; /* PWWN source */ + u8 rsvd[2]; + wwn_t faa; /* Fabric acquired PWWN */ +}; + +/* + * BFI_IOCFC_I2H_FAA_ENABLE_RSP, BFI_IOCFC_I2H_FAA_DISABLE_RSP message + */ +struct bfi_faa_en_dis_rsp_s { + struct bfi_mhdr_s mh; /* common msg header */ + u8 status; /* updateq status */ + u8 rsvd[3]; +}; + +/* + * BFI_IOCFC_I2H_FAA_QUERY_RSP message + */ +#define bfi_faa_query_rsp_t struct bfi_faa_query_s enum bfi_fcport_h2i { BFI_FCPORT_H2I_ENABLE_REQ = (1), @@ -213,7 +271,8 @@ struct bfi_fcport_enable_req_s { struct bfi_fcport_set_svc_params_req_s { struct bfi_mhdr_s mh; /* msg header */ __be16 tx_bbcredit; /* Tx credits */ - u16 rsvd; + u8 bb_scn; /* BB_SC FC credit recovery */ + u8 rsvd; }; /* @@ -293,12 +352,12 @@ struct bfi_fcxp_send_req_s { u8 class; /* FC class used for req/rsp */ u8 rsp_timeout; /* timeout in secs, 0-no response */ u8 cts; /* continue sequence */ - u8 lp_tag; /* lport tag */ + u8 lp_fwtag; /* lport tag */ struct fchs_s fchs; /* request FC header structure */ __be32 req_len; /* request payload length */ __be32 rsp_maxlen; /* max response length expected */ - struct bfi_sge_s req_sge[BFA_FCXP_MAX_SGES]; /* request buf */ - struct bfi_sge_s rsp_sge[BFA_FCXP_MAX_SGES]; /* response buf */ + struct bfi_alen_s req_alen; /* request buffer */ + struct bfi_alen_s rsp_alen; /* response buffer */ }; /* @@ -328,7 +387,7 @@ struct bfi_uf_buf_post_s { struct bfi_mhdr_s mh; /* Common msg header */ u16 buf_tag; /* buffer tag */ __be16 buf_len; /* total buffer length */ - struct bfi_sge_s sge[BFA_UF_MAX_SGES]; /* buffer DMA SGEs */ + struct bfi_alen_s alen; /* buffer address/len pair */ }; struct bfi_uf_frm_rcvd_s { @@ -346,26 +405,27 @@ enum bfi_lps_h2i_msgs { }; enum bfi_lps_i2h_msgs { - BFI_LPS_H2I_LOGIN_RSP = BFA_I2HM(1), - BFI_LPS_H2I_LOGOUT_RSP = BFA_I2HM(2), - BFI_LPS_H2I_CVL_EVENT = BFA_I2HM(3), + BFI_LPS_I2H_LOGIN_RSP = BFA_I2HM(1), + BFI_LPS_I2H_LOGOUT_RSP = BFA_I2HM(2), + BFI_LPS_I2H_CVL_EVENT = BFA_I2HM(3), }; struct bfi_lps_login_req_s { struct bfi_mhdr_s mh; /* common msg header */ - u8 lp_tag; + u8 bfa_tag; u8 alpa; __be16 pdu_size; wwn_t pwwn; wwn_t nwwn; u8 fdisc; u8 auth_en; - u8 rsvd[2]; + u8 lps_role; + u8 bb_scn; }; struct bfi_lps_login_rsp_s { struct bfi_mhdr_s mh; /* common msg header */ - u8 lp_tag; + u8 fw_tag; u8 status; u8 lsrjt_rsn; u8 lsrjt_expl; @@ -380,31 +440,33 @@ struct bfi_lps_login_rsp_s { mac_t fcf_mac; u8 ext_status; u8 brcd_switch; /* attached peer is brcd switch */ + u8 bb_scn; /* atatched port's bb_scn */ + u8 bfa_tag; }; struct bfi_lps_logout_req_s { struct bfi_mhdr_s mh; /* common msg header */ - u8 lp_tag; + u8 fw_tag; u8 rsvd[3]; wwn_t port_name; }; struct bfi_lps_logout_rsp_s { struct bfi_mhdr_s mh; /* common msg header */ - u8 lp_tag; + u8 bfa_tag; u8 status; u8 rsvd[2]; }; struct bfi_lps_cvl_event_s { struct bfi_mhdr_s mh; /* common msg header */ - u8 lp_tag; + u8 bfa_tag; u8 rsvd[3]; }; struct bfi_lps_n2n_pid_req_s { struct bfi_mhdr_s mh; /* common msg header */ - u8 lp_tag; + u8 fw_tag; u32 lp_pid:24; }; @@ -439,7 +501,7 @@ struct 
bfi_rport_create_req_s { u16 bfa_handle; /* host rport handle */ __be16 max_frmsz; /* max rcv pdu size */ u32 pid:24, /* remote port ID */ - lp_tag:8; /* local port tag */ + lp_fwtag:8; /* local port tag */ u32 local_pid:24, /* local port ID */ cisc:8; u8 fc_class; /* supported FC classes */ @@ -502,62 +564,63 @@ union bfi_rport_i2h_msg_u { * Initiator mode I-T nexus interface defines. */ -enum bfi_itnim_h2i { - BFI_ITNIM_H2I_CREATE_REQ = 1, /* i-t nexus creation */ - BFI_ITNIM_H2I_DELETE_REQ = 2, /* i-t nexus deletion */ +enum bfi_itn_h2i { + BFI_ITN_H2I_CREATE_REQ = 1, /* i-t nexus creation */ + BFI_ITN_H2I_DELETE_REQ = 2, /* i-t nexus deletion */ }; -enum bfi_itnim_i2h { - BFI_ITNIM_I2H_CREATE_RSP = BFA_I2HM(1), - BFI_ITNIM_I2H_DELETE_RSP = BFA_I2HM(2), - BFI_ITNIM_I2H_SLER_EVENT = BFA_I2HM(3), +enum bfi_itn_i2h { + BFI_ITN_I2H_CREATE_RSP = BFA_I2HM(1), + BFI_ITN_I2H_DELETE_RSP = BFA_I2HM(2), + BFI_ITN_I2H_SLER_EVENT = BFA_I2HM(3), }; -struct bfi_itnim_create_req_s { +struct bfi_itn_create_req_s { struct bfi_mhdr_s mh; /* common msg header */ u16 fw_handle; /* f/w handle for itnim */ u8 class; /* FC class for IO */ u8 seq_rec; /* sequence recovery support */ u8 msg_no; /* seq id of the msg */ + u8 role; }; -struct bfi_itnim_create_rsp_s { +struct bfi_itn_create_rsp_s { struct bfi_mhdr_s mh; /* common msg header */ u16 bfa_handle; /* bfa handle for itnim */ u8 status; /* fcp request status */ u8 seq_id; /* seq id of the msg */ }; -struct bfi_itnim_delete_req_s { +struct bfi_itn_delete_req_s { struct bfi_mhdr_s mh; /* common msg header */ u16 fw_handle; /* f/w itnim handle */ u8 seq_id; /* seq id of the msg */ u8 rsvd; }; -struct bfi_itnim_delete_rsp_s { +struct bfi_itn_delete_rsp_s { struct bfi_mhdr_s mh; /* common msg header */ u16 bfa_handle; /* bfa handle for itnim */ u8 status; /* fcp request status */ u8 seq_id; /* seq id of the msg */ }; -struct bfi_itnim_sler_event_s { +struct bfi_itn_sler_event_s { struct bfi_mhdr_s mh; /* common msg header */ u16 bfa_handle; /* bfa handle for itnim */ u16 rsvd; }; -union bfi_itnim_h2i_msg_u { - struct bfi_itnim_create_req_s *create_req; - struct bfi_itnim_delete_req_s *delete_req; +union bfi_itn_h2i_msg_u { + struct bfi_itn_create_req_s *create_req; + struct bfi_itn_delete_req_s *delete_req; struct bfi_msg_s *msg; }; -union bfi_itnim_i2h_msg_u { - struct bfi_itnim_create_rsp_s *create_rsp; - struct bfi_itnim_delete_rsp_s *delete_rsp; - struct bfi_itnim_sler_event_s *sler_event; +union bfi_itn_i2h_msg_u { + struct bfi_itn_create_rsp_s *create_rsp; + struct bfi_itn_delete_rsp_s *delete_rsp; + struct bfi_itn_sler_event_s *sler_event; struct bfi_msg_s *msg; }; @@ -693,7 +756,6 @@ enum bfi_ioim_status { BFI_IOIM_STS_PATHTOV = 8, }; -#define BFI_IOIM_SNSLEN (256) /* * I/O response message */ @@ -772,4 +834,27 @@ struct bfi_tskim_rsp_s { #pragma pack() +/* + * Crossbow PCI MSI-X vector defines + */ +enum { + BFI_MSIX_CPE_QMIN_CB = 0, + BFI_MSIX_CPE_QMAX_CB = 7, + BFI_MSIX_RME_QMIN_CB = 8, + BFI_MSIX_RME_QMAX_CB = 15, + BFI_MSIX_CB_MAX = 22, +}; + +/* + * Catapult FC PCI MSI-X vector defines + */ +enum { + BFI_MSIX_LPU_ERR_CT = 0, + BFI_MSIX_CPE_QMIN_CT = 1, + BFI_MSIX_CPE_QMAX_CT = 4, + BFI_MSIX_RME_QMIN_CT = 5, + BFI_MSIX_RME_QMAX_CT = 8, + BFI_MSIX_CT_MAX = 9, +}; + #endif /* __BFI_MS_H__ */ diff --git a/drivers/scsi/bfa/bfi_reg.h b/drivers/scsi/bfa/bfi_reg.h new file mode 100644 index 00000000000..d892064b64a --- /dev/null +++ b/drivers/scsi/bfa/bfi_reg.h @@ -0,0 +1,450 @@ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. 
+ * All rights reserved + * www.brocade.com + * + * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +/* + * bfi_reg.h ASIC register defines for all Brocade adapter ASICs + */ + +#ifndef __BFI_REG_H__ +#define __BFI_REG_H__ + +#define HOSTFN0_INT_STATUS 0x00014000 /* cb/ct */ +#define HOSTFN1_INT_STATUS 0x00014100 /* cb/ct */ +#define HOSTFN2_INT_STATUS 0x00014300 /* ct */ +#define HOSTFN3_INT_STATUS 0x00014400 /* ct */ +#define HOSTFN0_INT_MSK 0x00014004 /* cb/ct */ +#define HOSTFN1_INT_MSK 0x00014104 /* cb/ct */ +#define HOSTFN2_INT_MSK 0x00014304 /* ct */ +#define HOSTFN3_INT_MSK 0x00014404 /* ct */ + +#define HOST_PAGE_NUM_FN0 0x00014008 /* cb/ct */ +#define HOST_PAGE_NUM_FN1 0x00014108 /* cb/ct */ +#define HOST_PAGE_NUM_FN2 0x00014308 /* ct */ +#define HOST_PAGE_NUM_FN3 0x00014408 /* ct */ + +#define APP_PLL_LCLK_CTL_REG 0x00014204 /* cb/ct */ +#define __P_LCLK_PLL_LOCK 0x80000000 +#define __APP_PLL_LCLK_SRAM_USE_100MHZ 0x00100000 +#define __APP_PLL_LCLK_RESET_TIMER_MK 0x000e0000 +#define __APP_PLL_LCLK_RESET_TIMER_SH 17 +#define __APP_PLL_LCLK_RESET_TIMER(_v) ((_v) << __APP_PLL_LCLK_RESET_TIMER_SH) +#define __APP_PLL_LCLK_LOGIC_SOFT_RESET 0x00010000 +#define __APP_PLL_LCLK_CNTLMT0_1_MK 0x0000c000 +#define __APP_PLL_LCLK_CNTLMT0_1_SH 14 +#define __APP_PLL_LCLK_CNTLMT0_1(_v) ((_v) << __APP_PLL_LCLK_CNTLMT0_1_SH) +#define __APP_PLL_LCLK_JITLMT0_1_MK 0x00003000 +#define __APP_PLL_LCLK_JITLMT0_1_SH 12 +#define __APP_PLL_LCLK_JITLMT0_1(_v) ((_v) << __APP_PLL_LCLK_JITLMT0_1_SH) +#define __APP_PLL_LCLK_HREF 0x00000800 +#define __APP_PLL_LCLK_HDIV 0x00000400 +#define __APP_PLL_LCLK_P0_1_MK 0x00000300 +#define __APP_PLL_LCLK_P0_1_SH 8 +#define __APP_PLL_LCLK_P0_1(_v) ((_v) << __APP_PLL_LCLK_P0_1_SH) +#define __APP_PLL_LCLK_Z0_2_MK 0x000000e0 +#define __APP_PLL_LCLK_Z0_2_SH 5 +#define __APP_PLL_LCLK_Z0_2(_v) ((_v) << __APP_PLL_LCLK_Z0_2_SH) +#define __APP_PLL_LCLK_RSEL200500 0x00000010 +#define __APP_PLL_LCLK_ENARST 0x00000008 +#define __APP_PLL_LCLK_BYPASS 0x00000004 +#define __APP_PLL_LCLK_LRESETN 0x00000002 +#define __APP_PLL_LCLK_ENABLE 0x00000001 +#define APP_PLL_SCLK_CTL_REG 0x00014208 /* cb/ct */ +#define __P_SCLK_PLL_LOCK 0x80000000 +#define __APP_PLL_SCLK_RESET_TIMER_MK 0x000e0000 +#define __APP_PLL_SCLK_RESET_TIMER_SH 17 +#define __APP_PLL_SCLK_RESET_TIMER(_v) ((_v) << __APP_PLL_SCLK_RESET_TIMER_SH) +#define __APP_PLL_SCLK_LOGIC_SOFT_RESET 0x00010000 +#define __APP_PLL_SCLK_CNTLMT0_1_MK 0x0000c000 +#define __APP_PLL_SCLK_CNTLMT0_1_SH 14 +#define __APP_PLL_SCLK_CNTLMT0_1(_v) ((_v) << __APP_PLL_SCLK_CNTLMT0_1_SH) +#define __APP_PLL_SCLK_JITLMT0_1_MK 0x00003000 +#define __APP_PLL_SCLK_JITLMT0_1_SH 12 +#define __APP_PLL_SCLK_JITLMT0_1(_v) ((_v) << __APP_PLL_SCLK_JITLMT0_1_SH) +#define __APP_PLL_SCLK_HREF 0x00000800 +#define __APP_PLL_SCLK_HDIV 0x00000400 +#define __APP_PLL_SCLK_P0_1_MK 0x00000300 +#define __APP_PLL_SCLK_P0_1_SH 8 +#define __APP_PLL_SCLK_P0_1(_v) ((_v) << __APP_PLL_SCLK_P0_1_SH) +#define __APP_PLL_SCLK_Z0_2_MK 0x000000e0 +#define __APP_PLL_SCLK_Z0_2_SH 5 +#define __APP_PLL_SCLK_Z0_2(_v) ((_v) << 
__APP_PLL_SCLK_Z0_2_SH) +#define __APP_PLL_SCLK_RSEL200500 0x00000010 +#define __APP_PLL_SCLK_ENARST 0x00000008 +#define __APP_PLL_SCLK_BYPASS 0x00000004 +#define __APP_PLL_SCLK_LRESETN 0x00000002 +#define __APP_PLL_SCLK_ENABLE 0x00000001 +#define __ENABLE_MAC_AHB_1 0x00800000 /* ct */ +#define __ENABLE_MAC_AHB_0 0x00400000 /* ct */ +#define __ENABLE_MAC_1 0x00200000 /* ct */ +#define __ENABLE_MAC_0 0x00100000 /* ct */ + +#define HOST_SEM0_REG 0x00014230 /* cb/ct */ +#define HOST_SEM1_REG 0x00014234 /* cb/ct */ +#define HOST_SEM2_REG 0x00014238 /* cb/ct */ +#define HOST_SEM3_REG 0x0001423c /* cb/ct */ +#define HOST_SEM4_REG 0x00014610 /* cb/ct */ +#define HOST_SEM5_REG 0x00014614 /* cb/ct */ +#define HOST_SEM6_REG 0x00014618 /* cb/ct */ +#define HOST_SEM7_REG 0x0001461c /* cb/ct */ +#define HOST_SEM0_INFO_REG 0x00014240 /* cb/ct */ +#define HOST_SEM1_INFO_REG 0x00014244 /* cb/ct */ +#define HOST_SEM2_INFO_REG 0x00014248 /* cb/ct */ +#define HOST_SEM3_INFO_REG 0x0001424c /* cb/ct */ +#define HOST_SEM4_INFO_REG 0x00014620 /* cb/ct */ +#define HOST_SEM5_INFO_REG 0x00014624 /* cb/ct */ +#define HOST_SEM6_INFO_REG 0x00014628 /* cb/ct */ +#define HOST_SEM7_INFO_REG 0x0001462c /* cb/ct */ + +#define HOSTFN0_LPU0_CMD_STAT 0x00019000 /* cb/ct */ +#define HOSTFN0_LPU1_CMD_STAT 0x00019004 /* cb/ct */ +#define HOSTFN1_LPU0_CMD_STAT 0x00019010 /* cb/ct */ +#define HOSTFN1_LPU1_CMD_STAT 0x00019014 /* cb/ct */ +#define HOSTFN2_LPU0_CMD_STAT 0x00019150 /* ct */ +#define HOSTFN2_LPU1_CMD_STAT 0x00019154 /* ct */ +#define HOSTFN3_LPU0_CMD_STAT 0x00019160 /* ct */ +#define HOSTFN3_LPU1_CMD_STAT 0x00019164 /* ct */ +#define LPU0_HOSTFN0_CMD_STAT 0x00019008 /* cb/ct */ +#define LPU1_HOSTFN0_CMD_STAT 0x0001900c /* cb/ct */ +#define LPU0_HOSTFN1_CMD_STAT 0x00019018 /* cb/ct */ +#define LPU1_HOSTFN1_CMD_STAT 0x0001901c /* cb/ct */ +#define LPU0_HOSTFN2_CMD_STAT 0x00019158 /* ct */ +#define LPU1_HOSTFN2_CMD_STAT 0x0001915c /* ct */ +#define LPU0_HOSTFN3_CMD_STAT 0x00019168 /* ct */ +#define LPU1_HOSTFN3_CMD_STAT 0x0001916c /* ct */ + +#define PSS_CTL_REG 0x00018800 /* cb/ct */ +#define __PSS_I2C_CLK_DIV_MK 0x007f0000 +#define __PSS_I2C_CLK_DIV_SH 16 +#define __PSS_I2C_CLK_DIV(_v) ((_v) << __PSS_I2C_CLK_DIV_SH) +#define __PSS_LMEM_INIT_DONE 0x00001000 +#define __PSS_LMEM_RESET 0x00000200 +#define __PSS_LMEM_INIT_EN 0x00000100 +#define __PSS_LPU1_RESET 0x00000002 +#define __PSS_LPU0_RESET 0x00000001 +#define PSS_ERR_STATUS_REG 0x00018810 /* cb/ct */ +#define ERR_SET_REG 0x00018818 /* cb/ct */ +#define PSS_GPIO_OUT_REG 0x000188c0 /* cb/ct */ +#define __PSS_GPIO_OUT_REG 0x00000fff +#define PSS_GPIO_OE_REG 0x000188c8 /* cb/ct */ +#define __PSS_GPIO_OE_REG 0x000000ff + +#define HOSTFN0_LPU_MBOX0_0 0x00019200 /* cb/ct */ +#define HOSTFN1_LPU_MBOX0_8 0x00019260 /* cb/ct */ +#define LPU_HOSTFN0_MBOX0_0 0x00019280 /* cb/ct */ +#define LPU_HOSTFN1_MBOX0_8 0x000192e0 /* cb/ct */ +#define HOSTFN2_LPU_MBOX0_0 0x00019400 /* ct */ +#define HOSTFN3_LPU_MBOX0_8 0x00019460 /* ct */ +#define LPU_HOSTFN2_MBOX0_0 0x00019480 /* ct */ +#define LPU_HOSTFN3_MBOX0_8 0x000194e0 /* ct */ + +#define HOST_MSIX_ERR_INDEX_FN0 0x0001400c /* ct */ +#define HOST_MSIX_ERR_INDEX_FN1 0x0001410c /* ct */ +#define HOST_MSIX_ERR_INDEX_FN2 0x0001430c /* ct */ +#define HOST_MSIX_ERR_INDEX_FN3 0x0001440c /* ct */ + +#define MBIST_CTL_REG 0x00014220 /* ct */ +#define __EDRAM_BISTR_START 0x00000004 +#define MBIST_STAT_REG 0x00014224 /* ct */ +#define ETH_MAC_SER_REG 0x00014288 /* ct */ +#define __APP_EMS_CKBUFAMPIN 0x00000020 +#define __APP_EMS_REFCLKSEL 
0x00000010 +#define __APP_EMS_CMLCKSEL 0x00000008 +#define __APP_EMS_REFCKBUFEN2 0x00000004 +#define __APP_EMS_REFCKBUFEN1 0x00000002 +#define __APP_EMS_CHANNEL_SEL 0x00000001 +#define FNC_PERS_REG 0x00014604 /* ct */ +#define __F3_FUNCTION_ACTIVE 0x80000000 +#define __F3_FUNCTION_MODE 0x40000000 +#define __F3_PORT_MAP_MK 0x30000000 +#define __F3_PORT_MAP_SH 28 +#define __F3_PORT_MAP(_v) ((_v) << __F3_PORT_MAP_SH) +#define __F3_VM_MODE 0x08000000 +#define __F3_INTX_STATUS_MK 0x07000000 +#define __F3_INTX_STATUS_SH 24 +#define __F3_INTX_STATUS(_v) ((_v) << __F3_INTX_STATUS_SH) +#define __F2_FUNCTION_ACTIVE 0x00800000 +#define __F2_FUNCTION_MODE 0x00400000 +#define __F2_PORT_MAP_MK 0x00300000 +#define __F2_PORT_MAP_SH 20 +#define __F2_PORT_MAP(_v) ((_v) << __F2_PORT_MAP_SH) +#define __F2_VM_MODE 0x00080000 +#define __F2_INTX_STATUS_MK 0x00070000 +#define __F2_INTX_STATUS_SH 16 +#define __F2_INTX_STATUS(_v) ((_v) << __F2_INTX_STATUS_SH) +#define __F1_FUNCTION_ACTIVE 0x00008000 +#define __F1_FUNCTION_MODE 0x00004000 +#define __F1_PORT_MAP_MK 0x00003000 +#define __F1_PORT_MAP_SH 12 +#define __F1_PORT_MAP(_v) ((_v) << __F1_PORT_MAP_SH) +#define __F1_VM_MODE 0x00000800 +#define __F1_INTX_STATUS_MK 0x00000700 +#define __F1_INTX_STATUS_SH 8 +#define __F1_INTX_STATUS(_v) ((_v) << __F1_INTX_STATUS_SH) +#define __F0_FUNCTION_ACTIVE 0x00000080 +#define __F0_FUNCTION_MODE 0x00000040 +#define __F0_PORT_MAP_MK 0x00000030 +#define __F0_PORT_MAP_SH 4 +#define __F0_PORT_MAP(_v) ((_v) << __F0_PORT_MAP_SH) +#define __F0_VM_MODE 0x00000008 +#define __F0_INTX_STATUS 0x00000007 +enum { + __F0_INTX_STATUS_MSIX = 0x0, + __F0_INTX_STATUS_INTA = 0x1, + __F0_INTX_STATUS_INTB = 0x2, + __F0_INTX_STATUS_INTC = 0x3, + __F0_INTX_STATUS_INTD = 0x4, +}; + +#define OP_MODE 0x0001460c /* ct */ +#define __APP_ETH_CLK_LOWSPEED 0x00000004 +#define __GLOBAL_CORECLK_HALFSPEED 0x00000002 +#define __GLOBAL_FCOE_MODE 0x00000001 +#define FW_INIT_HALT_P0 0x000191ac /* ct */ +#define __FW_INIT_HALT_P 0x00000001 +#define FW_INIT_HALT_P1 0x000191bc /* ct */ +#define PMM_1T_RESET_REG_P0 0x0002381c /* ct */ +#define __PMM_1T_RESET_P 0x00000001 +#define PMM_1T_RESET_REG_P1 0x00023c1c /* ct */ + +/** + * Catapult-2 specific defines + */ +#define CT2_PCI_CPQ_BASE 0x00030000 +#define CT2_PCI_APP_BASE 0x00030100 +#define CT2_PCI_ETH_BASE 0x00030400 + +/* + * APP block registers + */ +#define CT2_HOSTFN_INT_STATUS (CT2_PCI_APP_BASE + 0x00) +#define CT2_HOSTFN_INTR_MASK (CT2_PCI_APP_BASE + 0x04) +#define CT2_HOSTFN_PERSONALITY0 (CT2_PCI_APP_BASE + 0x08) +#define __PME_STATUS_ 0x00200000 +#define __PF_VF_BAR_SIZE_MODE__MK 0x00180000 +#define __PF_VF_BAR_SIZE_MODE__SH 19 +#define __PF_VF_BAR_SIZE_MODE_(_v) ((_v) << __PF_VF_BAR_SIZE_MODE__SH) +#define __FC_LL_PORT_MAP__MK 0x00060000 +#define __FC_LL_PORT_MAP__SH 17 +#define __FC_LL_PORT_MAP_(_v) ((_v) << __FC_LL_PORT_MAP__SH) +#define __PF_VF_ACTIVE_ 0x00010000 +#define __PF_VF_CFG_RDY_ 0x00008000 +#define __PF_VF_ENABLE_ 0x00004000 +#define __PF_DRIVER_ACTIVE_ 0x00002000 +#define __PF_PME_SEND_ENABLE_ 0x00001000 +#define __PF_EXROM_OFFSET__MK 0x00000ff0 +#define __PF_EXROM_OFFSET__SH 4 +#define __PF_EXROM_OFFSET_(_v) ((_v) << __PF_EXROM_OFFSET__SH) +#define __FC_LL_MODE_ 0x00000008 +#define __PF_INTX_PIN_ 0x00000007 +#define CT2_HOSTFN_PERSONALITY1 (CT2_PCI_APP_BASE + 0x0C) +#define __PF_NUM_QUEUES1__MK 0xff000000 +#define __PF_NUM_QUEUES1__SH 24 +#define __PF_NUM_QUEUES1_(_v) ((_v) << __PF_NUM_QUEUES1__SH) +#define __PF_VF_QUE_OFFSET1__MK 0x00ff0000 +#define __PF_VF_QUE_OFFSET1__SH 16 +#define 
__PF_VF_QUE_OFFSET1_(_v) ((_v) << __PF_VF_QUE_OFFSET1__SH) +#define __PF_VF_NUM_QUEUES__MK 0x0000ff00 +#define __PF_VF_NUM_QUEUES__SH 8 +#define __PF_VF_NUM_QUEUES_(_v) ((_v) << __PF_VF_NUM_QUEUES__SH) +#define __PF_VF_QUE_OFFSET_ 0x000000ff +#define CT2_HOSTFN_PAGE_NUM (CT2_PCI_APP_BASE + 0x18) +#define CT2_HOSTFN_MSIX_VT_INDEX_MBOX_ERR (CT2_PCI_APP_BASE + 0x38) + +/* + * Catapult-2 CPQ block registers + */ +#define CT2_HOSTFN_LPU0_MBOX0 (CT2_PCI_CPQ_BASE + 0x00) +#define CT2_HOSTFN_LPU1_MBOX0 (CT2_PCI_CPQ_BASE + 0x20) +#define CT2_LPU0_HOSTFN_MBOX0 (CT2_PCI_CPQ_BASE + 0x40) +#define CT2_LPU1_HOSTFN_MBOX0 (CT2_PCI_CPQ_BASE + 0x60) +#define CT2_HOSTFN_LPU0_CMD_STAT (CT2_PCI_CPQ_BASE + 0x80) +#define CT2_HOSTFN_LPU1_CMD_STAT (CT2_PCI_CPQ_BASE + 0x84) +#define CT2_LPU0_HOSTFN_CMD_STAT (CT2_PCI_CPQ_BASE + 0x88) +#define CT2_LPU1_HOSTFN_CMD_STAT (CT2_PCI_CPQ_BASE + 0x8c) +#define CT2_HOSTFN_LPU0_READ_STAT (CT2_PCI_CPQ_BASE + 0x90) +#define CT2_HOSTFN_LPU1_READ_STAT (CT2_PCI_CPQ_BASE + 0x94) +#define CT2_LPU0_HOSTFN_MBOX0_MSK (CT2_PCI_CPQ_BASE + 0x98) +#define CT2_LPU1_HOSTFN_MBOX0_MSK (CT2_PCI_CPQ_BASE + 0x9C) +#define CT2_HOST_SEM0_REG 0x000148f0 +#define CT2_HOST_SEM1_REG 0x000148f4 +#define CT2_HOST_SEM2_REG 0x000148f8 +#define CT2_HOST_SEM3_REG 0x000148fc +#define CT2_HOST_SEM4_REG 0x00014900 +#define CT2_HOST_SEM5_REG 0x00014904 +#define CT2_HOST_SEM6_REG 0x00014908 +#define CT2_HOST_SEM7_REG 0x0001490c +#define CT2_HOST_SEM0_INFO_REG 0x000148b0 +#define CT2_HOST_SEM1_INFO_REG 0x000148b4 +#define CT2_HOST_SEM2_INFO_REG 0x000148b8 +#define CT2_HOST_SEM3_INFO_REG 0x000148bc +#define CT2_HOST_SEM4_INFO_REG 0x000148c0 +#define CT2_HOST_SEM5_INFO_REG 0x000148c4 +#define CT2_HOST_SEM6_INFO_REG 0x000148c8 +#define CT2_HOST_SEM7_INFO_REG 0x000148cc + +#define CT2_APP_PLL_LCLK_CTL_REG 0x00014808 +#define __APP_LPUCLK_HALFSPEED 0x40000000 +#define __APP_PLL_LCLK_LOAD 0x20000000 +#define __APP_PLL_LCLK_FBCNT_MK 0x1fe00000 +#define __APP_PLL_LCLK_FBCNT_SH 21 +#define __APP_PLL_LCLK_FBCNT(_v) ((_v) << __APP_PLL_SCLK_FBCNT_SH) +enum { + __APP_PLL_LCLK_FBCNT_425_MHZ = 6, + __APP_PLL_LCLK_FBCNT_468_MHZ = 4, +}; +#define __APP_PLL_LCLK_EXTFB 0x00000800 +#define __APP_PLL_LCLK_ENOUTS 0x00000400 +#define __APP_PLL_LCLK_RATE 0x00000010 +#define CT2_APP_PLL_SCLK_CTL_REG 0x0001480c +#define __P_SCLK_PLL_LOCK 0x80000000 +#define __APP_PLL_SCLK_REFCLK_SEL 0x40000000 +#define __APP_PLL_SCLK_CLK_DIV2 0x20000000 +#define __APP_PLL_SCLK_LOAD 0x10000000 +#define __APP_PLL_SCLK_FBCNT_MK 0x0ff00000 +#define __APP_PLL_SCLK_FBCNT_SH 20 +#define __APP_PLL_SCLK_FBCNT(_v) ((_v) << __APP_PLL_SCLK_FBCNT_SH) +enum { + __APP_PLL_SCLK_FBCNT_NORM = 6, + __APP_PLL_SCLK_FBCNT_10G_FC = 10, +}; +#define __APP_PLL_SCLK_EXTFB 0x00000800 +#define __APP_PLL_SCLK_ENOUTS 0x00000400 +#define __APP_PLL_SCLK_RATE 0x00000010 +#define CT2_PCIE_MISC_REG 0x00014804 +#define __ETH_CLK_ENABLE_PORT1 0x00000010 +#define CT2_CHIP_MISC_PRG 0x000148a4 +#define __ETH_CLK_ENABLE_PORT0 0x00004000 +#define __APP_LPU_SPEED 0x00000002 +#define CT2_MBIST_STAT_REG 0x00014818 +#define CT2_MBIST_CTL_REG 0x0001481c +#define CT2_PMM_1T_CONTROL_REG_P0 0x0002381c +#define __PMM_1T_PNDB_P 0x00000002 +#define CT2_PMM_1T_CONTROL_REG_P1 0x00023c1c +#define CT2_WGN_STATUS 0x00014990 +#define __WGN_READY 0x00000400 +#define __GLBL_PF_VF_CFG_RDY 0x00000200 +#define CT2_NFC_CSR_SET_REG 0x00027424 +#define __HALT_NFC_CONTROLLER 0x00000002 +#define __NFC_CONTROLLER_HALTED 0x00001000 + +#define CT2_CSI_MAC0_CONTROL_REG 0x000270d0 +#define __CSI_MAC_RESET 0x00000010 +#define 
__CSI_MAC_AHB_RESET 0x00000008 +#define CT2_CSI_MAC1_CONTROL_REG 0x000270d4 +#define CT2_CSI_MAC_CONTROL_REG(__n) \ + (CT2_CSI_MAC0_CONTROL_REG + \ + (__n) * (CT2_CSI_MAC1_CONTROL_REG - CT2_CSI_MAC0_CONTROL_REG)) + +/* + * Name semaphore registers based on usage + */ +#define BFA_IOC0_HBEAT_REG HOST_SEM0_INFO_REG +#define BFA_IOC0_STATE_REG HOST_SEM1_INFO_REG +#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG +#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG +#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG +#define BFA_IOC_FAIL_SYNC HOST_SEM5_INFO_REG + +/* + * CT2 semaphore register locations changed + */ +#define CT2_BFA_IOC0_HBEAT_REG CT2_HOST_SEM0_INFO_REG +#define CT2_BFA_IOC0_STATE_REG CT2_HOST_SEM1_INFO_REG +#define CT2_BFA_IOC1_HBEAT_REG CT2_HOST_SEM2_INFO_REG +#define CT2_BFA_IOC1_STATE_REG CT2_HOST_SEM3_INFO_REG +#define CT2_BFA_FW_USE_COUNT CT2_HOST_SEM4_INFO_REG +#define CT2_BFA_IOC_FAIL_SYNC CT2_HOST_SEM5_INFO_REG + +#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q)) +#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q)) + +/* + * And corresponding host interrupt status bit field defines + */ +#define __HFN_INT_CPE_Q0 0x00000001U +#define __HFN_INT_CPE_Q1 0x00000002U +#define __HFN_INT_CPE_Q2 0x00000004U +#define __HFN_INT_CPE_Q3 0x00000008U +#define __HFN_INT_CPE_Q4 0x00000010U +#define __HFN_INT_CPE_Q5 0x00000020U +#define __HFN_INT_CPE_Q6 0x00000040U +#define __HFN_INT_CPE_Q7 0x00000080U +#define __HFN_INT_RME_Q0 0x00000100U +#define __HFN_INT_RME_Q1 0x00000200U +#define __HFN_INT_RME_Q2 0x00000400U +#define __HFN_INT_RME_Q3 0x00000800U +#define __HFN_INT_RME_Q4 0x00001000U +#define __HFN_INT_RME_Q5 0x00002000U +#define __HFN_INT_RME_Q6 0x00004000U +#define __HFN_INT_RME_Q7 0x00008000U +#define __HFN_INT_ERR_EMC 0x00010000U +#define __HFN_INT_ERR_LPU0 0x00020000U +#define __HFN_INT_ERR_LPU1 0x00040000U +#define __HFN_INT_ERR_PSS 0x00080000U +#define __HFN_INT_MBOX_LPU0 0x00100000U +#define __HFN_INT_MBOX_LPU1 0x00200000U +#define __HFN_INT_MBOX1_LPU0 0x00400000U +#define __HFN_INT_MBOX1_LPU1 0x00800000U +#define __HFN_INT_LL_HALT 0x01000000U +#define __HFN_INT_CPE_MASK 0x000000ffU +#define __HFN_INT_RME_MASK 0x0000ff00U +#define __HFN_INT_ERR_MASK \ + (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | __HFN_INT_ERR_LPU1 | \ + __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT) +#define __HFN_INT_FN0_MASK \ + (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | __HFN_INT_CPE_Q2 | \ + __HFN_INT_CPE_Q3 | __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | \ + __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | __HFN_INT_MBOX_LPU0) +#define __HFN_INT_FN1_MASK \ + (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | __HFN_INT_CPE_Q6 | \ + __HFN_INT_CPE_Q7 | __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | \ + __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | __HFN_INT_MBOX_LPU1) + +/* + * Host interrupt status defines for catapult-2 + */ +#define __HFN_INT_MBOX_LPU0_CT2 0x00010000U +#define __HFN_INT_MBOX_LPU1_CT2 0x00020000U +#define __HFN_INT_ERR_PSS_CT2 0x00040000U +#define __HFN_INT_ERR_LPU0_CT2 0x00080000U +#define __HFN_INT_ERR_LPU1_CT2 0x00100000U +#define __HFN_INT_CPQ_HALT_CT2 0x00200000U +#define __HFN_INT_ERR_WGN_CT2 0x00400000U +#define __HFN_INT_ERR_LEHRX_CT2 0x00800000U +#define __HFN_INT_ERR_LEHTX_CT2 0x01000000U +#define __HFN_INT_ERR_MASK_CT2 \ + (__HFN_INT_ERR_PSS_CT2 | __HFN_INT_ERR_LPU0_CT2 | \ + __HFN_INT_ERR_LPU1_CT2 | __HFN_INT_CPQ_HALT_CT2 | \ + __HFN_INT_ERR_WGN_CT2 | __HFN_INT_ERR_LEHRX_CT2 | \ + __HFN_INT_ERR_LEHTX_CT2) +#define __HFN_INT_FN0_MASK_CT2 \ + (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | __HFN_INT_CPE_Q2 | \ + __HFN_INT_CPE_Q3 | __HFN_INT_RME_Q0 | 
__HFN_INT_RME_Q1 | \ + __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | __HFN_INT_MBOX_LPU0_CT2) +#define __HFN_INT_FN1_MASK_CT2 \ + (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | __HFN_INT_CPE_Q6 | \ + __HFN_INT_CPE_Q7 | __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | \ + __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | __HFN_INT_MBOX_LPU1_CT2) + +/* + * asic memory map. + */ +#define PSS_SMEM_PAGE_START 0x8000 +#define PSS_SMEM_PGNUM(_pg0, _ma) ((_pg0) + ((_ma) >> 15)) +#define PSS_SMEM_PGOFF(_ma) ((_ma) & 0x7fff) + +#endif /* __BFI_REG_H__ */ diff --git a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h index 97a61b4d81b..e1f1e3448f9 100644 --- a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h +++ b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h @@ -19,6 +19,23 @@ struct b577xx_doorbell_hdr { /* * doorbell message sent to the chip */ +struct b577xx_doorbell { +#if defined(__BIG_ENDIAN) + u16 zero_fill2; + u8 zero_fill1; + struct b577xx_doorbell_hdr header; +#elif defined(__LITTLE_ENDIAN) + struct b577xx_doorbell_hdr header; + u8 zero_fill1; + u16 zero_fill2; +#endif +}; + + + +/* + * doorbell message sent to the chip + */ struct b577xx_doorbell_set_prod { #if defined(__BIG_ENDIAN) u16 prod; @@ -39,106 +56,63 @@ struct regpair { /* - * Fixed size structure in order to plant it in Union structure + * ABTS info $$KEEP_ENDIANNESS$$ */ -struct fcoe_abts_rsp_union { - u32 r_ctl; - u32 abts_rsp_payload[7]; +struct fcoe_abts_info { + __le16 aborted_task_id; + __le16 reserved0; + __le32 reserved1; }; /* - * 4 regs size + * Fixed size structure in order to plant it in Union structure + * $$KEEP_ENDIANNESS$$ */ -struct fcoe_bd_ctx { - u32 buf_addr_hi; - u32 buf_addr_lo; -#if defined(__BIG_ENDIAN) - u16 rsrv0; - u16 buf_len; -#elif defined(__LITTLE_ENDIAN) - u16 buf_len; - u16 rsrv0; -#endif -#if defined(__BIG_ENDIAN) - u16 rsrv1; - u16 flags; -#elif defined(__LITTLE_ENDIAN) - u16 flags; - u16 rsrv1; -#endif +struct fcoe_abts_rsp_union { + u8 r_ctl; + u8 rsrv[3]; + __le32 abts_rsp_payload[7]; }; -struct fcoe_cleanup_flow_info { -#if defined(__BIG_ENDIAN) - u16 reserved1; - u16 task_id; -#elif defined(__LITTLE_ENDIAN) - u16 task_id; - u16 reserved1; -#endif - u32 reserved2[7]; +/* + * 4 regs size $$KEEP_ENDIANNESS$$ + */ +struct fcoe_bd_ctx { + __le32 buf_addr_hi; + __le32 buf_addr_lo; + __le16 buf_len; + __le16 rsrv0; + __le16 flags; + __le16 rsrv1; }; -struct fcoe_fcp_cmd_payload { - u32 opaque[8]; -}; - -struct fcoe_fc_hdr { -#if defined(__BIG_ENDIAN) - u8 cs_ctl; - u8 s_id[3]; -#elif defined(__LITTLE_ENDIAN) - u8 s_id[3]; - u8 cs_ctl; -#endif -#if defined(__BIG_ENDIAN) - u8 r_ctl; - u8 d_id[3]; -#elif defined(__LITTLE_ENDIAN) - u8 d_id[3]; - u8 r_ctl; -#endif -#if defined(__BIG_ENDIAN) - u8 seq_id; - u8 df_ctl; - u16 seq_cnt; -#elif defined(__LITTLE_ENDIAN) - u16 seq_cnt; - u8 df_ctl; - u8 seq_id; -#endif -#if defined(__BIG_ENDIAN) - u8 type; - u8 f_ctl[3]; -#elif defined(__LITTLE_ENDIAN) - u8 f_ctl[3]; - u8 type; -#endif - u32 parameters; -#if defined(__BIG_ENDIAN) - u16 ox_id; - u16 rx_id; -#elif defined(__LITTLE_ENDIAN) - u16 rx_id; - u16 ox_id; -#endif +/* + * FCoE cached sges context $$KEEP_ENDIANNESS$$ + */ +struct fcoe_cached_sge_ctx { + struct regpair cur_buf_addr; + __le16 cur_buf_rem; + __le16 second_buf_rem; + struct regpair second_buf_addr; }; -struct fcoe_fc_frame { - struct fcoe_fc_hdr fc_hdr; - u32 reserved0[2]; -}; -union fcoe_cmd_flow_info { - struct fcoe_fcp_cmd_payload fcp_cmd_payload; - struct fcoe_fc_frame mp_fc_frame; +/* + * Cleanup info $$KEEP_ENDIANNESS$$ + */ +struct fcoe_cleanup_info { + __le16 
cleaned_task_id; + __le16 rolled_tx_seq_cnt; + __le32 rolled_tx_data_offset; }; - +/* + * Fcp RSP flags $$KEEP_ENDIANNESS$$ + */ struct fcoe_fcp_rsp_flags { u8 flags; #define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0) @@ -155,95 +129,168 @@ struct fcoe_fcp_rsp_flags { #define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5 }; - +/* + * Fcp RSP payload $$KEEP_ENDIANNESS$$ + */ struct fcoe_fcp_rsp_payload { struct regpair reserved0; - u32 fcp_resid; -#if defined(__BIG_ENDIAN) - u16 retry_delay_timer; - struct fcoe_fcp_rsp_flags fcp_flags; - u8 scsi_status_code; -#elif defined(__LITTLE_ENDIAN) + __le32 fcp_resid; u8 scsi_status_code; struct fcoe_fcp_rsp_flags fcp_flags; - u16 retry_delay_timer; -#endif - u32 fcp_rsp_len; - u32 fcp_sns_len; + __le16 retry_delay_timer; + __le32 fcp_rsp_len; + __le32 fcp_sns_len; }; - /* * Fixed size structure in order to plant it in Union structure + * $$KEEP_ENDIANNESS$$ */ struct fcoe_fcp_rsp_union { struct fcoe_fcp_rsp_payload payload; struct regpair reserved0; }; +/* + * FC header $$KEEP_ENDIANNESS$$ + */ +struct fcoe_fc_hdr { + u8 s_id[3]; + u8 cs_ctl; + u8 d_id[3]; + u8 r_ctl; + __le16 seq_cnt; + u8 df_ctl; + u8 seq_id; + u8 f_ctl[3]; + u8 type; + __le32 parameters; + __le16 rx_id; + __le16 ox_id; +}; -struct fcoe_fcp_xfr_rdy_payload { - u32 burst_len; - u32 data_ro; +/* + * FC header union $$KEEP_ENDIANNESS$$ + */ +struct fcoe_mp_rsp_union { + struct fcoe_fc_hdr fc_hdr; + __le32 mp_payload_len; + __le32 rsrv; }; -struct fcoe_read_flow_info { - struct fcoe_fc_hdr fc_data_in_hdr; - u32 reserved[2]; +/* + * Completion information $$KEEP_ENDIANNESS$$ + */ +union fcoe_comp_flow_info { + struct fcoe_fcp_rsp_union fcp_rsp; + struct fcoe_abts_rsp_union abts_rsp; + struct fcoe_mp_rsp_union mp_rsp; + __le32 opaque[8]; }; -struct fcoe_write_flow_info { - struct fcoe_fc_hdr fc_data_out_hdr; - struct fcoe_fcp_xfr_rdy_payload fcp_xfr_payload; + +/* + * External ABTS info $$KEEP_ENDIANNESS$$ + */ +struct fcoe_ext_abts_info { + __le32 rsrv0[6]; + struct fcoe_abts_info ctx; }; -union fcoe_rsp_flow_info { - struct fcoe_fcp_rsp_union fcp_rsp; - struct fcoe_abts_rsp_union abts_rsp; + +/* + * External cleanup info $$KEEP_ENDIANNESS$$ + */ +struct fcoe_ext_cleanup_info { + __le32 rsrv0[6]; + struct fcoe_cleanup_info ctx; }; + /* - * 32 bytes used for general purposes + * Fcoe FW Tx sequence context $$KEEP_ENDIANNESS$$ */ -union fcoe_general_task_ctx { - union fcoe_cmd_flow_info cmd_info; - struct fcoe_read_flow_info read_info; - struct fcoe_write_flow_info write_info; - union fcoe_rsp_flow_info rsp_info; - struct fcoe_cleanup_flow_info cleanup_info; - u32 comp_info[8]; +struct fcoe_fw_tx_seq_ctx { + __le32 data_offset; + __le16 seq_cnt; + __le16 rsrv0; +}; + +/* + * Fcoe external FW Tx sequence context $$KEEP_ENDIANNESS$$ + */ +struct fcoe_ext_fw_tx_seq_ctx { + __le32 rsrv0[6]; + struct fcoe_fw_tx_seq_ctx ctx; +}; + + +/* + * FCoE multiple sges context $$KEEP_ENDIANNESS$$ + */ +struct fcoe_mul_sges_ctx { + struct regpair cur_sge_addr; + __le16 cur_sge_off; + u8 cur_sge_idx; + u8 sgl_size; +}; + +/* + * FCoE external multiple sges context $$KEEP_ENDIANNESS$$ + */ +struct fcoe_ext_mul_sges_ctx { + struct fcoe_mul_sges_ctx mul_sgl; + struct regpair rsrv0; }; /* - * FCoE KCQ CQE parameters + * FCP CMD payload $$KEEP_ENDIANNESS$$ + */ +struct fcoe_fcp_cmd_payload { + __le32 opaque[8]; +}; + + + + + +/* + * Fcp xfr rdy payload $$KEEP_ENDIANNESS$$ + */ +struct fcoe_fcp_xfr_rdy_payload { + __le32 burst_len; + __le32 data_ro; +}; + + +/* + * FC frame $$KEEP_ENDIANNESS$$ + */ +struct 
fcoe_fc_frame { + struct fcoe_fc_hdr fc_hdr; + __le32 reserved0[2]; +}; + + + + +/* + * FCoE KCQ CQE parameters $$KEEP_ENDIANNESS$$ */ union fcoe_kcqe_params { - u32 reserved0[4]; + __le32 reserved0[4]; }; /* - * FCoE KCQ CQE + * FCoE KCQ CQE $$KEEP_ENDIANNESS$$ */ struct fcoe_kcqe { - u32 fcoe_conn_id; - u32 completion_status; - u32 fcoe_conn_context_id; + __le32 fcoe_conn_id; + __le32 completion_status; + __le32 fcoe_conn_context_id; union fcoe_kcqe_params params; -#if defined(__BIG_ENDIAN) - u8 flags; -#define FCOE_KCQE_RESERVED0 (0x7<<0) -#define FCOE_KCQE_RESERVED0_SHIFT 0 -#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3) -#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3 -#define FCOE_KCQE_LAYER_CODE (0x7<<4) -#define FCOE_KCQE_LAYER_CODE_SHIFT 4 -#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7) -#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7 - u8 op_code; - u16 qe_self_seq; -#elif defined(__LITTLE_ENDIAN) - u16 qe_self_seq; + __le16 qe_self_seq; u8 op_code; u8 flags; #define FCOE_KCQE_RESERVED0 (0x7<<0) @@ -254,23 +301,14 @@ struct fcoe_kcqe { #define FCOE_KCQE_LAYER_CODE_SHIFT 4 #define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7) #define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7 -#endif }; + + /* - * FCoE KWQE header + * FCoE KWQE header $$KEEP_ENDIANNESS$$ */ struct fcoe_kwqe_header { -#if defined(__BIG_ENDIAN) - u8 flags; -#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0) -#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0 -#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4) -#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4 -#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7) -#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7 - u8 op_code; -#elif defined(__LITTLE_ENDIAN) u8 op_code; u8 flags; #define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0) @@ -279,50 +317,23 @@ struct fcoe_kwqe_header { #define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4 #define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7) #define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7 -#endif }; /* - * FCoE firmware init request 1 + * FCoE firmware init request 1 $$KEEP_ENDIANNESS$$ */ struct fcoe_kwqe_init1 { -#if defined(__BIG_ENDIAN) + __le16 num_tasks; struct fcoe_kwqe_header hdr; - u16 num_tasks; -#elif defined(__LITTLE_ENDIAN) - u16 num_tasks; - struct fcoe_kwqe_header hdr; -#endif - u32 task_list_pbl_addr_lo; - u32 task_list_pbl_addr_hi; - u32 dummy_buffer_addr_lo; - u32 dummy_buffer_addr_hi; -#if defined(__BIG_ENDIAN) - u16 rq_num_wqes; - u16 sq_num_wqes; -#elif defined(__LITTLE_ENDIAN) - u16 sq_num_wqes; - u16 rq_num_wqes; -#endif -#if defined(__BIG_ENDIAN) - u16 cq_num_wqes; - u16 rq_buffer_log_size; -#elif defined(__LITTLE_ENDIAN) - u16 rq_buffer_log_size; - u16 cq_num_wqes; -#endif -#if defined(__BIG_ENDIAN) - u8 flags; -#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0) -#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0 -#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4) -#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4 -#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7) -#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7 - u8 num_sessions_log; - u16 mtu; -#elif defined(__LITTLE_ENDIAN) - u16 mtu; + __le32 task_list_pbl_addr_lo; + __le32 task_list_pbl_addr_hi; + __le32 dummy_buffer_addr_lo; + __le32 dummy_buffer_addr_hi; + __le16 sq_num_wqes; + __le16 rq_num_wqes; + __le16 rq_buffer_log_size; + __le16 cq_num_wqes; + __le16 mtu; u8 num_sessions_log; u8 flags; #define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0) @@ -331,113 +342,73 @@ struct fcoe_kwqe_init1 { #define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4 #define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7) #define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7 -#endif }; /* - * FCoE firmware 
init request 2 + * FCoE firmware init request 2 $$KEEP_ENDIANNESS$$ */ struct fcoe_kwqe_init2 { -#if defined(__BIG_ENDIAN) - struct fcoe_kwqe_header hdr; - u16 reserved0; -#elif defined(__LITTLE_ENDIAN) - u16 reserved0; + u8 hsi_major_version; + u8 hsi_minor_version; struct fcoe_kwqe_header hdr; -#endif - u32 hash_tbl_pbl_addr_lo; - u32 hash_tbl_pbl_addr_hi; - u32 t2_hash_tbl_addr_lo; - u32 t2_hash_tbl_addr_hi; - u32 t2_ptr_hash_tbl_addr_lo; - u32 t2_ptr_hash_tbl_addr_hi; - u32 free_list_count; + __le32 hash_tbl_pbl_addr_lo; + __le32 hash_tbl_pbl_addr_hi; + __le32 t2_hash_tbl_addr_lo; + __le32 t2_hash_tbl_addr_hi; + __le32 t2_ptr_hash_tbl_addr_lo; + __le32 t2_ptr_hash_tbl_addr_hi; + __le32 free_list_count; }; /* - * FCoE firmware init request 3 + * FCoE firmware init request 3 $$KEEP_ENDIANNESS$$ */ struct fcoe_kwqe_init3 { -#if defined(__BIG_ENDIAN) - struct fcoe_kwqe_header hdr; - u16 reserved0; -#elif defined(__LITTLE_ENDIAN) - u16 reserved0; + __le16 reserved0; struct fcoe_kwqe_header hdr; -#endif - u32 error_bit_map_lo; - u32 error_bit_map_hi; -#if defined(__BIG_ENDIAN) - u8 reserved21[3]; - u8 cached_session_enable; -#elif defined(__LITTLE_ENDIAN) - u8 cached_session_enable; + __le32 error_bit_map_lo; + __le32 error_bit_map_hi; + u8 perf_config; u8 reserved21[3]; -#endif - u32 reserved2[4]; + __le32 reserved2[4]; }; /* - * FCoE connection offload request 1 + * FCoE connection offload request 1 $$KEEP_ENDIANNESS$$ */ struct fcoe_kwqe_conn_offload1 { -#if defined(__BIG_ENDIAN) + __le16 fcoe_conn_id; struct fcoe_kwqe_header hdr; - u16 fcoe_conn_id; -#elif defined(__LITTLE_ENDIAN) - u16 fcoe_conn_id; - struct fcoe_kwqe_header hdr; -#endif - u32 sq_addr_lo; - u32 sq_addr_hi; - u32 rq_pbl_addr_lo; - u32 rq_pbl_addr_hi; - u32 rq_first_pbe_addr_lo; - u32 rq_first_pbe_addr_hi; -#if defined(__BIG_ENDIAN) - u16 reserved0; - u16 rq_prod; -#elif defined(__LITTLE_ENDIAN) - u16 rq_prod; - u16 reserved0; -#endif + __le32 sq_addr_lo; + __le32 sq_addr_hi; + __le32 rq_pbl_addr_lo; + __le32 rq_pbl_addr_hi; + __le32 rq_first_pbe_addr_lo; + __le32 rq_first_pbe_addr_hi; + __le16 rq_prod; + __le16 reserved0; }; /* - * FCoE connection offload request 2 + * FCoE connection offload request 2 $$KEEP_ENDIANNESS$$ */ struct fcoe_kwqe_conn_offload2 { -#if defined(__BIG_ENDIAN) - struct fcoe_kwqe_header hdr; - u16 tx_max_fc_pay_len; -#elif defined(__LITTLE_ENDIAN) - u16 tx_max_fc_pay_len; + __le16 tx_max_fc_pay_len; struct fcoe_kwqe_header hdr; -#endif - u32 cq_addr_lo; - u32 cq_addr_hi; - u32 xferq_addr_lo; - u32 xferq_addr_hi; - u32 conn_db_addr_lo; - u32 conn_db_addr_hi; - u32 reserved1; + __le32 cq_addr_lo; + __le32 cq_addr_hi; + __le32 xferq_addr_lo; + __le32 xferq_addr_hi; + __le32 conn_db_addr_lo; + __le32 conn_db_addr_hi; + __le32 reserved1; }; /* - * FCoE connection offload request 3 + * FCoE connection offload request 3 $$KEEP_ENDIANNESS$$ */ struct fcoe_kwqe_conn_offload3 { -#if defined(__BIG_ENDIAN) - struct fcoe_kwqe_header hdr; - u16 vlan_tag; -#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0) -#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0 -#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12) -#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12 -#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13) -#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13 -#elif defined(__LITTLE_ENDIAN) - u16 vlan_tag; + __le16 vlan_tag; #define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0) #define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0 #define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12) @@ -445,34 +416,8 @@ struct fcoe_kwqe_conn_offload3 { 
#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13) #define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13 struct fcoe_kwqe_header hdr; -#endif -#if defined(__BIG_ENDIAN) - u8 tx_max_conc_seqs_c3; - u8 s_id[3]; -#elif defined(__LITTLE_ENDIAN) u8 s_id[3]; u8 tx_max_conc_seqs_c3; -#endif -#if defined(__BIG_ENDIAN) - u8 flags; -#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0) -#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0 -#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1) -#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1 -#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2) -#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2 -#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3) -#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3 -#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4) -#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4 -#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5) -#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5 -#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6) -#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6 -#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7) -#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7 - u8 d_id[3]; -#elif defined(__LITTLE_ENDIAN) u8 d_id[3]; u8 flags; #define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0) @@ -491,79 +436,44 @@ struct fcoe_kwqe_conn_offload3 { #define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6 #define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7) #define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7 -#endif - u32 reserved; - u32 confq_first_pbe_addr_lo; - u32 confq_first_pbe_addr_hi; -#if defined(__BIG_ENDIAN) - u16 rx_max_fc_pay_len; - u16 tx_total_conc_seqs; -#elif defined(__LITTLE_ENDIAN) - u16 tx_total_conc_seqs; - u16 rx_max_fc_pay_len; -#endif -#if defined(__BIG_ENDIAN) - u8 rx_open_seqs_exch_c3; - u8 rx_max_conc_seqs_c3; - u16 rx_total_conc_seqs; -#elif defined(__LITTLE_ENDIAN) - u16 rx_total_conc_seqs; + __le32 reserved; + __le32 confq_first_pbe_addr_lo; + __le32 confq_first_pbe_addr_hi; + __le16 tx_total_conc_seqs; + __le16 rx_max_fc_pay_len; + __le16 rx_total_conc_seqs; u8 rx_max_conc_seqs_c3; u8 rx_open_seqs_exch_c3; -#endif }; /* - * FCoE connection offload request 4 + * FCoE connection offload request 4 $$KEEP_ENDIANNESS$$ */ struct fcoe_kwqe_conn_offload4 { -#if defined(__BIG_ENDIAN) - struct fcoe_kwqe_header hdr; - u8 reserved2; - u8 e_d_tov_timer_val; -#elif defined(__LITTLE_ENDIAN) u8 e_d_tov_timer_val; u8 reserved2; struct fcoe_kwqe_header hdr; -#endif - u8 src_mac_addr_lo32[4]; -#if defined(__BIG_ENDIAN) - u8 dst_mac_addr_hi16[2]; - u8 src_mac_addr_hi16[2]; -#elif defined(__LITTLE_ENDIAN) - u8 src_mac_addr_hi16[2]; - u8 dst_mac_addr_hi16[2]; -#endif - u8 dst_mac_addr_lo32[4]; - u32 lcq_addr_lo; - u32 lcq_addr_hi; - u32 confq_pbl_base_addr_lo; - u32 confq_pbl_base_addr_hi; + u8 src_mac_addr_lo[2]; + u8 src_mac_addr_mid[2]; + u8 src_mac_addr_hi[2]; + u8 dst_mac_addr_hi[2]; + u8 dst_mac_addr_lo[2]; + u8 dst_mac_addr_mid[2]; + __le32 lcq_addr_lo; + __le32 lcq_addr_hi; + __le32 confq_pbl_base_addr_lo; + __le32 confq_pbl_base_addr_hi; }; /* - * FCoE connection enable request + * FCoE connection enable request $$KEEP_ENDIANNESS$$ */ struct fcoe_kwqe_conn_enable_disable { -#if defined(__BIG_ENDIAN) - struct fcoe_kwqe_header hdr; - u16 reserved0; -#elif defined(__LITTLE_ENDIAN) - u16 reserved0; + __le16 reserved0; struct fcoe_kwqe_header hdr; -#endif - u8 src_mac_addr_lo32[4]; -#if defined(__BIG_ENDIAN) - u16 vlan_tag; -#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0) 
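For reference: the structures tagged $$KEEP_ENDIANNESS$$ in this header now carry a fixed little-endian layout (__le16/__le32 fields) in place of the old __BIG_ENDIAN/__LITTLE_ENDIAN dual definitions, so the host side fills them through the normal byte-order helpers. A minimal illustrative sketch, not part of this patch; it assumes <linux/types.h> and <asm/byteorder.h> plus the fcoe_kwqe_conn_enable_disable layout shown in this hunk, with hypothetical context_id/conn_id inputs:

	/* Editorial sketch only, not from the driver sources. */
	static void fcoe_fill_enable_ids(struct fcoe_kwqe_conn_enable_disable *req,
					 u32 context_id, u32 conn_id)
	{
		/* __le32 fields: convert explicitly from host byte order. */
		req->context_id = cpu_to_le32(context_id);
		req->conn_id = cpu_to_le32(conn_id);
	}
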
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0 -#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12) -#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12 -#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13) -#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13 - u8 src_mac_addr_hi16[2]; -#elif defined(__LITTLE_ENDIAN) - u8 src_mac_addr_hi16[2]; + u8 src_mac_addr_lo[2]; + u8 src_mac_addr_mid[2]; + u8 src_mac_addr_hi[2]; u16 vlan_tag; #define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0) #define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0 @@ -571,82 +481,52 @@ struct fcoe_kwqe_conn_enable_disable { #define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12 #define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13) #define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13 -#endif - u8 dst_mac_addr_lo32[4]; -#if defined(__BIG_ENDIAN) - u16 reserved1; - u8 dst_mac_addr_hi16[2]; -#elif defined(__LITTLE_ENDIAN) - u8 dst_mac_addr_hi16[2]; - u16 reserved1; -#endif -#if defined(__BIG_ENDIAN) - u8 vlan_flag; - u8 s_id[3]; -#elif defined(__LITTLE_ENDIAN) + u8 dst_mac_addr_lo[2]; + u8 dst_mac_addr_mid[2]; + u8 dst_mac_addr_hi[2]; + __le16 reserved1; u8 s_id[3]; u8 vlan_flag; -#endif -#if defined(__BIG_ENDIAN) - u8 reserved3; - u8 d_id[3]; -#elif defined(__LITTLE_ENDIAN) u8 d_id[3]; u8 reserved3; -#endif - u32 context_id; - u32 conn_id; - u32 reserved4; + __le32 context_id; + __le32 conn_id; + __le32 reserved4; }; /* - * FCoE connection destroy request + * FCoE connection destroy request $$KEEP_ENDIANNESS$$ */ struct fcoe_kwqe_conn_destroy { -#if defined(__BIG_ENDIAN) - struct fcoe_kwqe_header hdr; - u16 reserved0; -#elif defined(__LITTLE_ENDIAN) - u16 reserved0; + __le16 reserved0; struct fcoe_kwqe_header hdr; -#endif - u32 context_id; - u32 conn_id; - u32 reserved1[5]; + __le32 context_id; + __le32 conn_id; + __le32 reserved1[5]; }; /* - * FCoe destroy request + * FCoe destroy request $$KEEP_ENDIANNESS$$ */ struct fcoe_kwqe_destroy { -#if defined(__BIG_ENDIAN) - struct fcoe_kwqe_header hdr; - u16 reserved0; -#elif defined(__LITTLE_ENDIAN) - u16 reserved0; + __le16 reserved0; struct fcoe_kwqe_header hdr; -#endif - u32 reserved1[7]; + __le32 reserved1[7]; }; /* - * FCoe statistics request + * FCoe statistics request $$KEEP_ENDIANNESS$$ */ struct fcoe_kwqe_stat { -#if defined(__BIG_ENDIAN) + __le16 reserved0; struct fcoe_kwqe_header hdr; - u16 reserved0; -#elif defined(__LITTLE_ENDIAN) - u16 reserved0; - struct fcoe_kwqe_header hdr; -#endif - u32 stat_params_addr_lo; - u32 stat_params_addr_hi; - u32 reserved1[5]; + __le32 stat_params_addr_lo; + __le32 stat_params_addr_hi; + __le32 reserved1[5]; }; /* - * FCoE KWQ WQE + * FCoE KWQ WQE $$KEEP_ENDIANNESS$$ */ union fcoe_kwqe { struct fcoe_kwqe_init1 init1; @@ -662,19 +542,42 @@ union fcoe_kwqe { struct fcoe_kwqe_stat statistics; }; -struct fcoe_mul_sges_ctx { - struct regpair cur_sge_addr; -#if defined(__BIG_ENDIAN) - u8 sgl_size; - u8 cur_sge_idx; - u16 cur_sge_off; -#elif defined(__LITTLE_ENDIAN) - u16 cur_sge_off; - u8 cur_sge_idx; - u8 sgl_size; -#endif + + + + + + + + + + + + + + + +/* + * TX SGL context $$KEEP_ENDIANNESS$$ + */ +union fcoe_sgl_union_ctx { + struct fcoe_cached_sge_ctx cached_sge; + struct fcoe_ext_mul_sges_ctx sgl; + __le32 opaque[5]; }; +/* + * Data-In/ELS/BLS information $$KEEP_ENDIANNESS$$ + */ +struct fcoe_read_flow_info { + union fcoe_sgl_union_ctx sgl_ctx; + __le32 rsrv0[3]; +}; + + +/* + * Fcoe stat context $$KEEP_ENDIANNESS$$ + */ struct fcoe_s_stat_ctx { u8 flags; #define FCOE_S_STAT_CTX_ACTIVE (0x1<<0) @@ -693,51 
+596,34 @@ struct fcoe_s_stat_ctx { #define FCOE_S_STAT_CTX_RSRV1_SHIFT 6 }; -struct fcoe_seq_ctx { -#if defined(__BIG_ENDIAN) - u16 low_seq_cnt; - struct fcoe_s_stat_ctx s_stat; - u8 seq_id; -#elif defined(__LITTLE_ENDIAN) +/* + * Fcoe rx seq context $$KEEP_ENDIANNESS$$ + */ +struct fcoe_rx_seq_ctx { u8 seq_id; struct fcoe_s_stat_ctx s_stat; - u16 low_seq_cnt; -#endif -#if defined(__BIG_ENDIAN) - u16 err_seq_cnt; - u16 high_seq_cnt; -#elif defined(__LITTLE_ENDIAN) - u16 high_seq_cnt; - u16 err_seq_cnt; -#endif - u32 low_exp_ro; - u32 high_exp_ro; + __le16 seq_cnt; + __le32 low_exp_ro; + __le32 high_exp_ro; }; -struct fcoe_single_sge_ctx { - struct regpair cur_buf_addr; -#if defined(__BIG_ENDIAN) - u16 reserved0; - u16 cur_buf_rem; -#elif defined(__LITTLE_ENDIAN) - u16 cur_buf_rem; - u16 reserved0; -#endif -}; - -union fcoe_sgl_ctx { - struct fcoe_single_sge_ctx single_sge; - struct fcoe_mul_sges_ctx mul_sges; +/* + * Fcoe rx_wr union context $$KEEP_ENDIANNESS$$ + */ +union fcoe_rx_wr_union_ctx { + struct fcoe_read_flow_info read_info; + union fcoe_comp_flow_info comp_info; + __le32 opaque[8]; }; /* - * FCoE SQ element + * FCoE SQ element $$KEEP_ENDIANNESS$$ */ struct fcoe_sqe { - u16 wqe; + __le16 wqe; #define FCOE_SQE_TASK_ID (0x7FFF<<0) #define FCOE_SQE_TASK_ID_SHIFT 0 #define FCOE_SQE_TOGGLE_BIT (0x1<<15) @@ -746,135 +632,141 @@ struct fcoe_sqe { -struct fcoe_task_ctx_entry_tx_only { - union fcoe_sgl_ctx sgl_ctx; +/* + * 14 regs $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tce_tx_only { + union fcoe_sgl_union_ctx sgl_ctx; + __le32 rsrv0; }; -struct fcoe_task_ctx_entry_txwr_rxrd { -#if defined(__BIG_ENDIAN) - u16 verify_tx_seq; +/* + * 32 bytes (8 regs) used for TX only purposes $$KEEP_ENDIANNESS$$ + */ +union fcoe_tx_wr_rx_rd_union_ctx { + struct fcoe_fc_frame tx_frame; + struct fcoe_fcp_cmd_payload fcp_cmd; + struct fcoe_ext_cleanup_info cleanup; + struct fcoe_ext_abts_info abts; + struct fcoe_ext_fw_tx_seq_ctx tx_seq; + __le32 opaque[8]; +}; + +/* + * tce_tx_wr_rx_rd_const $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tce_tx_wr_rx_rd_const { u8 init_flags; -#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE (0x7<<0) -#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT 0 -#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE (0x1<<3) -#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT 3 -#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE (0x1<<4) -#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT 4 -#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE (0x1<<5) -#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT 5 -#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5 (0x3<<6) -#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5_SHIFT 6 - u8 tx_flags; -#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE (0xF<<0) -#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT 0 -#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4 (0xF<<4) -#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4_SHIFT 4 -#elif defined(__LITTLE_ENDIAN) +#define FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE (0x7<<0) +#define FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT 0 +#define FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE (0x1<<3) +#define FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT 3 +#define FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE (0x1<<4) +#define FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT 4 +#define FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE (0x3<<5) +#define FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT 5 +#define FCOE_TCE_TX_WR_RX_RD_CONST_SUPPORT_REC_TOV (0x1<<7) +#define FCOE_TCE_TX_WR_RX_RD_CONST_SUPPORT_REC_TOV_SHIFT 7 u8 tx_flags; -#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE (0xF<<0) 
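For reference: the init_flags/tx_flags bit fields above follow this header's convention that each mask is already shifted into position, so a field is programmed as (value << _SHIFT) & mask. A minimal illustrative sketch, not part of this patch; it assumes the fcoe_tce_tx_wr_rx_rd_const layout and defines introduced in this hunk, with hypothetical task_type/dev_type/class_type inputs:

	/* Editorial sketch only, not from the driver sources. */
	static void fcoe_tce_set_init_flags(struct fcoe_tce_tx_wr_rx_rd_const *c,
					    u8 task_type, u8 dev_type, u8 class_type)
	{
		u8 flags = 0;

		flags |= (task_type << FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT) &
			 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE;
		flags |= (dev_type << FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT) &
			 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE;
		flags |= (class_type << FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT) &
			 FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE;
		c->init_flags = flags;
	}
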
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT 0 -#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4 (0xF<<4) -#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4_SHIFT 4 - u8 init_flags; -#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE (0x7<<0) -#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT 0 -#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE (0x1<<3) -#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT 3 -#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE (0x1<<4) -#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT 4 -#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE (0x1<<5) -#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT 5 -#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5 (0x3<<6) -#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5_SHIFT 6 - u16 verify_tx_seq; -#endif +#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_VALID (0x1<<0) +#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_VALID_SHIFT 0 +#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE (0xF<<1) +#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT 1 +#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV1 (0x1<<5) +#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV1_SHIFT 5 +#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_SEQ_INIT (0x1<<6) +#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_SEQ_INIT_SHIFT 6 +#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV2 (0x1<<7) +#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV2_SHIFT 7 + __le16 rsrv3; + __le32 verify_tx_seq; }; /* - * Common section. Both TX and RX processing might write and read from it in - * different flows + * tce_tx_wr_rx_rd $$KEEP_ENDIANNESS$$ */ -struct fcoe_task_ctx_entry_tx_rx_cmn { - u32 data_2_trns; - union fcoe_general_task_ctx general; -#if defined(__BIG_ENDIAN) - u16 tx_low_seq_cnt; - struct fcoe_s_stat_ctx tx_s_stat; - u8 tx_seq_id; -#elif defined(__LITTLE_ENDIAN) - u8 tx_seq_id; - struct fcoe_s_stat_ctx tx_s_stat; - u16 tx_low_seq_cnt; -#endif - u32 common_flags; -#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID (0xFFFFFF<<0) -#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT 0 -#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID (0x1<<24) -#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT 24 -#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT (0x1<<25) -#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT_SHIFT 25 -#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_XFER (0x1<<26) -#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_XFER_SHIFT 26 -#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_CONF (0x1<<27) -#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_CONF_SHIFT 27 -#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME (0x1<<28) -#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT 28 -#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_RSRV (0x7<<29) -#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_RSRV_SHIFT 29 -}; - -struct fcoe_task_ctx_entry_rxwr_txrd { -#if defined(__BIG_ENDIAN) - u16 rx_id; - u16 rx_flags; -#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE (0xF<<0) -#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT 0 -#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE (0x7<<4) -#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT 4 -#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ (0x1<<7) -#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ_SHIFT 7 -#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME (0x1<<8) -#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME_SHIFT 8 -#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0 (0x7F<<9) -#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0_SHIFT 9 -#elif defined(__LITTLE_ENDIAN) - u16 rx_flags; -#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE (0xF<<0) -#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT 0 -#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE (0x7<<4) -#define 
FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT 4 -#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ (0x1<<7) -#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ_SHIFT 7 -#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME (0x1<<8) -#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME_SHIFT 8 -#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0 (0x7F<<9) -#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0_SHIFT 9 - u16 rx_id; -#endif +struct fcoe_tce_tx_wr_rx_rd { + union fcoe_tx_wr_rx_rd_union_ctx union_ctx; + struct fcoe_tce_tx_wr_rx_rd_const const_ctx; }; -struct fcoe_task_ctx_entry_rx_only { - struct fcoe_seq_ctx seq_ctx; - struct fcoe_seq_ctx ooo_seq_ctx; - u32 rsrv3; - union fcoe_sgl_ctx sgl_ctx; +/* + * tce_rx_wr_tx_rd_const $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tce_rx_wr_tx_rd_const { + __le32 data_2_trns; + __le32 init_flags; +#define FCOE_TCE_RX_WR_TX_RD_CONST_CID (0xFFFFFF<<0) +#define FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT 0 +#define FCOE_TCE_RX_WR_TX_RD_CONST_RSRV0 (0xFF<<24) +#define FCOE_TCE_RX_WR_TX_RD_CONST_RSRV0_SHIFT 24 +}; + +/* + * tce_rx_wr_tx_rd_var $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tce_rx_wr_tx_rd_var { + __le16 rx_flags; +#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV1 (0xF<<0) +#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV1_SHIFT 0 +#define FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE (0x7<<4) +#define FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT 4 +#define FCOE_TCE_RX_WR_TX_RD_VAR_CONF_REQ (0x1<<7) +#define FCOE_TCE_RX_WR_TX_RD_VAR_CONF_REQ_SHIFT 7 +#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE (0xF<<8) +#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT 8 +#define FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME (0x1<<12) +#define FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT 12 +#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_SEQ_INIT (0x1<<13) +#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_SEQ_INIT_SHIFT 13 +#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV2 (0x1<<14) +#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV2_SHIFT 14 +#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_VALID (0x1<<15) +#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_VALID_SHIFT 15 + __le16 rx_id; + struct fcoe_fcp_xfr_rdy_payload fcp_xfr_rdy; +}; + +/* + * tce_rx_wr_tx_rd $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tce_rx_wr_tx_rd { + struct fcoe_tce_rx_wr_tx_rd_const const_ctx; + struct fcoe_tce_rx_wr_tx_rd_var var_ctx; +}; + +/* + * tce_rx_only $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tce_rx_only { + struct fcoe_rx_seq_ctx rx_seq_ctx; + union fcoe_rx_wr_union_ctx union_ctx; }; +/* + * task_ctx_entry $$KEEP_ENDIANNESS$$ + */ struct fcoe_task_ctx_entry { - struct fcoe_task_ctx_entry_tx_only tx_wr_only; - struct fcoe_task_ctx_entry_txwr_rxrd tx_wr_rx_rd; - struct fcoe_task_ctx_entry_tx_rx_cmn cmn; - struct fcoe_task_ctx_entry_rxwr_txrd rx_wr_tx_rd; - struct fcoe_task_ctx_entry_rx_only rx_wr_only; - u32 reserved[4]; + struct fcoe_tce_tx_only txwr_only; + struct fcoe_tce_tx_wr_rx_rd txwr_rxrd; + struct fcoe_tce_rx_wr_tx_rd rxwr_txrd; + struct fcoe_tce_rx_only rxwr_only; }; + + + + + + + + /* - * FCoE XFRQ element + * FCoE XFRQ element $$KEEP_ENDIANNESS$$ */ struct fcoe_xfrqe { - u16 wqe; + __le16 wqe; #define FCOE_XFRQE_TASK_ID (0x7FFF<<0) #define FCOE_XFRQE_TASK_ID_SHIFT 0 #define FCOE_XFRQE_TOGGLE_BIT (0x1<<15) @@ -883,22 +775,31 @@ struct fcoe_xfrqe { /* - * FCoE CONFQ element + * fcoe rx doorbell message sent to the chip $$KEEP_ENDIANNESS$$ + */ +struct b577xx_fcoe_rx_doorbell { + struct b577xx_doorbell_hdr hdr; + u8 params; +#define B577XX_FCOE_RX_DOORBELL_NEGATIVE_ARM (0x1F<<0) +#define B577XX_FCOE_RX_DOORBELL_NEGATIVE_ARM_SHIFT 0 +#define B577XX_FCOE_RX_DOORBELL_OPCODE (0x7<<5) +#define 
B577XX_FCOE_RX_DOORBELL_OPCODE_SHIFT 5 + __le16 doorbell_cq_cons; +}; + + +/* + * FCoE CONFQ element $$KEEP_ENDIANNESS$$ */ struct fcoe_confqe { -#if defined(__BIG_ENDIAN) - u16 rx_id; - u16 ox_id; -#elif defined(__LITTLE_ENDIAN) - u16 ox_id; - u16 rx_id; -#endif - u32 param; + __le16 ox_id; + __le16 rx_id; + __le32 param; }; /* - * FCoE connection data base + * FCoE conection data base */ struct fcoe_conn_db { #if defined(__BIG_ENDIAN) @@ -914,10 +815,10 @@ struct fcoe_conn_db { /* - * FCoE CQ element + * FCoE CQ element $$KEEP_ENDIANNESS$$ */ struct fcoe_cqe { - u16 wqe; + __le16 wqe; #define FCOE_CQE_CQE_INFO (0x3FFF<<0) #define FCOE_CQE_CQE_INFO_SHIFT 0 #define FCOE_CQE_CQE_TYPE (0x1<<14) @@ -928,61 +829,46 @@ struct fcoe_cqe { /* - * FCoE error/warning resporting entry + * FCoE error/warning reporting entry $$KEEP_ENDIANNESS$$ + */ +struct fcoe_partial_err_report_entry { + __le32 err_warn_bitmap_lo; + __le32 err_warn_bitmap_hi; + __le32 tx_buf_off; + __le32 rx_buf_off; +}; + +/* + * FCoE error/warning reporting entry $$KEEP_ENDIANNESS$$ */ struct fcoe_err_report_entry { - u32 err_warn_bitmap_lo; - u32 err_warn_bitmap_hi; - u32 tx_buf_off; - u32 rx_buf_off; + struct fcoe_partial_err_report_entry data; struct fcoe_fc_hdr fc_hdr; }; /* - * FCoE hash table entry (32 bytes) + * FCoE hash table entry (32 bytes) $$KEEP_ENDIANNESS$$ */ struct fcoe_hash_table_entry { -#if defined(__BIG_ENDIAN) - u8 d_id_0; - u8 s_id_2; - u8 s_id_1; - u8 s_id_0; -#elif defined(__LITTLE_ENDIAN) u8 s_id_0; u8 s_id_1; u8 s_id_2; u8 d_id_0; -#endif -#if defined(__BIG_ENDIAN) - u16 dst_mac_addr_hi; - u8 d_id_2; - u8 d_id_1; -#elif defined(__LITTLE_ENDIAN) u8 d_id_1; u8 d_id_2; - u16 dst_mac_addr_hi; -#endif - u32 dst_mac_addr_lo; -#if defined(__BIG_ENDIAN) - u16 vlan_id; - u16 src_mac_addr_hi; -#elif defined(__LITTLE_ENDIAN) - u16 src_mac_addr_hi; - u16 vlan_id; -#endif - u32 src_mac_addr_lo; -#if defined(__BIG_ENDIAN) - u16 reserved1; - u8 reserved0; - u8 vlan_flag; -#elif defined(__LITTLE_ENDIAN) + __le16 dst_mac_addr_hi; + __le16 dst_mac_addr_mid; + __le16 dst_mac_addr_lo; + __le16 src_mac_addr_hi; + __le16 vlan_id; + __le16 src_mac_addr_lo; + __le16 src_mac_addr_mid; u8 vlan_flag; u8 reserved0; - u16 reserved1; -#endif - u32 reserved2; - u32 field_id; + __le16 reserved1; + __le32 reserved2; + __le32 field_id; #define FCOE_HASH_TABLE_ENTRY_CID (0xFFFFFF<<0) #define FCOE_HASH_TABLE_ENTRY_CID_SHIFT 0 #define FCOE_HASH_TABLE_ENTRY_RESERVED3 (0x7F<<24) @@ -991,11 +877,27 @@ struct fcoe_hash_table_entry { #define FCOE_HASH_TABLE_ENTRY_VALID_SHIFT 31 }; + /* - * FCoE pending work request CQE + * FCoE LCQ element $$KEEP_ENDIANNESS$$ + */ +struct fcoe_lcqe { + __le32 wqe; +#define FCOE_LCQE_TASK_ID (0xFFFF<<0) +#define FCOE_LCQE_TASK_ID_SHIFT 0 +#define FCOE_LCQE_LCQE_TYPE (0xFF<<16) +#define FCOE_LCQE_LCQE_TYPE_SHIFT 16 +#define FCOE_LCQE_RESERVED (0xFF<<24) +#define FCOE_LCQE_RESERVED_SHIFT 24 +}; + + + +/* + * FCoE pending work request CQE $$KEEP_ENDIANNESS$$ */ struct fcoe_pend_wq_cqe { - u16 wqe; + __le16 wqe; #define FCOE_PEND_WQ_CQE_TASK_ID (0x3FFF<<0) #define FCOE_PEND_WQ_CQE_TASK_ID_SHIFT 0 #define FCOE_PEND_WQ_CQE_CQE_TYPE (0x1<<14) @@ -1006,53 +908,61 @@ struct fcoe_pend_wq_cqe { /* - * FCoE RX statistics parameters section#0 + * FCoE RX statistics parameters section#0 $$KEEP_ENDIANNESS$$ */ struct fcoe_rx_stat_params_section0 { - u32 fcoe_ver_cnt; - u32 fcoe_rx_pkt_cnt; - u32 fcoe_rx_byte_cnt; - u32 fcoe_rx_drop_pkt_cnt; + __le32 fcoe_rx_pkt_cnt; + __le32 fcoe_rx_byte_cnt; }; /* - * FCoE RX statistics 
parameters section#1 + * FCoE RX statistics parameters section#1 $$KEEP_ENDIANNESS$$ */ struct fcoe_rx_stat_params_section1 { - u32 fc_crc_cnt; - u32 eofa_del_cnt; - u32 miss_frame_cnt; - u32 seq_timeout_cnt; - u32 drop_seq_cnt; - u32 fcoe_rx_drop_pkt_cnt; - u32 fcp_rx_pkt_cnt; - u32 reserved0; + __le32 fcoe_ver_cnt; + __le32 fcoe_rx_drop_pkt_cnt; +}; + + +/* + * FCoE RX statistics parameters section#2 $$KEEP_ENDIANNESS$$ + */ +struct fcoe_rx_stat_params_section2 { + __le32 fc_crc_cnt; + __le32 eofa_del_cnt; + __le32 miss_frame_cnt; + __le32 seq_timeout_cnt; + __le32 drop_seq_cnt; + __le32 fcoe_rx_drop_pkt_cnt; + __le32 fcp_rx_pkt_cnt; + __le32 reserved0; }; /* - * FCoE TX statistics parameters + * FCoE TX statistics parameters $$KEEP_ENDIANNESS$$ */ struct fcoe_tx_stat_params { - u32 fcoe_tx_pkt_cnt; - u32 fcoe_tx_byte_cnt; - u32 fcp_tx_pkt_cnt; - u32 reserved0; + __le32 fcoe_tx_pkt_cnt; + __le32 fcoe_tx_byte_cnt; + __le32 fcp_tx_pkt_cnt; + __le32 reserved0; }; /* - * FCoE statistics parameters + * FCoE statistics parameters $$KEEP_ENDIANNESS$$ */ struct fcoe_statistics_params { struct fcoe_tx_stat_params tx_stat; struct fcoe_rx_stat_params_section0 rx_stat0; struct fcoe_rx_stat_params_section1 rx_stat1; + struct fcoe_rx_stat_params_section2 rx_stat2; }; /* - * FCoE t2 hash table entry (64 bytes) + * FCoE t2 hash table entry (64 bytes) $$KEEP_ENDIANNESS$$ */ struct fcoe_t2_hash_table_entry { struct fcoe_hash_table_entry data; @@ -1060,11 +970,13 @@ struct fcoe_t2_hash_table_entry { struct regpair reserved0[3]; }; + + /* - * FCoE unsolicited CQE + * FCoE unsolicited CQE $$KEEP_ENDIANNESS$$ */ struct fcoe_unsolicited_cqe { - u16 wqe; + __le16 wqe; #define FCOE_UNSOLICITED_CQE_SUBTYPE (0x3<<0) #define FCOE_UNSOLICITED_CQE_SUBTYPE_SHIFT 0 #define FCOE_UNSOLICITED_CQE_PKT_LEN (0xFFF<<2) @@ -1075,6 +987,4 @@ struct fcoe_unsolicited_cqe { #define FCOE_UNSOLICITED_CQE_TOGGLE_BIT_SHIFT 15 }; - - #endif /* __57XX_FCOE_HSI_LINUX_LE__ */ diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h index 0a404bfb44f..42228ca5a9d 100644 --- a/drivers/scsi/bnx2fc/bnx2fc.h +++ b/drivers/scsi/bnx2fc/bnx2fc.h @@ -2,7 +2,7 @@ #define _BNX2FC_H_ /* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver. 
* - * Copyright (c) 2008 - 2010 Broadcom Corporation + * Copyright (c) 2008 - 2011 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -62,7 +62,7 @@ #include "bnx2fc_constants.h" #define BNX2FC_NAME "bnx2fc" -#define BNX2FC_VERSION "1.0.1" +#define BNX2FC_VERSION "1.0.4" #define PFX "bnx2fc: " @@ -141,6 +141,10 @@ #define BNX2FC_RNID_HBA 0x7 +#define SRR_RETRY_COUNT 5 +#define REC_RETRY_COUNT 1 +#define BNX2FC_NUM_ERR_BITS 63 + /* bnx2fc driver uses only one instance of fcoe_percpu_s */ extern struct fcoe_percpu_s bnx2fc_global; @@ -152,20 +156,14 @@ struct bnx2fc_percpu_s { spinlock_t fp_work_lock; }; - struct bnx2fc_hba { - struct list_head link; + struct list_head list; struct cnic_dev *cnic; struct pci_dev *pcidev; - struct net_device *netdev; struct net_device *phys_dev; unsigned long reg_with_cnic; #define BNX2FC_CNIC_REGISTERED 1 - struct packet_type fcoe_packet_type; - struct packet_type fip_packet_type; struct bnx2fc_cmd_mgr *cmd_mgr; - struct workqueue_struct *timer_work_queue; - struct kref kref; spinlock_t hba_lock; struct mutex hba_mutex; unsigned long adapter_state; @@ -173,14 +171,9 @@ struct bnx2fc_hba { #define ADAPTER_STATE_GOING_DOWN 1 #define ADAPTER_STATE_LINK_DOWN 2 #define ADAPTER_STATE_READY 3 - u32 flags; - unsigned long init_done; - #define BNX2FC_FW_INIT_DONE 0 - #define BNX2FC_CTLR_INIT_DONE 1 - #define BNX2FC_CREATE_DONE 2 - struct fcoe_ctlr ctlr; - u8 vlan_enabled; - int vlan_id; + unsigned long flags; + #define BNX2FC_FLAG_FW_INIT_DONE 0 + #define BNX2FC_FLAG_DESTROY_CMPL 1 u32 next_conn_id; struct fcoe_task_ctx_entry **task_ctx; dma_addr_t *task_ctx_dma; @@ -199,38 +192,46 @@ struct bnx2fc_hba { char *dummy_buffer; dma_addr_t dummy_buf_dma; + /* Active list of offloaded sessions */ + struct bnx2fc_rport **tgt_ofld_list; + + /* statistics */ struct fcoe_statistics_params *stats_buffer; dma_addr_t stats_buf_dma; - - /* - * PCI related info. 
- */ - u16 pci_did; - u16 pci_vid; - u16 pci_sdid; - u16 pci_svid; - u16 pci_func; - u16 pci_devno; - - struct task_struct *l2_thread; - - /* linkdown handling */ - wait_queue_head_t shutdown_wait; - int wait_for_link_down; + struct completion stat_req_done; /*destroy handling */ struct timer_list destroy_timer; wait_queue_head_t destroy_wait; - /* Active list of offloaded sessions */ - struct bnx2fc_rport *tgt_ofld_list[BNX2FC_NUM_MAX_SESS]; + /* linkdown handling */ + wait_queue_head_t shutdown_wait; + int wait_for_link_down; int num_ofld_sess; + struct list_head vports; +}; - /* statistics */ - struct completion stat_req_done; +struct bnx2fc_interface { + struct list_head list; + unsigned long if_flags; + #define BNX2FC_CTLR_INIT_DONE 0 + struct bnx2fc_hba *hba; + struct net_device *netdev; + struct packet_type fcoe_packet_type; + struct packet_type fip_packet_type; + struct workqueue_struct *timer_work_queue; + struct kref kref; + struct fcoe_ctlr ctlr; + u8 vlan_enabled; + int vlan_id; }; -#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_hba, ctlr) +#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_interface, ctlr) + +struct bnx2fc_lport { + struct list_head list; + struct fc_lport *lport; +}; struct bnx2fc_cmd_mgr { struct bnx2fc_hba *hba; @@ -247,9 +248,11 @@ struct bnx2fc_rport { struct fc_rport_priv *rdata; void __iomem *ctx_base; #define DPM_TRIGER_TYPE 0x40 + u32 io_timeout; u32 fcoe_conn_id; u32 context_id; u32 sid; + int dev_type; unsigned long flags; #define BNX2FC_FLAG_SESSION_READY 0x1 @@ -257,14 +260,18 @@ struct bnx2fc_rport { #define BNX2FC_FLAG_DISABLED 0x3 #define BNX2FC_FLAG_DESTROYED 0x4 #define BNX2FC_FLAG_OFLD_REQ_CMPL 0x5 -#define BNX2FC_FLAG_DESTROY_CMPL 0x6 -#define BNX2FC_FLAG_CTX_ALLOC_FAILURE 0x7 -#define BNX2FC_FLAG_UPLD_REQ_COMPL 0x8 -#define BNX2FC_FLAG_EXPL_LOGO 0x9 +#define BNX2FC_FLAG_CTX_ALLOC_FAILURE 0x6 +#define BNX2FC_FLAG_UPLD_REQ_COMPL 0x7 +#define BNX2FC_FLAG_EXPL_LOGO 0x8 + u8 src_addr[ETH_ALEN]; u32 max_sqes; u32 max_rqes; u32 max_cqes; + atomic_t free_sqes; + + struct b577xx_doorbell_set_prod sq_db; + struct b577xx_fcoe_rx_doorbell rx_db; struct fcoe_sqe *sq; dma_addr_t sq_dma; @@ -274,7 +281,7 @@ struct bnx2fc_rport { struct fcoe_cqe *cq; dma_addr_t cq_dma; - u32 cq_cons_idx; + u16 cq_cons_idx; u8 cq_curr_toggle_bit; u32 cq_mem_size; @@ -317,12 +324,9 @@ struct bnx2fc_rport { spinlock_t cq_lock; atomic_t num_active_ios; u32 flush_in_prog; - unsigned long work_time_slice; unsigned long timestamp; struct list_head free_task_list; struct bnx2fc_cmd *pending_queue[BNX2FC_SQ_WQES_MAX+1]; - atomic_t pi; - atomic_t ci; struct list_head active_cmd_queue; struct list_head els_queue; struct list_head io_retire_queue; @@ -357,6 +361,8 @@ struct bnx2fc_els_cb_arg { struct bnx2fc_cmd *aborted_io_req; struct bnx2fc_cmd *io_req; u16 l2_oxid; + u32 offset; + enum fc_rctl r_ctl; }; /* bnx2fc command structure */ @@ -370,6 +376,7 @@ struct bnx2fc_cmd { #define BNX2FC_ABTS 3 #define BNX2FC_ELS 4 #define BNX2FC_CLEANUP 5 +#define BNX2FC_SEQ_CLEANUP 6 u8 io_req_flags; struct kref refcount; struct fcoe_port *port; @@ -383,6 +390,7 @@ struct bnx2fc_cmd { struct completion tm_done; int wait_for_comp; u16 xid; + struct fcoe_err_report_entry err_entry; struct fcoe_task_ctx_entry *task; struct io_bdt *bd_tbl; struct fcp_rsp *rsp; @@ -399,6 +407,12 @@ struct bnx2fc_cmd { #define BNX2FC_FLAG_IO_COMPL 0x9 #define BNX2FC_FLAG_ELS_DONE 0xa #define BNX2FC_FLAG_ELS_TIMEOUT 0xb +#define BNX2FC_FLAG_CMD_LOST 0xc +#define BNX2FC_FLAG_SRR_SENT 0xd + u8 
rec_retry; + u8 srr_retry; + u32 srr_offset; + u8 srr_rctl; u32 fcp_resid; u32 fcp_rsp_len; u32 fcp_sns_len; @@ -423,11 +437,13 @@ struct bnx2fc_work { struct bnx2fc_unsol_els { struct fc_lport *lport; struct fc_frame *fp; + struct bnx2fc_hba *hba; struct work_struct unsol_els_work; }; +struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt); struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type); void bnx2fc_cmd_release(struct kref *ref); int bnx2fc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd); @@ -465,6 +481,10 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req); void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req, struct fcoe_task_ctx_entry *task, u16 orig_xid); +void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnup_req, + struct fcoe_task_ctx_entry *task, + struct bnx2fc_cmd *orig_io_req, + u32 offset); void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req, struct fcoe_task_ctx_entry *task); void bnx2fc_init_task(struct bnx2fc_cmd *io_req, @@ -505,6 +525,7 @@ struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did, struct fc_frame *, void *), void *arg, u32 timeout); +void bnx2fc_arm_cq(struct bnx2fc_rport *tgt); int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt); void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe); struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port, @@ -513,5 +534,13 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt, unsigned char *buf, u32 frame_len, u16 l2_oxid); int bnx2fc_send_stat_req(struct bnx2fc_hba *hba); +int bnx2fc_post_io_req(struct bnx2fc_rport *tgt, struct bnx2fc_cmd *io_req); +int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req); +int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl); +void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnup_req, + struct fcoe_task_ctx_entry *task, + u8 rx_state); +int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset, + enum fc_rctl r_ctl); #endif diff --git a/drivers/scsi/bnx2fc/bnx2fc_constants.h b/drivers/scsi/bnx2fc/bnx2fc_constants.h index fe7769173c4..399cda047a7 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_constants.h +++ b/drivers/scsi/bnx2fc/bnx2fc_constants.h @@ -5,6 +5,12 @@ * This file defines HSI constants for the FCoE flows */ +/* Current FCoE HSI version number composed of two fields (16 bit) */ +/* Implies on a change broken previous HSI */ +#define FCOE_HSI_MAJOR_VERSION (1) +/* Implies on a change which does not broken previous HSI */ +#define FCOE_HSI_MINOR_VERSION (1) + /* KWQ/KCQ FCoE layer code */ #define FCOE_KWQE_LAYER_CODE (7) @@ -40,21 +46,62 @@ #define FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x3) #define FCOE_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE (0x4) #define FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR (0x5) +#define FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION (0x6) + +/* CQE type */ +#define FCOE_PENDING_CQE_TYPE 0 +#define FCOE_UNSOLIC_CQE_TYPE 1 /* Unsolicited CQE type */ #define FCOE_UNSOLICITED_FRAME_CQE_TYPE 0 #define FCOE_ERROR_DETECTION_CQE_TYPE 1 #define FCOE_WARNING_DETECTION_CQE_TYPE 2 +/* E_D_TOV timer resolution in ms */ +#define FCOE_E_D_TOV_TIMER_RESOLUTION_MS (20) + +/* E_D_TOV timer resolution for SDM (4 micro) */ +#define FCOE_E_D_TOV_SDM_TIMER_RESOLUTION \ + (FCOE_E_D_TOV_TIMER_RESOLUTION_MS * 1000 / 4) + +/* REC timer resolution in ms */ +#define FCOE_REC_TIMER_RESOLUTION_MS (20) + +/* REC timer resolution for SDM (4 micro) */ +#define FCOE_REC_SDM_TIMER_RESOLUTION (FCOE_REC_TIMER_RESOLUTION_MS * 1000 / 4) + +/* E_D_TOV 
timer default wraparound value (2 sec) in 20 ms resolution */ +#define FCOE_E_D_TOV_DEFAULT_WRAPAROUND_VAL \ + (2000 / FCOE_E_D_TOV_TIMER_RESOLUTION_MS) + +/* REC_TOV timer default wraparound value (3 sec) in 20 ms resolution */ +#define FCOE_REC_TOV_DEFAULT_WRAPAROUND_VAL \ + (3000 / FCOE_REC_TIMER_RESOLUTION_MS) + +#define FCOE_NUM_OF_TIMER_TASKS (8 * 1024) + +#define FCOE_NUM_OF_CACHED_TASKS_TIMER (8) + /* Task context constants */ +/******** Remove FCP_CMD write tce sleep ***********************/ +/* In case timer services are required then shall be updated by Xstorm after + * start processing the task. In case no timer facilities are required then the + * driver would initialize the state to this value + * +#define FCOE_TASK_TX_STATE_NORMAL 0 + * After driver has initialize the task in case timer services required * +#define FCOE_TASK_TX_STATE_INIT 1 +******** Remove FCP_CMD write tce sleep ***********************/ /* After driver has initialize the task in case timer services required */ #define FCOE_TASK_TX_STATE_INIT 0 /* In case timer services are required then shall be updated by Xstorm after * start processing the task. In case no timer facilities are required then the - * driver would initialize the state to this value */ + * driver would initialize the state to this value + */ #define FCOE_TASK_TX_STATE_NORMAL 1 /* Task is under abort procedure. Updated in order to stop processing of - * pending WQEs on this task */ + * pending WQEs on this task + */ #define FCOE_TASK_TX_STATE_ABORT 2 /* For E_D_T_TOV timer expiration in Xstorm (Class 2 only) */ #define FCOE_TASK_TX_STATE_ERROR 3 @@ -66,17 +113,8 @@ #define FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP 6 /* For sequence cleanup request task */ #define FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP 7 -/* Mark task as aborted and indicate that ABTS was not transmitted */ -#define FCOE_TASK_TX_STATE_BEFORE_ABTS_TX 8 -/* Mark task as aborted and indicate that ABTS was transmitted */ -#define FCOE_TASK_TX_STATE_AFTER_ABTS_TX 9 /* For completion the ABTS task. */ -#define FCOE_TASK_TX_STATE_ABTS_TX_COMPLETED 10 -/* Mark task as aborted and indicate that Exchange cleanup was not transmitted - */ -#define FCOE_TASK_TX_STATE_BEFORE_EXCHANGE_CLEANUP_TX 11 -/* Mark task as aborted and indicate that Exchange cleanup was transmitted */ -#define FCOE_TASK_TX_STATE_AFTER_EXCHANGE_CLEANUP_TX 12 +#define FCOE_TASK_TX_STATE_ABTS_TX 8 #define FCOE_TASK_RX_STATE_NORMAL 0 #define FCOE_TASK_RX_STATE_COMPLETED 1 @@ -86,25 +124,25 @@ #define FCOE_TASK_RX_STATE_WARNING 3 /* For E_D_T_TOV timer expiration in Ustorm */ #define FCOE_TASK_RX_STATE_ERROR 4 -/* ABTS ACC arrived wait for local completion to finally complete the task. */ -#define FCOE_TASK_RX_STATE_ABTS_ACC_ARRIVED 5 -/* local completion arrived wait for ABTS ACC to finally complete the task. */ -#define FCOE_TASK_RX_STATE_ABTS_LOCAL_COMP_ARRIVED 6 +/* FW only: First visit at rx-path, part of the abts round trip */ +#define FCOE_TASK_RX_STATE_ABTS_IN_PROCESS 5 +/* FW only: Second visit at rx-path, after ABTS frame transmitted */ +#define FCOE_TASK_RX_STATE_ABTS_TRANSMITTED 6 /* Special completion indication in case of task was aborted. */ #define FCOE_TASK_RX_STATE_ABTS_COMPLETED 7 -/* Special completion indication in case of task was cleaned. */ -#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED 8 -/* Special completion indication (in task requested the exchange cleanup) in - * case cleaned task is in non-valid. 
*/ -#define FCOE_TASK_RX_STATE_ABORT_CLEANUP_COMPLETED 9 +/* FW only: First visit at rx-path, part of the cleanup round trip */ +#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_IN_PROCESS 8 +/* FW only: Special completion indication in case of task was cleaned. */ +#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED 9 +/* Not in used: Special completion indication (in task requested the exchange + * cleanup) in case cleaned task is in non-valid. + */ +#define FCOE_TASK_RX_STATE_ABORT_CLEANUP_COMPLETED 10 /* Special completion indication (in task requested the sequence cleanup) in - * case cleaned task was already returned to normal. */ -#define FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP 10 -/* Exchange cleanup arrived wait until xfer will be handled to finally - * complete the task. */ -#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_ARRIVED 11 -/* Xfer handled, wait for exchange cleanup to finally complete the task. */ -#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_HANDLED_XFER 12 + * case cleaned task was already returned to normal. + */ +#define FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP 11 + #define FCOE_TASK_TYPE_WRITE 0 #define FCOE_TASK_TYPE_READ 1 @@ -120,11 +158,40 @@ #define FCOE_TASK_CLASS_TYPE_3 0 #define FCOE_TASK_CLASS_TYPE_2 1 +/* FCoE/FC packet fields */ +#define FCOE_ETH_TYPE 0x8906 + +/* FCoE maximum elements in hash table */ +#define FCOE_MAX_ELEMENTS_IN_HASH_TABLE_ROW 8 + +/* FCoE half of the elements in hash table */ +#define FCOE_HALF_ELEMENTS_IN_HASH_TABLE_ROW \ + (FCOE_MAX_ELEMENTS_IN_HASH_TABLE_ROW / 2) + +/* FcoE number of cached T2 entries */ +#define T_FCOE_NUMBER_OF_CACHED_T2_ENTRIES (4) + +/* FCoE maximum elements in hash table */ +#define FCOE_HASH_TBL_CHUNK_SIZE 16384 + /* Everest FCoE connection type */ #define B577XX_FCOE_CONNECTION_TYPE 4 -/* Error codes for Error Reporting in fast path flows */ -/* XFER error codes */ +/* FCoE number of rows (in log). This number derives + * from the maximum connections supported which is 2048. 
+ * TBA: Need a different constant for E2 + */ +#define FCOE_MAX_NUM_SESSIONS_LOG 11 + +#define FC_ABTS_REPLY_MAX_PAYLOAD_LEN 12 + +/* Error codes for Error Reporting in slow path flows */ +#define FCOE_SLOW_PATH_ERROR_CODE_TOO_MANY_FUNCS 0 +#define FCOE_SLOW_PATH_ERROR_CODE_NO_LICENSE 1 + +/* Error codes for Error Reporting in fast path flows + * XFER error codes + */ #define FCOE_ERROR_CODE_XFER_OOO_RO 0 #define FCOE_ERROR_CODE_XFER_RO_NOT_ALIGNED 1 #define FCOE_ERROR_CODE_XFER_NULL_BURST_LEN 2 @@ -155,17 +222,17 @@ #define FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET 23 #define FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET 24 #define FCOE_ERROR_CODE_DATA_EOFN_END_SEQ_SET 25 -#define FCOE_ERROR_CODE_DATA_EOFT_END_SEQ_RESET 26 -#define FCOE_ERROR_CODE_DATA_TASK_TYPE_NOT_READ 27 +#define FCOE_ERROR_CODE_DATA_EOFT_END_SEQ_RESET 26 +#define FCOE_ERROR_CODE_DATA_TASK_TYPE_NOT_READ 27 #define FCOE_ERROR_CODE_DATA_FCTL 28 /* Middle path error codes */ -#define FCOE_ERROR_CODE_MIDPATH_TYPE_NOT_ELS 29 +#define FCOE_ERROR_CODE_MIDPATH_INVALID_TYPE 29 #define FCOE_ERROR_CODE_MIDPATH_SOFI3_SEQ_ACTIVE_SET 30 #define FCOE_ERROR_CODE_MIDPATH_SOFN_SEQ_ACTIVE_RESET 31 #define FCOE_ERROR_CODE_MIDPATH_EOFN_END_SEQ_SET 32 #define FCOE_ERROR_CODE_MIDPATH_EOFT_END_SEQ_RESET 33 -#define FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_FCTL 34 +#define FCOE_ERROR_CODE_MIDPATH_REPLY_FCTL 34 #define FCOE_ERROR_CODE_MIDPATH_INVALID_REPLY 35 #define FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_RCTL 36 @@ -173,7 +240,7 @@ #define FCOE_ERROR_CODE_ABTS_REPLY_F_CTL 37 #define FCOE_ERROR_CODE_ABTS_REPLY_DDF_RCTL_FIELD 38 #define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_BLS_RCTL 39 -#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_RCTL 40 +#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_RCTL 40 #define FCOE_ERROR_CODE_ABTS_REPLY_RCTL_GENERAL_MISMATCH 41 /* Common error codes */ @@ -185,7 +252,7 @@ #define FCOE_ERROR_CODE_COMMON_DATA_NO_MORE_SGES 47 #define FCOE_ERROR_CODE_COMMON_OPTIONAL_FC_HDR 48 #define FCOE_ERROR_CODE_COMMON_READ_TCE_OX_ID_TOO_BIG 49 -#define FCOE_ERROR_CODE_COMMON_DATA_WAS_NOT_TRANSMITTED 50 +#define FCOE_ERROR_CODE_COMMON_DATA_WAS_NOT_TRANSMITTED 50 /* Unsolicited Rx error codes */ #define FCOE_ERROR_CODE_UNSOLICITED_TYPE_NOT_ELS 51 diff --git a/drivers/scsi/bnx2fc/bnx2fc_debug.h b/drivers/scsi/bnx2fc/bnx2fc_debug.h index 7f6aff68cc5..3416d9a746c 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_debug.h +++ b/drivers/scsi/bnx2fc/bnx2fc_debug.h @@ -21,21 +21,21 @@ extern unsigned int bnx2fc_debug_level; #define BNX2FC_ELS_DBG(fmt, arg...) \ BNX2FC_CHK_LOGGING(LOG_ELS, \ - printk(KERN_ALERT PFX fmt, ##arg)) + printk(KERN_INFO PFX fmt, ##arg)) #define BNX2FC_MISC_DBG(fmt, arg...) \ BNX2FC_CHK_LOGGING(LOG_MISC, \ - printk(KERN_ALERT PFX fmt, ##arg)) + printk(KERN_INFO PFX fmt, ##arg)) #define BNX2FC_IO_DBG(io_req, fmt, arg...) 
\ do { \ if (!io_req || !io_req->port || !io_req->port->lport || \ !io_req->port->lport->host) \ BNX2FC_CHK_LOGGING(LOG_IO, \ - printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \ + printk(KERN_INFO PFX "NULL " fmt, ##arg)); \ else \ BNX2FC_CHK_LOGGING(LOG_IO, \ - shost_printk(KERN_ALERT, \ + shost_printk(KERN_INFO, \ (io_req)->port->lport->host, \ PFX "xid:0x%x " fmt, \ (io_req)->xid, ##arg)); \ @@ -46,10 +46,10 @@ extern unsigned int bnx2fc_debug_level; if (!tgt || !tgt->port || !tgt->port->lport || \ !tgt->port->lport->host || !tgt->rport) \ BNX2FC_CHK_LOGGING(LOG_TGT, \ - printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \ + printk(KERN_INFO PFX "NULL " fmt, ##arg)); \ else \ BNX2FC_CHK_LOGGING(LOG_TGT, \ - shost_printk(KERN_ALERT, \ + shost_printk(KERN_INFO, \ (tgt)->port->lport->host, \ PFX "port:%x " fmt, \ (tgt)->rport->port_id, ##arg)); \ @@ -60,10 +60,10 @@ extern unsigned int bnx2fc_debug_level; do { \ if (!lport || !lport->host) \ BNX2FC_CHK_LOGGING(LOG_HBA, \ - printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \ + printk(KERN_INFO PFX "NULL " fmt, ##arg)); \ else \ BNX2FC_CHK_LOGGING(LOG_HBA, \ - shost_printk(KERN_ALERT, lport->host, \ + shost_printk(KERN_INFO, lport->host, \ PFX fmt, ##arg)); \ } while (0) diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c index 52c358427ce..d66dcbd0df1 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_els.c +++ b/drivers/scsi/bnx2fc/bnx2fc_els.c @@ -3,7 +3,7 @@ * This file contains helper routines that handle ELS requests * and responses. * - * Copyright (c) 2008 - 2010 Broadcom Corporation + * Copyright (c) 2008 - 2011 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -83,7 +83,7 @@ int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req) rrq.rrq_cmd = ELS_RRQ; hton24(rrq.rrq_s_id, sid); rrq.rrq_ox_id = htons(aborted_io_req->xid); - rrq.rrq_rx_id = htons(aborted_io_req->task->rx_wr_tx_rd.rx_id); + rrq.rrq_rx_id = htons(aborted_io_req->task->rxwr_txrd.var_ctx.rx_id); retry_rrq: rc = bnx2fc_initiate_els(tgt, ELS_RRQ, &rrq, sizeof(rrq), @@ -253,13 +253,417 @@ int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp) return rc; } +void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg) +{ + struct bnx2fc_mp_req *mp_req; + struct fc_frame_header *fc_hdr, *fh; + struct bnx2fc_cmd *srr_req; + struct bnx2fc_cmd *orig_io_req; + struct fc_frame *fp; + unsigned char *buf; + void *resp_buf; + u32 resp_len, hdr_len; + u8 opcode; + int rc = 0; + + orig_io_req = cb_arg->aborted_io_req; + srr_req = cb_arg->io_req; + if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) { + BNX2FC_IO_DBG(srr_req, "srr_compl: xid - 0x%x completed", + orig_io_req->xid); + goto srr_compl_done; + } + if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) { + BNX2FC_IO_DBG(srr_req, "rec abts in prog " + "orig_io - 0x%x\n", + orig_io_req->xid); + goto srr_compl_done; + } + if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &srr_req->req_flags)) { + /* SRR timedout */ + BNX2FC_IO_DBG(srr_req, "srr timed out, abort " + "orig_io - 0x%x\n", + orig_io_req->xid); + rc = bnx2fc_initiate_abts(srr_req); + if (rc != SUCCESS) { + BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts " + "failed. 
issue cleanup\n"); + bnx2fc_initiate_cleanup(srr_req); + } + orig_io_req->srr_retry++; + if (orig_io_req->srr_retry <= SRR_RETRY_COUNT) { + struct bnx2fc_rport *tgt = orig_io_req->tgt; + spin_unlock_bh(&tgt->tgt_lock); + rc = bnx2fc_send_srr(orig_io_req, + orig_io_req->srr_offset, + orig_io_req->srr_rctl); + spin_lock_bh(&tgt->tgt_lock); + if (!rc) + goto srr_compl_done; + } + + rc = bnx2fc_initiate_abts(orig_io_req); + if (rc != SUCCESS) { + BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts " + "failed xid = 0x%x. issue cleanup\n", + orig_io_req->xid); + bnx2fc_initiate_cleanup(orig_io_req); + } + goto srr_compl_done; + } + mp_req = &(srr_req->mp_req); + fc_hdr = &(mp_req->resp_fc_hdr); + resp_len = mp_req->resp_len; + resp_buf = mp_req->resp_buf; + + hdr_len = sizeof(*fc_hdr); + buf = kzalloc(PAGE_SIZE, GFP_ATOMIC); + if (!buf) { + printk(KERN_ERR PFX "srr buf: mem alloc failure\n"); + goto srr_compl_done; + } + memcpy(buf, fc_hdr, hdr_len); + memcpy(buf + hdr_len, resp_buf, resp_len); + + fp = fc_frame_alloc(NULL, resp_len); + if (!fp) { + printk(KERN_ERR PFX "fc_frame_alloc failure\n"); + goto free_buf; + } + + fh = (struct fc_frame_header *) fc_frame_header_get(fp); + /* Copy FC Frame header and payload into the frame */ + memcpy(fh, buf, hdr_len + resp_len); + + opcode = fc_frame_payload_op(fp); + switch (opcode) { + case ELS_LS_ACC: + BNX2FC_IO_DBG(srr_req, "SRR success\n"); + break; + case ELS_LS_RJT: + BNX2FC_IO_DBG(srr_req, "SRR rejected\n"); + rc = bnx2fc_initiate_abts(orig_io_req); + if (rc != SUCCESS) { + BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts " + "failed xid = 0x%x. issue cleanup\n", + orig_io_req->xid); + bnx2fc_initiate_cleanup(orig_io_req); + } + break; + default: + BNX2FC_IO_DBG(srr_req, "srr compl - invalid opcode = %d\n", + opcode); + break; + } + fc_frame_free(fp); +free_buf: + kfree(buf); +srr_compl_done: + kref_put(&orig_io_req->refcount, bnx2fc_cmd_release); +} + +void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg) +{ + struct bnx2fc_cmd *orig_io_req, *new_io_req; + struct bnx2fc_cmd *rec_req; + struct bnx2fc_mp_req *mp_req; + struct fc_frame_header *fc_hdr, *fh; + struct fc_els_ls_rjt *rjt; + struct fc_els_rec_acc *acc; + struct bnx2fc_rport *tgt; + struct fcoe_err_report_entry *err_entry; + struct scsi_cmnd *sc_cmd; + enum fc_rctl r_ctl; + unsigned char *buf; + void *resp_buf; + struct fc_frame *fp; + u8 opcode; + u32 offset; + u32 e_stat; + u32 resp_len, hdr_len; + int rc = 0; + bool send_seq_clnp = false; + bool abort_io = false; + + BNX2FC_MISC_DBG("Entered rec_compl callback\n"); + rec_req = cb_arg->io_req; + orig_io_req = cb_arg->aborted_io_req; + BNX2FC_IO_DBG(rec_req, "rec_compl: orig xid = 0x%x", orig_io_req->xid); + tgt = orig_io_req->tgt; + + if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) { + BNX2FC_IO_DBG(rec_req, "completed" + "orig_io - 0x%x\n", + orig_io_req->xid); + goto rec_compl_done; + } + if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) { + BNX2FC_IO_DBG(rec_req, "abts in prog " + "orig_io - 0x%x\n", + orig_io_req->xid); + goto rec_compl_done; + } + /* Handle REC timeout case */ + if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rec_req->req_flags)) { + BNX2FC_IO_DBG(rec_req, "timed out, abort " + "orig_io - 0x%x\n", + orig_io_req->xid); + /* els req is timed out. send abts for els */ + rc = bnx2fc_initiate_abts(rec_req); + if (rc != SUCCESS) { + BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts " + "failed. 
issue cleanup\n"); + bnx2fc_initiate_cleanup(rec_req); + } + orig_io_req->rec_retry++; + /* REC timedout. send ABTS to the orig IO req */ + if (orig_io_req->rec_retry <= REC_RETRY_COUNT) { + spin_unlock_bh(&tgt->tgt_lock); + rc = bnx2fc_send_rec(orig_io_req); + spin_lock_bh(&tgt->tgt_lock); + if (!rc) + goto rec_compl_done; + } + rc = bnx2fc_initiate_abts(orig_io_req); + if (rc != SUCCESS) { + BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts " + "failed xid = 0x%x. issue cleanup\n", + orig_io_req->xid); + bnx2fc_initiate_cleanup(orig_io_req); + } + goto rec_compl_done; + } + mp_req = &(rec_req->mp_req); + fc_hdr = &(mp_req->resp_fc_hdr); + resp_len = mp_req->resp_len; + acc = resp_buf = mp_req->resp_buf; + + hdr_len = sizeof(*fc_hdr); + + buf = kzalloc(PAGE_SIZE, GFP_ATOMIC); + if (!buf) { + printk(KERN_ERR PFX "rec buf: mem alloc failure\n"); + goto rec_compl_done; + } + memcpy(buf, fc_hdr, hdr_len); + memcpy(buf + hdr_len, resp_buf, resp_len); + + fp = fc_frame_alloc(NULL, resp_len); + if (!fp) { + printk(KERN_ERR PFX "fc_frame_alloc failure\n"); + goto free_buf; + } + + fh = (struct fc_frame_header *) fc_frame_header_get(fp); + /* Copy FC Frame header and payload into the frame */ + memcpy(fh, buf, hdr_len + resp_len); + + opcode = fc_frame_payload_op(fp); + if (opcode == ELS_LS_RJT) { + BNX2FC_IO_DBG(rec_req, "opcode is RJT\n"); + rjt = fc_frame_payload_get(fp, sizeof(*rjt)); + if ((rjt->er_reason == ELS_RJT_LOGIC || + rjt->er_reason == ELS_RJT_UNAB) && + rjt->er_explan == ELS_EXPL_OXID_RXID) { + BNX2FC_IO_DBG(rec_req, "handle CMD LOST case\n"); + new_io_req = bnx2fc_cmd_alloc(tgt); + if (!new_io_req) + goto abort_io; + new_io_req->sc_cmd = orig_io_req->sc_cmd; + /* cleanup orig_io_req that is with the FW */ + set_bit(BNX2FC_FLAG_CMD_LOST, + &orig_io_req->req_flags); + bnx2fc_initiate_cleanup(orig_io_req); + /* Post a new IO req with the same sc_cmd */ + BNX2FC_IO_DBG(rec_req, "Post IO request again\n"); + spin_unlock_bh(&tgt->tgt_lock); + rc = bnx2fc_post_io_req(tgt, new_io_req); + spin_lock_bh(&tgt->tgt_lock); + if (!rc) + goto free_frame; + BNX2FC_IO_DBG(rec_req, "REC: io post err\n"); + } +abort_io: + rc = bnx2fc_initiate_abts(orig_io_req); + if (rc != SUCCESS) { + BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts " + "failed. 
issue cleanup\n"); + bnx2fc_initiate_cleanup(orig_io_req); + } + } else if (opcode == ELS_LS_ACC) { + /* REVISIT: Check if the exchange is already aborted */ + offset = ntohl(acc->reca_fc4value); + e_stat = ntohl(acc->reca_e_stat); + if (e_stat & ESB_ST_SEQ_INIT) { + BNX2FC_IO_DBG(rec_req, "target has the seq init\n"); + goto free_frame; + } + BNX2FC_IO_DBG(rec_req, "e_stat = 0x%x, offset = 0x%x\n", + e_stat, offset); + /* Seq initiative is with us */ + err_entry = (struct fcoe_err_report_entry *) + &orig_io_req->err_entry; + sc_cmd = orig_io_req->sc_cmd; + if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { + /* SCSI WRITE command */ + if (offset == orig_io_req->data_xfer_len) { + BNX2FC_IO_DBG(rec_req, "WRITE - resp lost\n"); + /* FCP_RSP lost */ + r_ctl = FC_RCTL_DD_CMD_STATUS; + offset = 0; + } else { + /* start transmitting from offset */ + BNX2FC_IO_DBG(rec_req, "XFER_RDY/DATA lost\n"); + send_seq_clnp = true; + r_ctl = FC_RCTL_DD_DATA_DESC; + if (bnx2fc_initiate_seq_cleanup(orig_io_req, + offset, r_ctl)) + abort_io = true; + /* XFER_RDY */ + } + } else { + /* SCSI READ command */ + if (err_entry->data.rx_buf_off == + orig_io_req->data_xfer_len) { + /* FCP_RSP lost */ + BNX2FC_IO_DBG(rec_req, "READ - resp lost\n"); + r_ctl = FC_RCTL_DD_CMD_STATUS; + offset = 0; + } else { + /* request retransmission from this offset */ + send_seq_clnp = true; + offset = err_entry->data.rx_buf_off; + BNX2FC_IO_DBG(rec_req, "RD DATA lost\n"); + /* FCP_DATA lost */ + r_ctl = FC_RCTL_DD_SOL_DATA; + if (bnx2fc_initiate_seq_cleanup(orig_io_req, + offset, r_ctl)) + abort_io = true; + } + } + if (abort_io) { + rc = bnx2fc_initiate_abts(orig_io_req); + if (rc != SUCCESS) { + BNX2FC_IO_DBG(rec_req, "rec_compl:initiate_abts" + " failed. issue cleanup\n"); + bnx2fc_initiate_cleanup(orig_io_req); + } + } else if (!send_seq_clnp) { + BNX2FC_IO_DBG(rec_req, "Send SRR - FCP_RSP\n"); + spin_unlock_bh(&tgt->tgt_lock); + rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl); + spin_lock_bh(&tgt->tgt_lock); + + if (rc) { + BNX2FC_IO_DBG(rec_req, "Unable to send SRR" + " IO will abort\n"); + } + } + } +free_frame: + fc_frame_free(fp); +free_buf: + kfree(buf); +rec_compl_done: + kref_put(&orig_io_req->refcount, bnx2fc_cmd_release); + kfree(cb_arg); +} + +int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req) +{ + struct fc_els_rec rec; + struct bnx2fc_rport *tgt = orig_io_req->tgt; + struct fc_lport *lport = tgt->rdata->local_port; + struct bnx2fc_els_cb_arg *cb_arg = NULL; + u32 sid = tgt->sid; + u32 r_a_tov = lport->r_a_tov; + int rc; + + BNX2FC_IO_DBG(orig_io_req, "Sending REC\n"); + memset(&rec, 0, sizeof(rec)); + + cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC); + if (!cb_arg) { + printk(KERN_ERR PFX "Unable to allocate cb_arg for REC\n"); + rc = -ENOMEM; + goto rec_err; + } + kref_get(&orig_io_req->refcount); + + cb_arg->aborted_io_req = orig_io_req; + + rec.rec_cmd = ELS_REC; + hton24(rec.rec_s_id, sid); + rec.rec_ox_id = htons(orig_io_req->xid); + rec.rec_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id); + + rc = bnx2fc_initiate_els(tgt, ELS_REC, &rec, sizeof(rec), + bnx2fc_rec_compl, cb_arg, + r_a_tov); +rec_err: + if (rc) { + BNX2FC_IO_DBG(orig_io_req, "REC failed - release\n"); + spin_lock_bh(&tgt->tgt_lock); + kref_put(&orig_io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + kfree(cb_arg); + } + return rc; +} + +int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl) +{ + struct fcp_srr srr; + struct bnx2fc_rport *tgt = orig_io_req->tgt; + struct 
fc_lport *lport = tgt->rdata->local_port; + struct bnx2fc_els_cb_arg *cb_arg = NULL; + u32 r_a_tov = lport->r_a_tov; + int rc; + + BNX2FC_IO_DBG(orig_io_req, "Sending SRR\n"); + memset(&srr, 0, sizeof(srr)); + + cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC); + if (!cb_arg) { + printk(KERN_ERR PFX "Unable to allocate cb_arg for SRR\n"); + rc = -ENOMEM; + goto srr_err; + } + kref_get(&orig_io_req->refcount); + + cb_arg->aborted_io_req = orig_io_req; + + srr.srr_op = ELS_SRR; + srr.srr_ox_id = htons(orig_io_req->xid); + srr.srr_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id); + srr.srr_rel_off = htonl(offset); + srr.srr_r_ctl = r_ctl; + orig_io_req->srr_offset = offset; + orig_io_req->srr_rctl = r_ctl; + + rc = bnx2fc_initiate_els(tgt, ELS_SRR, &srr, sizeof(srr), + bnx2fc_srr_compl, cb_arg, + r_a_tov); +srr_err: + if (rc) { + BNX2FC_IO_DBG(orig_io_req, "SRR failed - release\n"); + spin_lock_bh(&tgt->tgt_lock); + kref_put(&orig_io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + kfree(cb_arg); + } else + set_bit(BNX2FC_FLAG_SRR_SENT, &orig_io_req->req_flags); + + return rc; +} + static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op, void *data, u32 data_len, void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg), struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec) { struct fcoe_port *port = tgt->port; - struct bnx2fc_hba *hba = port->priv; + struct bnx2fc_interface *interface = port->priv; struct fc_rport *rport = tgt->rport; struct fc_lport *lport = port->lport; struct bnx2fc_cmd *els_req; @@ -274,12 +678,12 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op, rc = fc_remote_port_chkready(rport); if (rc) { - printk(KERN_ALERT PFX "els 0x%x: rport not ready\n", op); + printk(KERN_ERR PFX "els 0x%x: rport not ready\n", op); rc = -EINVAL; goto els_err; } if (lport->state != LPORT_ST_READY || !(lport->link_up)) { - printk(KERN_ALERT PFX "els 0x%x: link is not ready\n", op); + printk(KERN_ERR PFX "els 0x%x: link is not ready\n", op); rc = -EINVAL; goto els_err; } @@ -305,7 +709,7 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op, mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req); rc = bnx2fc_init_mp_req(els_req); if (rc == FAILED) { - printk(KERN_ALERT PFX "ELS MP request init failed\n"); + printk(KERN_ERR PFX "ELS MP request init failed\n"); spin_lock_bh(&tgt->tgt_lock); kref_put(&els_req->refcount, bnx2fc_cmd_release); spin_unlock_bh(&tgt->tgt_lock); @@ -324,7 +728,7 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op, if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) { memcpy(mp_req->req_buf, data, data_len); } else { - printk(KERN_ALERT PFX "Invalid ELS op 0x%x\n", op); + printk(KERN_ERR PFX "Invalid ELS op 0x%x\n", op); els_req->cb_func = NULL; els_req->cb_arg = NULL; spin_lock_bh(&tgt->tgt_lock); @@ -342,9 +746,14 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op, did = tgt->rport->port_id; sid = tgt->sid; - __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid, - FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | - FC_FC_SEQ_INIT, 0); + if (op == ELS_SRR) + __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS4_REQ, did, sid, + FC_TYPE_FCP, FC_FC_FIRST_SEQ | + FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); + else + __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid, + FC_TYPE_ELS, FC_FC_FIRST_SEQ | + FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); /* Obtain exchange id */ xid = els_req->xid; @@ -352,7 +761,8 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned 
int op, index = xid % BNX2FC_TASKS_PER_PAGE; /* Initialize task context for this IO request */ - task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx]; + task_page = (struct fcoe_task_ctx_entry *) + interface->hba->task_ctx[task_idx]; task = &(task_page[index]); bnx2fc_init_mp_task(els_req, task); @@ -417,12 +827,13 @@ void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req, hdr = (u64 *)fc_hdr; temp_hdr = (u64 *) - &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr; + &task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr; hdr[0] = cpu_to_be64(temp_hdr[0]); hdr[1] = cpu_to_be64(temp_hdr[1]); hdr[2] = cpu_to_be64(temp_hdr[2]); - mp_req->resp_len = task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_off; + mp_req->resp_len = + task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len; /* Parse ELS response */ if ((els_req->cb_func) && (els_req->cb_arg)) { @@ -495,8 +906,8 @@ struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did, void *arg, u32 timeout) { struct fcoe_port *port = lport_priv(lport); - struct bnx2fc_hba *hba = port->priv; - struct fcoe_ctlr *fip = &hba->ctlr; + struct bnx2fc_interface *interface = port->priv; + struct fcoe_ctlr *fip = &interface->ctlr; struct fc_frame_header *fh = fc_frame_header_get(fp); switch (op) { diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index ab255fbc7f3..7cb2cd48b17 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -3,7 +3,7 @@ * cnic modules to create FCoE instances, send/receive non-offloaded * FIP/FCoE packets, listen to link events etc. * - * Copyright (c) 2008 - 2010 Broadcom Corporation + * Copyright (c) 2008 - 2011 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -15,13 +15,14 @@ #include "bnx2fc.h" static struct list_head adapter_list; +static struct list_head if_list; static u32 adapter_count; static DEFINE_MUTEX(bnx2fc_dev_lock); DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu); #define DRV_MODULE_NAME "bnx2fc" #define DRV_MODULE_VERSION BNX2FC_VERSION -#define DRV_MODULE_RELDATE "Mar 17, 2011" +#define DRV_MODULE_RELDATE "Jun 23, 2011" static char version[] __devinitdata = @@ -61,7 +62,7 @@ static int bnx2fc_disable(struct net_device *netdev); static void bnx2fc_recv_frame(struct sk_buff *skb); -static void bnx2fc_start_disc(struct bnx2fc_hba *hba); +static void bnx2fc_start_disc(struct bnx2fc_interface *interface); static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev); static int bnx2fc_net_config(struct fc_lport *lp); static int bnx2fc_lport_config(struct fc_lport *lport); @@ -70,18 +71,20 @@ static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba); static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba); static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba); static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba); -static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba, +static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface, struct device *parent, int npiv); static void bnx2fc_destroy_work(struct work_struct *work); static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev); +static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device + *phys_dev); static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic); static int bnx2fc_fw_init(struct bnx2fc_hba *hba); static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba); static 
void bnx2fc_port_shutdown(struct fc_lport *lport); -static void bnx2fc_stop(struct bnx2fc_hba *hba); +static void bnx2fc_stop(struct bnx2fc_interface *interface); static int __init bnx2fc_mod_init(void); static void __exit bnx2fc_mod_exit(void); @@ -142,7 +145,8 @@ static void bnx2fc_abort_io(struct fc_lport *lport) static void bnx2fc_cleanup(struct fc_lport *lport) { struct fcoe_port *port = lport_priv(lport); - struct bnx2fc_hba *hba = port->priv; + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; struct bnx2fc_rport *tgt; int i; @@ -219,7 +223,8 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp) struct fcoe_crc_eof *cp; struct sk_buff *skb; struct fc_frame_header *fh; - struct bnx2fc_hba *hba; + struct bnx2fc_interface *interface; + struct bnx2fc_hba *hba; struct fcoe_port *port; struct fcoe_hdr *hp; struct bnx2fc_rport *tgt; @@ -230,7 +235,8 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp) int wlen, rc = 0; port = (struct fcoe_port *)lport_priv(lport); - hba = port->priv; + interface = port->priv; + hba = interface->hba; fh = fc_frame_header_get(fp); @@ -242,12 +248,12 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp) } if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { - if (!hba->ctlr.sel_fcf) { + if (!interface->ctlr.sel_fcf) { BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n"); kfree_skb(skb); return -EINVAL; } - if (fcoe_ctlr_els_send(&hba->ctlr, lport, skb)) + if (fcoe_ctlr_els_send(&interface->ctlr, lport, skb)) return 0; } @@ -316,19 +322,19 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp) skb_reset_network_header(skb); skb->mac_len = elen; skb->protocol = htons(ETH_P_FCOE); - skb->dev = hba->netdev; + skb->dev = interface->netdev; /* fill up mac and fcoe headers */ eh = eth_hdr(skb); eh->h_proto = htons(ETH_P_FCOE); - if (hba->ctlr.map_dest) + if (interface->ctlr.map_dest) fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id); else /* insert GW address */ - memcpy(eh->h_dest, hba->ctlr.dest_addr, ETH_ALEN); + memcpy(eh->h_dest, interface->ctlr.dest_addr, ETH_ALEN); - if (unlikely(hba->ctlr.flogi_oxid != FC_XID_UNKNOWN)) - memcpy(eh->h_source, hba->ctlr.ctl_src_addr, ETH_ALEN); + if (unlikely(interface->ctlr.flogi_oxid != FC_XID_UNKNOWN)) + memcpy(eh->h_source, interface->ctlr.ctl_src_addr, ETH_ALEN); else memcpy(eh->h_source, port->data_src_addr, ETH_ALEN); @@ -377,22 +383,23 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *olddev) { struct fc_lport *lport; - struct bnx2fc_hba *hba; + struct bnx2fc_interface *interface; struct fc_frame_header *fh; struct fcoe_rcv_info *fr; struct fcoe_percpu_s *bg; unsigned short oxid; - hba = container_of(ptype, struct bnx2fc_hba, fcoe_packet_type); - lport = hba->ctlr.lp; + interface = container_of(ptype, struct bnx2fc_interface, + fcoe_packet_type); + lport = interface->ctlr.lp; if (unlikely(lport == NULL)) { - printk(KERN_ALERT PFX "bnx2fc_rcv: lport is NULL\n"); + printk(KERN_ERR PFX "bnx2fc_rcv: lport is NULL\n"); goto err; } if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { - printk(KERN_ALERT PFX "bnx2fc_rcv: Wrong FC type frame\n"); + printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n"); goto err; } @@ -411,7 +418,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev, fr = fcoe_dev_from_skb(skb); fr->fr_dev = lport; - fr->ptype = ptype; bg = &bnx2fc_global; spin_lock_bh(&bg->fcoe_rx_list.lock); @@ -469,7 +475,7 @@ static void 
bnx2fc_recv_frame(struct sk_buff *skb) fr = fcoe_dev_from_skb(skb); lport = fr->fr_dev; if (unlikely(lport == NULL)) { - printk(KERN_ALERT PFX "Invalid lport struct\n"); + printk(KERN_ERR PFX "Invalid lport struct\n"); kfree_skb(skb); return; } @@ -594,7 +600,8 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost) struct fc_host_statistics *bnx2fc_stats; struct fc_lport *lport = shost_priv(shost); struct fcoe_port *port = lport_priv(lport); - struct bnx2fc_hba *hba = port->priv; + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; struct fcoe_statistics_params *fw_stats; int rc = 0; @@ -612,7 +619,7 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost) BNX2FC_HBA_DBG(lport, "FW stat req timed out\n"); return bnx2fc_stats; } - bnx2fc_stats->invalid_crc_count += fw_stats->rx_stat1.fc_crc_cnt; + bnx2fc_stats->invalid_crc_count += fw_stats->rx_stat2.fc_crc_cnt; bnx2fc_stats->tx_frames += fw_stats->tx_stat.fcoe_tx_pkt_cnt; bnx2fc_stats->tx_words += (fw_stats->tx_stat.fcoe_tx_byte_cnt) / 4; bnx2fc_stats->rx_frames += fw_stats->rx_stat0.fcoe_rx_pkt_cnt; @@ -631,7 +638,7 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost) static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev) { struct fcoe_port *port = lport_priv(lport); - struct bnx2fc_hba *hba = port->priv; + struct bnx2fc_interface *interface = port->priv; struct Scsi_Host *shost = lport->host; int rc = 0; @@ -654,7 +661,7 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev) fc_host_max_npiv_vports(lport->host) = USHRT_MAX; sprintf(fc_host_symbolic_name(lport->host), "%s v%s over %s", BNX2FC_NAME, BNX2FC_VERSION, - hba->netdev->name); + interface->netdev->name); return 0; } @@ -662,8 +669,8 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev) static void bnx2fc_link_speed_update(struct fc_lport *lport) { struct fcoe_port *port = lport_priv(lport); - struct bnx2fc_hba *hba = port->priv; - struct net_device *netdev = hba->netdev; + struct bnx2fc_interface *interface = port->priv; + struct net_device *netdev = interface->netdev; struct ethtool_cmd ecmd; if (!dev_ethtool_get_settings(netdev, &ecmd)) { @@ -679,6 +686,9 @@ static void bnx2fc_link_speed_update(struct fc_lport *lport) case SPEED_1000: lport->link_speed = FC_PORTSPEED_1GBIT; break; + case SPEED_2500: + lport->link_speed = FC_PORTSPEED_2GBIT; + break; case SPEED_10000: lport->link_speed = FC_PORTSPEED_10GBIT; break; @@ -688,7 +698,8 @@ static void bnx2fc_link_speed_update(struct fc_lport *lport) static int bnx2fc_link_ok(struct fc_lport *lport) { struct fcoe_port *port = lport_priv(lport); - struct bnx2fc_hba *hba = port->priv; + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; struct net_device *dev = hba->phys_dev; int rc = 0; @@ -710,7 +721,7 @@ static int bnx2fc_link_ok(struct fc_lport *lport) */ void bnx2fc_get_link_state(struct bnx2fc_hba *hba) { - if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state)) + if (test_bit(__LINK_STATE_NOCARRIER, &hba->phys_dev->state)) set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); else clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); @@ -719,11 +730,13 @@ void bnx2fc_get_link_state(struct bnx2fc_hba *hba) static int bnx2fc_net_config(struct fc_lport *lport) { struct bnx2fc_hba *hba; + struct bnx2fc_interface *interface; struct fcoe_port *port; u64 wwnn, wwpn; port = lport_priv(lport); - hba 
= port->priv; + interface = port->priv; + hba = interface->hba; /* require support for get_pauseparam ethtool op. */ if (!hba->phys_dev->ethtool_ops || @@ -740,11 +753,11 @@ static int bnx2fc_net_config(struct fc_lport *lport) bnx2fc_link_speed_update(lport); if (!lport->vport) { - wwnn = fcoe_wwn_from_mac(hba->ctlr.ctl_src_addr, 1, 0); + wwnn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr, 1, 0); BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn); fc_set_wwnn(lport, wwnn); - wwpn = fcoe_wwn_from_mac(hba->ctlr.ctl_src_addr, 2, 0); + wwpn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr, 2, 0); BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn); fc_set_wwpn(lport, wwpn); } @@ -756,9 +769,9 @@ static void bnx2fc_destroy_timer(unsigned long data) { struct bnx2fc_hba *hba = (struct bnx2fc_hba *)data; - BNX2FC_HBA_DBG(hba->ctlr.lp, "ERROR:bnx2fc_destroy_timer - " + BNX2FC_MISC_DBG("ERROR:bnx2fc_destroy_timer - " "Destroy compl not received!!\n"); - hba->flags |= BNX2FC_FLAG_DESTROY_CMPL; + set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags); wake_up_interruptible(&hba->destroy_wait); } @@ -767,57 +780,44 @@ static void bnx2fc_destroy_timer(unsigned long data) * * @context: adapter structure pointer * @event: event type + * @vlan_id: vlan id - associated vlan id with this event * * Handles NETDEV_UP, NETDEV_DOWN, NETDEV_GOING_DOWN,NETDEV_CHANGE and * NETDEV_CHANGE_MTU events */ -static void bnx2fc_indicate_netevent(void *context, unsigned long event) +static void bnx2fc_indicate_netevent(void *context, unsigned long event, + u16 vlan_id) { struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context; - struct fc_lport *lport = hba->ctlr.lp; + struct fc_lport *lport; struct fc_lport *vport; + struct bnx2fc_interface *interface; + int wait_for_upload = 0; u32 link_possible = 1; - if (!test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) { - BNX2FC_MISC_DBG("driver not ready. 
event=%s %ld\n", - hba->netdev->name, event); + /* Ignore vlans for now */ + if (vlan_id != 0) return; - } - - /* - * ASSUMPTION: - * indicate_netevent cannot be called from cnic unless bnx2fc - * does register_device - */ - BUG_ON(!lport); - - BNX2FC_HBA_DBG(lport, "enter netevent handler - event=%s %ld\n", - hba->netdev->name, event); switch (event) { case NETDEV_UP: - BNX2FC_HBA_DBG(lport, "Port up, adapter_state = %ld\n", - hba->adapter_state); if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) printk(KERN_ERR "indicate_netevent: "\ - "adapter is not UP!!\n"); + "hba is not UP!!\n"); break; case NETDEV_DOWN: - BNX2FC_HBA_DBG(lport, "Port down\n"); clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); link_possible = 0; break; case NETDEV_GOING_DOWN: - BNX2FC_HBA_DBG(lport, "Port going down\n"); set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); link_possible = 0; break; case NETDEV_CHANGE: - BNX2FC_HBA_DBG(lport, "NETDEV_CHANGE\n"); break; default: @@ -825,15 +825,22 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event) return; } - bnx2fc_link_speed_update(lport); + mutex_lock(&bnx2fc_dev_lock); + list_for_each_entry(interface, &if_list, list) { - if (link_possible && !bnx2fc_link_ok(lport)) { - printk(KERN_ERR "indicate_netevent: call ctlr_link_up\n"); - fcoe_ctlr_link_up(&hba->ctlr); - } else { - printk(KERN_ERR "indicate_netevent: call ctlr_link_down\n"); - if (fcoe_ctlr_link_down(&hba->ctlr)) { - clear_bit(ADAPTER_STATE_READY, &hba->adapter_state); + if (interface->hba != hba) + continue; + + lport = interface->ctlr.lp; + BNX2FC_HBA_DBG(lport, "netevent handler - event=%s %ld\n", + interface->netdev->name, event); + + bnx2fc_link_speed_update(lport); + + if (link_possible && !bnx2fc_link_ok(lport)) { + printk(KERN_ERR "indicate_netevent: ctlr_link_up\n"); + fcoe_ctlr_link_up(&interface->ctlr); + } else if (fcoe_ctlr_link_down(&interface->ctlr)) { mutex_lock(&lport->lp_mutex); list_for_each_entry(vport, &lport->vports, list) fc_host_port_type(vport->host) = @@ -844,24 +851,26 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event) get_cpu())->LinkFailureCount++; put_cpu(); fcoe_clean_pending_queue(lport); + wait_for_upload = 1; + } + } + mutex_unlock(&bnx2fc_dev_lock); - init_waitqueue_head(&hba->shutdown_wait); - BNX2FC_HBA_DBG(lport, "indicate_netevent " - "num_ofld_sess = %d\n", - hba->num_ofld_sess); - hba->wait_for_link_down = 1; - BNX2FC_HBA_DBG(lport, "waiting for uploads to " - "compl proc = %s\n", - current->comm); - wait_event_interruptible(hba->shutdown_wait, - (hba->num_ofld_sess == 0)); - BNX2FC_HBA_DBG(lport, "wakeup - num_ofld_sess = %d\n", + if (wait_for_upload) { + clear_bit(ADAPTER_STATE_READY, &hba->adapter_state); + init_waitqueue_head(&hba->shutdown_wait); + BNX2FC_MISC_DBG("indicate_netevent " + "num_ofld_sess = %d\n", hba->num_ofld_sess); - hba->wait_for_link_down = 0; + hba->wait_for_link_down = 1; + wait_event_interruptible(hba->shutdown_wait, + (hba->num_ofld_sess == 0)); + BNX2FC_MISC_DBG("wakeup - num_ofld_sess = %d\n", + hba->num_ofld_sess); + hba->wait_for_link_down = 0; - if (signal_pending(current)) - flush_signals(current); - } + if (signal_pending(current)) + flush_signals(current); } } @@ -880,23 +889,12 @@ static int bnx2fc_libfc_config(struct fc_lport *lport) static int bnx2fc_em_config(struct fc_lport *lport) { - struct fcoe_port *port = lport_priv(lport); - struct bnx2fc_hba *hba = port->priv; - if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, 
FCOE_MIN_XID, FCOE_MAX_XID, NULL)) { printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n"); return -ENOMEM; } - hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba, BNX2FC_MIN_XID, - BNX2FC_MAX_XID); - - if (!hba->cmd_mgr) { - printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n"); - fc_exch_mgr_free(lport); - return -ENOMEM; - } return 0; } @@ -909,11 +907,8 @@ static int bnx2fc_lport_config(struct fc_lport *lport) lport->e_d_tov = 2 * 1000; lport->r_a_tov = 10 * 1000; - /* REVISIT: enable when supporting tape devices lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL); - */ - lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS); lport->does_npiv = 1; memset(&lport->rnid_gen, 0, sizeof(struct fc_els_rnid_gen)); @@ -943,9 +938,10 @@ static int bnx2fc_fip_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *orig_dev) { - struct bnx2fc_hba *hba; - hba = container_of(ptype, struct bnx2fc_hba, fip_packet_type); - fcoe_ctlr_recv(&hba->ctlr, skb); + struct bnx2fc_interface *interface; + interface = container_of(ptype, struct bnx2fc_interface, + fip_packet_type); + fcoe_ctlr_recv(&interface->ctlr, skb); return 0; } @@ -996,17 +992,17 @@ static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled) struct Scsi_Host *shost = vport_to_shost(vport); struct fc_lport *n_port = shost_priv(shost); struct fcoe_port *port = lport_priv(n_port); - struct bnx2fc_hba *hba = port->priv; - struct net_device *netdev = hba->netdev; + struct bnx2fc_interface *interface = port->priv; + struct net_device *netdev = interface->netdev; struct fc_lport *vn_port; - if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) { + if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) { printk(KERN_ERR PFX "vn ports cannot be created on" - "this hba\n"); + "this interface\n"); return -EIO; } mutex_lock(&bnx2fc_dev_lock); - vn_port = bnx2fc_if_create(hba, &vport->dev, 1); + vn_port = bnx2fc_if_create(interface, &vport->dev, 1); mutex_unlock(&bnx2fc_dev_lock); if (IS_ERR(vn_port)) { @@ -1056,10 +1052,10 @@ static int bnx2fc_vport_disable(struct fc_vport *vport, bool disable) } -static int bnx2fc_netdev_setup(struct bnx2fc_hba *hba) +static int bnx2fc_netdev_setup(struct bnx2fc_interface *interface) { - struct net_device *netdev = hba->netdev; - struct net_device *physdev = hba->phys_dev; + struct net_device *netdev = interface->netdev; + struct net_device *physdev = interface->hba->phys_dev; struct netdev_hw_addr *ha; int sel_san_mac = 0; @@ -1074,7 +1070,8 @@ static int bnx2fc_netdev_setup(struct bnx2fc_hba *hba) if ((ha->type == NETDEV_HW_ADDR_T_SAN) && (is_valid_ether_addr(ha->addr))) { - memcpy(hba->ctlr.ctl_src_addr, ha->addr, ETH_ALEN); + memcpy(interface->ctlr.ctl_src_addr, ha->addr, + ETH_ALEN); sel_san_mac = 1; BNX2FC_MISC_DBG("Found SAN MAC\n"); } @@ -1084,15 +1081,15 @@ static int bnx2fc_netdev_setup(struct bnx2fc_hba *hba) if (!sel_san_mac) return -ENODEV; - hba->fip_packet_type.func = bnx2fc_fip_recv; - hba->fip_packet_type.type = htons(ETH_P_FIP); - hba->fip_packet_type.dev = netdev; - dev_add_pack(&hba->fip_packet_type); + interface->fip_packet_type.func = bnx2fc_fip_recv; + interface->fip_packet_type.type = htons(ETH_P_FIP); + interface->fip_packet_type.dev = netdev; + dev_add_pack(&interface->fip_packet_type); - hba->fcoe_packet_type.func = bnx2fc_rcv; - hba->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE); - hba->fcoe_packet_type.dev = netdev; - dev_add_pack(&hba->fcoe_packet_type); 
+ interface->fcoe_packet_type.func = bnx2fc_rcv; + interface->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE); + interface->fcoe_packet_type.dev = netdev; + dev_add_pack(&interface->fcoe_packet_type); return 0; } @@ -1128,53 +1125,54 @@ static void bnx2fc_release_transport(void) static void bnx2fc_interface_release(struct kref *kref) { - struct bnx2fc_hba *hba; + struct bnx2fc_interface *interface; struct net_device *netdev; - struct net_device *phys_dev; - hba = container_of(kref, struct bnx2fc_hba, kref); + interface = container_of(kref, struct bnx2fc_interface, kref); BNX2FC_MISC_DBG("Interface is being released\n"); - netdev = hba->netdev; - phys_dev = hba->phys_dev; + netdev = interface->netdev; /* tear-down FIP controller */ - if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done)) - fcoe_ctlr_destroy(&hba->ctlr); + if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags)) + fcoe_ctlr_destroy(&interface->ctlr); + + kfree(interface); - /* Free the command manager */ - if (hba->cmd_mgr) { - bnx2fc_cmd_mgr_free(hba->cmd_mgr); - hba->cmd_mgr = NULL; - } dev_put(netdev); module_put(THIS_MODULE); } -static inline void bnx2fc_interface_get(struct bnx2fc_hba *hba) +static inline void bnx2fc_interface_get(struct bnx2fc_interface *interface) { - kref_get(&hba->kref); + kref_get(&interface->kref); } -static inline void bnx2fc_interface_put(struct bnx2fc_hba *hba) +static inline void bnx2fc_interface_put(struct bnx2fc_interface *interface) { - kref_put(&hba->kref, bnx2fc_interface_release); + kref_put(&interface->kref, bnx2fc_interface_release); } -static void bnx2fc_interface_destroy(struct bnx2fc_hba *hba) +static void bnx2fc_hba_destroy(struct bnx2fc_hba *hba) { + /* Free the command manager */ + if (hba->cmd_mgr) { + bnx2fc_cmd_mgr_free(hba->cmd_mgr); + hba->cmd_mgr = NULL; + } + kfree(hba->tgt_ofld_list); bnx2fc_unbind_pcidev(hba); kfree(hba); } /** - * bnx2fc_interface_create - create a new fcoe instance + * bnx2fc_hba_create - create a new bnx2fc hba * * @cnic: pointer to cnic device * - * Creates a new FCoE instance on the given device which include allocating - * hba structure, scsi_host and lport structures. + * Creates a new FCoE hba on the given device. 
+ * */ -static struct bnx2fc_hba *bnx2fc_interface_create(struct cnic_dev *cnic) +static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic) { struct bnx2fc_hba *hba; int rc; @@ -1189,64 +1187,83 @@ static struct bnx2fc_hba *bnx2fc_interface_create(struct cnic_dev *cnic) hba->cnic = cnic; rc = bnx2fc_bind_pcidev(hba); - if (rc) + if (rc) { + printk(KERN_ERR PFX "create_adapter: bind error\n"); goto bind_err; + } hba->phys_dev = cnic->netdev; - /* will get overwritten after we do vlan discovery */ - hba->netdev = hba->phys_dev; + hba->next_conn_id = 0; + + hba->tgt_ofld_list = + kzalloc(sizeof(struct bnx2fc_rport *) * BNX2FC_NUM_MAX_SESS, + GFP_KERNEL); + if (!hba->tgt_ofld_list) { + printk(KERN_ERR PFX "Unable to allocate tgt offload list\n"); + goto tgtofld_err; + } + + hba->num_ofld_sess = 0; + + hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba, BNX2FC_MIN_XID, + BNX2FC_MAX_XID); + if (!hba->cmd_mgr) { + printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n"); + goto cmgr_err; + } init_waitqueue_head(&hba->shutdown_wait); init_waitqueue_head(&hba->destroy_wait); + INIT_LIST_HEAD(&hba->vports); return hba; + +cmgr_err: + kfree(hba->tgt_ofld_list); +tgtofld_err: + bnx2fc_unbind_pcidev(hba); bind_err: - printk(KERN_ERR PFX "create_interface: bind error\n"); kfree(hba); return NULL; } -static int bnx2fc_interface_setup(struct bnx2fc_hba *hba, - enum fip_state fip_mode) +struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba, + struct net_device *netdev, + enum fip_state fip_mode) { + struct bnx2fc_interface *interface; int rc = 0; - struct net_device *netdev = hba->netdev; - struct fcoe_ctlr *fip = &hba->ctlr; + interface = kzalloc(sizeof(*interface), GFP_KERNEL); + if (!interface) { + printk(KERN_ERR PFX "Unable to allocate interface structure\n"); + return NULL; + } dev_hold(netdev); - kref_init(&hba->kref); - - hba->flags = 0; + kref_init(&interface->kref); + interface->hba = hba; + interface->netdev = netdev; /* Initialize FIP */ - memset(fip, 0, sizeof(*fip)); - fcoe_ctlr_init(fip, fip_mode); - hba->ctlr.send = bnx2fc_fip_send; - hba->ctlr.update_mac = bnx2fc_update_src_mac; - hba->ctlr.get_src_addr = bnx2fc_get_src_mac; - set_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done); - - rc = bnx2fc_netdev_setup(hba); - if (rc) - goto setup_err; + fcoe_ctlr_init(&interface->ctlr, fip_mode); + interface->ctlr.send = bnx2fc_fip_send; + interface->ctlr.update_mac = bnx2fc_update_src_mac; + interface->ctlr.get_src_addr = bnx2fc_get_src_mac; + set_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags); - hba->next_conn_id = 0; + rc = bnx2fc_netdev_setup(interface); + if (!rc) + return interface; - memset(hba->tgt_ofld_list, 0, sizeof(hba->tgt_ofld_list)); - hba->num_ofld_sess = 0; - - return 0; - -setup_err: - fcoe_ctlr_destroy(&hba->ctlr); + fcoe_ctlr_destroy(&interface->ctlr); dev_put(netdev); - bnx2fc_interface_put(hba); - return rc; + kfree(interface); + return NULL; } /** * bnx2fc_if_create - Create FCoE instance on a given interface * - * @hba: FCoE interface to create a local port on + * @interface: FCoE interface to create a local port on * @parent: Device pointer to be the parent in sysfs for the SCSI host * @npiv: Indicates if the port is vport or not * @@ -1254,15 +1271,23 @@ setup_err: * * Returns: Allocated fc_lport or an error pointer */ -static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba, +static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface, struct device *parent, int npiv) { struct fc_lport *lport, *n_port; struct fcoe_port *port; 
struct Scsi_Host *shost; struct fc_vport *vport = dev_to_vport(parent); + struct bnx2fc_lport *blport; + struct bnx2fc_hba *hba; int rc = 0; + blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL); + if (!blport) { + BNX2FC_HBA_DBG(interface->ctlr.lp, "Unable to alloc blport\n"); + return NULL; + } + /* Allocate Scsi_Host structure */ if (!npiv) lport = libfc_host_alloc(&bnx2fc_shost_template, sizeof(*port)); @@ -1271,12 +1296,12 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba, if (!lport) { printk(KERN_ERR PFX "could not allocate scsi host structure\n"); - return NULL; + goto free_blport; } shost = lport->host; port = lport_priv(lport); port->lport = lport; - port->priv = hba; + port->priv = interface; INIT_WORK(&port->destroy_work, bnx2fc_destroy_work); /* Configure fcoe_port */ @@ -1300,7 +1325,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba, rc = bnx2fc_shost_config(lport, parent); if (rc) { printk(KERN_ERR PFX "Couldnt configure shost for %s\n", - hba->netdev->name); + interface->netdev->name); goto lp_config_err; } @@ -1326,30 +1351,38 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba, goto shost_err; } - bnx2fc_interface_get(hba); + bnx2fc_interface_get(interface); + + hba = interface->hba; + spin_lock_bh(&hba->hba_lock); + blport->lport = lport; + list_add_tail(&blport->list, &hba->vports); + spin_unlock_bh(&hba->hba_lock); + return lport; shost_err: scsi_remove_host(shost); lp_config_err: scsi_host_put(lport->host); +free_blport: + kfree(blport); return NULL; } -static void bnx2fc_netdev_cleanup(struct bnx2fc_hba *hba) +static void bnx2fc_netdev_cleanup(struct bnx2fc_interface *interface) { /* Dont listen for Ethernet packets anymore */ - __dev_remove_pack(&hba->fcoe_packet_type); - __dev_remove_pack(&hba->fip_packet_type); + __dev_remove_pack(&interface->fcoe_packet_type); + __dev_remove_pack(&interface->fip_packet_type); synchronize_net(); } -static void bnx2fc_if_destroy(struct fc_lport *lport) +static void bnx2fc_if_destroy(struct fc_lport *lport, struct bnx2fc_hba *hba) { struct fcoe_port *port = lport_priv(lport); - struct bnx2fc_hba *hba = port->priv; + struct bnx2fc_lport *blport, *tmp; - BNX2FC_HBA_DBG(hba->ctlr.lp, "ENTERED bnx2fc_if_destroy\n"); /* Stop the transmit retry timer */ del_timer_sync(&port->timer); @@ -1372,10 +1405,17 @@ static void bnx2fc_if_destroy(struct fc_lport *lport) /* Free memory used by statistical counters */ fc_lport_free_stats(lport); + spin_lock_bh(&hba->hba_lock); + list_for_each_entry_safe(blport, tmp, &hba->vports, list) { + if (blport->lport == lport) { + list_del(&blport->list); + kfree(blport); + } + } + spin_unlock_bh(&hba->hba_lock); + /* Release Scsi_Host */ scsi_host_put(lport->host); - - bnx2fc_interface_put(hba); } /** @@ -1390,46 +1430,31 @@ static void bnx2fc_if_destroy(struct fc_lport *lport) */ static int bnx2fc_destroy(struct net_device *netdev) { - struct bnx2fc_hba *hba = NULL; - struct net_device *phys_dev; + struct bnx2fc_interface *interface = NULL; + struct bnx2fc_hba *hba; + struct fc_lport *lport; int rc = 0; rtnl_lock(); - mutex_lock(&bnx2fc_dev_lock); - /* obtain physical netdev */ - if (netdev->priv_flags & IFF_802_1Q_VLAN) - phys_dev = vlan_dev_real_dev(netdev); - else { - printk(KERN_ERR PFX "Not a vlan device\n"); - rc = -ENODEV; - goto netdev_err; - } - hba = bnx2fc_hba_lookup(phys_dev); - if (!hba || !hba->ctlr.lp) { + interface = bnx2fc_interface_lookup(netdev); + if (!interface || !interface->ctlr.lp) { rc = -ENODEV; - printk(KERN_ERR PFX 
"bnx2fc_destroy: hba or lport not found\n"); + printk(KERN_ERR PFX "bnx2fc_destroy: interface or lport not found\n"); goto netdev_err; } - if (!test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) { - printk(KERN_ERR PFX "bnx2fc_destroy: Create not called\n"); - goto netdev_err; - } - - bnx2fc_netdev_cleanup(hba); - - bnx2fc_stop(hba); - - bnx2fc_if_destroy(hba->ctlr.lp); + hba = interface->hba; - destroy_workqueue(hba->timer_work_queue); + bnx2fc_netdev_cleanup(interface); + lport = interface->ctlr.lp; + bnx2fc_stop(interface); + list_del(&interface->list); + destroy_workqueue(interface->timer_work_queue); + bnx2fc_interface_put(interface); + bnx2fc_if_destroy(lport, hba); - if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) - bnx2fc_fw_destroy(hba); - - clear_bit(BNX2FC_CREATE_DONE, &hba->init_done); netdev_err: mutex_unlock(&bnx2fc_dev_lock); rtnl_unlock(); @@ -1440,16 +1465,20 @@ static void bnx2fc_destroy_work(struct work_struct *work) { struct fcoe_port *port; struct fc_lport *lport; + struct bnx2fc_interface *interface; + struct bnx2fc_hba *hba; port = container_of(work, struct fcoe_port, destroy_work); lport = port->lport; + interface = port->priv; + hba = interface->hba; BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n"); bnx2fc_port_shutdown(lport); rtnl_lock(); mutex_lock(&bnx2fc_dev_lock); - bnx2fc_if_destroy(lport); + bnx2fc_if_destroy(lport, hba); mutex_unlock(&bnx2fc_dev_lock); rtnl_unlock(); } @@ -1521,28 +1550,27 @@ static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba) static void bnx2fc_ulp_start(void *handle) { struct bnx2fc_hba *hba = handle; - struct fc_lport *lport = hba->ctlr.lp; + struct bnx2fc_interface *interface; + struct fc_lport *lport; - BNX2FC_MISC_DBG("Entered %s\n", __func__); mutex_lock(&bnx2fc_dev_lock); - if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) - goto start_disc; - - if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) + if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags)) bnx2fc_fw_init(hba); -start_disc: - mutex_unlock(&bnx2fc_dev_lock); - BNX2FC_MISC_DBG("bnx2fc started.\n"); - /* Kick off Fabric discovery*/ - if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) { - printk(KERN_ERR PFX "ulp_init: start discovery\n"); - lport->tt.frame_send = bnx2fc_xmit; - bnx2fc_start_disc(hba); + list_for_each_entry(interface, &if_list, list) { + if (interface->hba == hba) { + lport = interface->ctlr.lp; + /* Kick off Fabric discovery*/ + printk(KERN_ERR PFX "ulp_init: start discovery\n"); + lport->tt.frame_send = bnx2fc_xmit; + bnx2fc_start_disc(interface); + } } + + mutex_unlock(&bnx2fc_dev_lock); } static void bnx2fc_port_shutdown(struct fc_lport *lport) @@ -1552,37 +1580,25 @@ static void bnx2fc_port_shutdown(struct fc_lport *lport) fc_lport_destroy(lport); } -static void bnx2fc_stop(struct bnx2fc_hba *hba) +static void bnx2fc_stop(struct bnx2fc_interface *interface) { struct fc_lport *lport; struct fc_lport *vport; - BNX2FC_MISC_DBG("ENTERED %s - init_done = %ld\n", __func__, - hba->init_done); - if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done) && - test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) { - lport = hba->ctlr.lp; - bnx2fc_port_shutdown(lport); - BNX2FC_HBA_DBG(lport, "bnx2fc_stop: waiting for %d " - "offloaded sessions\n", - hba->num_ofld_sess); - wait_event_interruptible(hba->shutdown_wait, - (hba->num_ofld_sess == 0)); - mutex_lock(&lport->lp_mutex); - list_for_each_entry(vport, &lport->vports, list) - fc_host_port_type(vport->host) = FC_PORTTYPE_UNKNOWN; - mutex_unlock(&lport->lp_mutex); - fc_host_port_type(lport->host) = 
FC_PORTTYPE_UNKNOWN; - fcoe_ctlr_link_down(&hba->ctlr); - fcoe_clean_pending_queue(lport); - - mutex_lock(&hba->hba_mutex); - clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); - clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); + if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) + return; - clear_bit(ADAPTER_STATE_READY, &hba->adapter_state); - mutex_unlock(&hba->hba_mutex); - } + lport = interface->ctlr.lp; + bnx2fc_port_shutdown(lport); + + mutex_lock(&lport->lp_mutex); + list_for_each_entry(vport, &lport->vports, list) + fc_host_port_type(vport->host) = + FC_PORTTYPE_UNKNOWN; + mutex_unlock(&lport->lp_mutex); + fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN; + fcoe_ctlr_link_down(&interface->ctlr); + fcoe_clean_pending_queue(lport); } static int bnx2fc_fw_init(struct bnx2fc_hba *hba) @@ -1621,8 +1637,7 @@ static int bnx2fc_fw_init(struct bnx2fc_hba *hba) } - /* Mark HBA to indicate that the FW INIT is done */ - set_bit(BNX2FC_FW_INIT_DONE, &hba->init_done); + set_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags); return 0; err_unbind: @@ -1633,7 +1648,7 @@ err_out: static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba) { - if (test_and_clear_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) { + if (test_and_clear_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags)) { if (bnx2fc_send_fw_fcoe_destroy_msg(hba) == 0) { init_timer(&hba->destroy_timer); hba->destroy_timer.expires = BNX2FC_FW_TIMEOUT + @@ -1642,8 +1657,8 @@ static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba) hba->destroy_timer.data = (unsigned long)hba; add_timer(&hba->destroy_timer); wait_event_interruptible(hba->destroy_wait, - (hba->flags & - BNX2FC_FLAG_DESTROY_CMPL)); + test_bit(BNX2FC_FLAG_DESTROY_CMPL, + &hba->flags)); /* This should never happen */ if (signal_pending(current)) flush_signals(current); @@ -1664,40 +1679,57 @@ static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba) */ static void bnx2fc_ulp_stop(void *handle) { - struct bnx2fc_hba *hba = (struct bnx2fc_hba *)handle; + struct bnx2fc_hba *hba = handle; + struct bnx2fc_interface *interface; printk(KERN_ERR "ULP_STOP\n"); mutex_lock(&bnx2fc_dev_lock); - bnx2fc_stop(hba); + if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags)) + goto exit; + list_for_each_entry(interface, &if_list, list) { + if (interface->hba == hba) + bnx2fc_stop(interface); + } + BUG_ON(hba->num_ofld_sess != 0); + + mutex_lock(&hba->hba_mutex); + clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); + clear_bit(ADAPTER_STATE_GOING_DOWN, + &hba->adapter_state); + + clear_bit(ADAPTER_STATE_READY, &hba->adapter_state); + mutex_unlock(&hba->hba_mutex); + bnx2fc_fw_destroy(hba); +exit: mutex_unlock(&bnx2fc_dev_lock); } -static void bnx2fc_start_disc(struct bnx2fc_hba *hba) +static void bnx2fc_start_disc(struct bnx2fc_interface *interface) { struct fc_lport *lport; int wait_cnt = 0; BNX2FC_MISC_DBG("Entered %s\n", __func__); /* Kick off FIP/FLOGI */ - if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) { + if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) { printk(KERN_ERR PFX "Init not done yet\n"); return; } - lport = hba->ctlr.lp; + lport = interface->ctlr.lp; BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n"); if (!bnx2fc_link_ok(lport)) { BNX2FC_HBA_DBG(lport, "ctlr_link_up\n"); - fcoe_ctlr_link_up(&hba->ctlr); + fcoe_ctlr_link_up(&interface->ctlr); fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT; - set_bit(ADAPTER_STATE_READY, &hba->adapter_state); + set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state); } /* wait for the FCF to be selected before issuing FLOGI */ 
- while (!hba->ctlr.sel_fcf) { + while (!interface->ctlr.sel_fcf) { msleep(250); /* give up after 3 secs */ if (++wait_cnt > 12) @@ -1723,15 +1755,15 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev) BNX2FC_MISC_DBG("Entered %s\n", __func__); /* bnx2fc works only when bnx2x is loaded */ - if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { + if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) || + (dev->max_fcoe_conn == 0)) { printk(KERN_ERR PFX "bnx2fc FCoE not supported on %s," - " flags: %lx\n", - dev->netdev->name, dev->flags); + " flags: %lx fcoe_conn: %d\n", + dev->netdev->name, dev->flags, dev->max_fcoe_conn); return; } - /* Configure FCoE interface */ - hba = bnx2fc_interface_create(dev); + hba = bnx2fc_hba_create(dev); if (!hba) { printk(KERN_ERR PFX "hba initialization failed\n"); return; @@ -1739,7 +1771,7 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev) /* Add HBA to the adapter list */ mutex_lock(&bnx2fc_dev_lock); - list_add_tail(&hba->link, &adapter_list); + list_add_tail(&hba->list, &adapter_list); adapter_count++; mutex_unlock(&bnx2fc_dev_lock); @@ -1747,7 +1779,7 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev) rc = dev->register_device(dev, CNIC_ULP_FCOE, (void *) hba); if (rc) - printk(KERN_ALERT PFX "register_device failed, rc = %d\n", rc); + printk(KERN_ERR PFX "register_device failed, rc = %d\n", rc); else set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic); } @@ -1755,52 +1787,21 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev) static int bnx2fc_disable(struct net_device *netdev) { - struct bnx2fc_hba *hba; - struct net_device *phys_dev; - struct ethtool_drvinfo drvinfo; + struct bnx2fc_interface *interface; int rc = 0; rtnl_lock(); - mutex_lock(&bnx2fc_dev_lock); - /* obtain physical netdev */ - if (netdev->priv_flags & IFF_802_1Q_VLAN) - phys_dev = vlan_dev_real_dev(netdev); - else { - printk(KERN_ERR PFX "Not a vlan device\n"); - rc = -ENODEV; - goto nodev; - } - - /* verify if the physical device is a netxtreme2 device */ - if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) { - memset(&drvinfo, 0, sizeof(drvinfo)); - phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo); - if (strcmp(drvinfo.driver, "bnx2x")) { - printk(KERN_ERR PFX "Not a netxtreme2 device\n"); - rc = -ENODEV; - goto nodev; - } - } else { - printk(KERN_ERR PFX "unable to obtain drv_info\n"); - rc = -ENODEV; - goto nodev; - } - - printk(KERN_ERR PFX "phys_dev is netxtreme2 device\n"); - - /* obtain hba and initialize rest of the structure */ - hba = bnx2fc_hba_lookup(phys_dev); - if (!hba || !hba->ctlr.lp) { + interface = bnx2fc_interface_lookup(netdev); + if (!interface || !interface->ctlr.lp) { rc = -ENODEV; - printk(KERN_ERR PFX "bnx2fc_disable: hba or lport not found\n"); + printk(KERN_ERR PFX "bnx2fc_disable: interface or lport not found\n"); } else { - fcoe_ctlr_link_down(&hba->ctlr); - fcoe_clean_pending_queue(hba->ctlr.lp); + fcoe_ctlr_link_down(&interface->ctlr); + fcoe_clean_pending_queue(interface->ctlr.lp); } -nodev: mutex_unlock(&bnx2fc_dev_lock); rtnl_unlock(); return rc; @@ -1809,48 +1810,19 @@ nodev: static int bnx2fc_enable(struct net_device *netdev) { - struct bnx2fc_hba *hba; - struct net_device *phys_dev; - struct ethtool_drvinfo drvinfo; + struct bnx2fc_interface *interface; int rc = 0; rtnl_lock(); - - BNX2FC_MISC_DBG("Entered %s\n", __func__); mutex_lock(&bnx2fc_dev_lock); - /* obtain physical netdev */ - if (netdev->priv_flags & IFF_802_1Q_VLAN) - phys_dev = vlan_dev_real_dev(netdev); - else { - printk(KERN_ERR PFX "Not a vlan device\n"); + 
interface = bnx2fc_interface_lookup(netdev); + if (!interface || !interface->ctlr.lp) { rc = -ENODEV; - goto nodev; - } - /* verify if the physical device is a netxtreme2 device */ - if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) { - memset(&drvinfo, 0, sizeof(drvinfo)); - phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo); - if (strcmp(drvinfo.driver, "bnx2x")) { - printk(KERN_ERR PFX "Not a netxtreme2 device\n"); - rc = -ENODEV; - goto nodev; - } - } else { - printk(KERN_ERR PFX "unable to obtain drv_info\n"); - rc = -ENODEV; - goto nodev; - } + printk(KERN_ERR PFX "bnx2fc_enable: interface or lport not found\n"); + } else if (!bnx2fc_link_ok(interface->ctlr.lp)) + fcoe_ctlr_link_up(&interface->ctlr); - /* obtain hba and initialize rest of the structure */ - hba = bnx2fc_hba_lookup(phys_dev); - if (!hba || !hba->ctlr.lp) { - rc = -ENODEV; - printk(KERN_ERR PFX "bnx2fc_enable: hba or lport not found\n"); - } else if (!bnx2fc_link_ok(hba->ctlr.lp)) - fcoe_ctlr_link_up(&hba->ctlr); - -nodev: mutex_unlock(&bnx2fc_dev_lock); rtnl_unlock(); return rc; @@ -1868,6 +1840,7 @@ nodev: */ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode) { + struct bnx2fc_interface *interface; struct bnx2fc_hba *hba; struct net_device *phys_dev; struct fc_lport *lport; @@ -1903,7 +1876,7 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode) if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) { memset(&drvinfo, 0, sizeof(drvinfo)); phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo); - if (strcmp(drvinfo.driver, "bnx2x")) { + if (strncmp(drvinfo.driver, "bnx2x", strlen("bnx2x"))) { printk(KERN_ERR PFX "Not a netxtreme2 device\n"); rc = -EINVAL; goto netdev_err; @@ -1914,7 +1887,7 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode) goto netdev_err; } - /* obtain hba and initialize rest of the structure */ + /* obtain interface and initialize rest of the structure */ hba = bnx2fc_hba_lookup(phys_dev); if (!hba) { rc = -ENODEV; @@ -1922,67 +1895,61 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode) goto netdev_err; } - if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) { - rc = bnx2fc_fw_init(hba); - if (rc) - goto netdev_err; - } - - if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) { + if (bnx2fc_interface_lookup(netdev)) { rc = -EEXIST; goto netdev_err; } - /* update netdev with vlan netdev */ - hba->netdev = netdev; - hba->vlan_id = vlan_id; - hba->vlan_enabled = 1; - - rc = bnx2fc_interface_setup(hba, fip_mode); - if (rc) { - printk(KERN_ERR PFX "bnx2fc_interface_setup failed\n"); + interface = bnx2fc_interface_create(hba, netdev, fip_mode); + if (!interface) { + printk(KERN_ERR PFX "bnx2fc_interface_create failed\n"); goto ifput_err; } - hba->timer_work_queue = + interface->vlan_id = vlan_id; + interface->vlan_enabled = 1; + + interface->timer_work_queue = create_singlethread_workqueue("bnx2fc_timer_wq"); - if (!hba->timer_work_queue) { + if (!interface->timer_work_queue) { printk(KERN_ERR PFX "ulp_init could not create timer_wq\n"); rc = -EINVAL; goto ifput_err; } - lport = bnx2fc_if_create(hba, &hba->pcidev->dev, 0); + lport = bnx2fc_if_create(interface, &interface->hba->pcidev->dev, 0); if (!lport) { printk(KERN_ERR PFX "Failed to create interface (%s)\n", netdev->name); - bnx2fc_netdev_cleanup(hba); + bnx2fc_netdev_cleanup(interface); rc = -EINVAL; goto if_create_err; } + /* Add interface to if_list */ + list_add_tail(&interface->list, &if_list); + 
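/*
 * Editor's note: with this patch a single bnx2fc_hba (the physical
 * function) can be referenced by several bnx2fc_interface entries on the
 * global if_list, one per VLAN netdev.  A minimal sketch (not part of the
 * patch) of walking that relationship, as the ULP stop/exit paths in this
 * patch do; the caller is assumed to hold bnx2fc_dev_lock, and the helper
 * name is illustrative only.
 */
static int bnx2fc_count_interfaces_on_hba(struct bnx2fc_hba *hba)
{
	struct bnx2fc_interface *interface;
	int count = 0;

	list_for_each_entry(interface, &if_list, list)
		if (interface->hba == hba)
			count++;

	return count;
}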
lport->boot_time = jiffies; /* Make this master N_port */ - hba->ctlr.lp = lport; + interface->ctlr.lp = lport; - set_bit(BNX2FC_CREATE_DONE, &hba->init_done); - printk(KERN_ERR PFX "create: START DISC\n"); - bnx2fc_start_disc(hba); + BNX2FC_HBA_DBG(lport, "create: START DISC\n"); + bnx2fc_start_disc(interface); /* * Release from kref_init in bnx2fc_interface_setup, on success * lport should be holding a reference taken in bnx2fc_if_create */ - bnx2fc_interface_put(hba); + bnx2fc_interface_put(interface); /* put netdev that was held while calling dev_get_by_name */ mutex_unlock(&bnx2fc_dev_lock); rtnl_unlock(); return 0; if_create_err: - destroy_workqueue(hba->timer_work_queue); + destroy_workqueue(interface->timer_work_queue); ifput_err: - bnx2fc_interface_put(hba); + bnx2fc_interface_put(interface); netdev_err: module_put(THIS_MODULE); mod_err: @@ -1992,7 +1959,7 @@ mod_err: } /** - * bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc adapter instance + * bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc hba instance * * @cnic: Pointer to cnic device instance * @@ -2012,19 +1979,30 @@ static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic) return NULL; } -static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev) +static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device + *netdev) +{ + struct bnx2fc_interface *interface; + + /* Called with bnx2fc_dev_lock held */ + list_for_each_entry(interface, &if_list, list) { + if (interface->netdev == netdev) + return interface; + } + return NULL; +} + +static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device + *phys_dev) { - struct list_head *list; - struct list_head *temp; struct bnx2fc_hba *hba; /* Called with bnx2fc_dev_lock held */ - list_for_each_safe(list, temp, &adapter_list) { - hba = (struct bnx2fc_hba *)list; + list_for_each_entry(hba, &adapter_list, list) { if (hba->phys_dev == phys_dev) return hba; } - printk(KERN_ERR PFX "hba_lookup: hba NULL\n"); + printk(KERN_ERR PFX "adapter_lookup: hba NULL\n"); return NULL; } @@ -2036,6 +2014,8 @@ static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev) static void bnx2fc_ulp_exit(struct cnic_dev *dev) { struct bnx2fc_hba *hba; + struct bnx2fc_interface *interface, *tmp; + struct fc_lport *lport; BNX2FC_MISC_DBG("Entered bnx2fc_ulp_exit\n"); @@ -2054,13 +2034,20 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev) return; } - list_del_init(&hba->link); + list_del_init(&hba->list); adapter_count--; - if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) { + list_for_each_entry_safe(interface, tmp, &if_list, list) { /* destroy not called yet, move to quiesced list */ - bnx2fc_netdev_cleanup(hba); - bnx2fc_if_destroy(hba->ctlr.lp); + if (interface->hba == hba) { + bnx2fc_netdev_cleanup(interface); + bnx2fc_stop(interface); + + list_del(&interface->list); + lport = interface->ctlr.lp; + bnx2fc_interface_put(interface); + bnx2fc_if_destroy(lport, hba); + } } mutex_unlock(&bnx2fc_dev_lock); @@ -2068,7 +2055,7 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev) /* unregister cnic device */ if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic)) hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE); - bnx2fc_interface_destroy(hba); + bnx2fc_hba_destroy(hba); } /** @@ -2224,6 +2211,7 @@ static int __init bnx2fc_mod_init(void) } INIT_LIST_HEAD(&adapter_list); + INIT_LIST_HEAD(&if_list); mutex_init(&bnx2fc_dev_lock); adapter_count = 0; @@ -2301,16 +2289,17 @@ static void __exit bnx2fc_mod_exit(void) 
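/*
 * Editor's note: the bnx2fc_hba_lookup() conversion earlier in this patch
 * replaces an open-coded cast of the list_head with list_for_each_entry().
 * A minimal sketch (not part of the patch) of why that matters:
 * list_for_each_entry() recovers the containing structure via
 * container_of(), so it remains correct even when the list_head is not the
 * first member of the structure.  The structure and function names below
 * are illustrative, not taken from the driver.
 */
struct item {
	int id;
	struct list_head list;	/* deliberately not the first member */
};

static struct item *find_item(struct list_head *head, int id)
{
	struct item *it;

	/* list_for_each_entry() uses container_of() internally */
	list_for_each_entry(it, head, list)
		if (it->id == id)
			return it;

	/* casting the list_head pointer directly would be off by
	 * offsetof(struct item, list) */
	return NULL;
}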
mutex_unlock(&bnx2fc_dev_lock); /* Unregister with cnic */ - list_for_each_entry_safe(hba, next, &to_be_deleted, link) { - list_del_init(&hba->link); - printk(KERN_ERR PFX "MOD_EXIT:destroy hba = 0x%p, kref = %d\n", - hba, atomic_read(&hba->kref.refcount)); + list_for_each_entry_safe(hba, next, &to_be_deleted, list) { + list_del_init(&hba->list); + printk(KERN_ERR PFX "MOD_EXIT:destroy hba = 0x%p\n", + hba); bnx2fc_ulp_stop(hba); /* unregister cnic device */ if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic)) - hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE); - bnx2fc_interface_destroy(hba); + hba->cnic->unregister_device(hba->cnic, + CNIC_ULP_FCOE); + bnx2fc_hba_destroy(hba); } cnic_unregister_driver(CNIC_ULP_FCOE); diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c index f756d5f85c7..72cfb14acd3 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c @@ -2,7 +2,7 @@ * This file contains the code that low level functions that interact * with 57712 FCoE firmware. * - * Copyright (c) 2008 - 2010 Broadcom Corporation + * Copyright (c) 2008 - 2011 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -23,7 +23,7 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba, struct fcoe_kcqe *ofld_kcqe); static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code); static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba, - struct fcoe_kcqe *conn_destroy); + struct fcoe_kcqe *destroy_kcqe); int bnx2fc_send_stat_req(struct bnx2fc_hba *hba) { @@ -67,7 +67,7 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba) int rc = 0; if (!hba->cnic) { - printk(KERN_ALERT PFX "hba->cnic NULL during fcoe fw init\n"); + printk(KERN_ERR PFX "hba->cnic NULL during fcoe fw init\n"); return -ENODEV; } @@ -100,6 +100,10 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba) fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION; + fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION; + + fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma; fcoe_init2.hash_tbl_pbl_addr_hi = (u32) ((u64) hba->hash_tbl_pbl_dma >> 32); @@ -122,6 +126,7 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba) fcoe_init3.error_bit_map_lo = 0xffffffff; fcoe_init3.error_bit_map_hi = 0xffffffff; + fcoe_init3.perf_config = 1; kwqe_arr[0] = (struct kwqe *) &fcoe_init1; kwqe_arr[1] = (struct kwqe *) &fcoe_init2; @@ -161,7 +166,8 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port, struct bnx2fc_rport *tgt) { struct fc_lport *lport = port->lport; - struct bnx2fc_hba *hba = port->priv; + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; struct kwqe *kwqe_arr[4]; struct fcoe_kwqe_conn_offload1 ofld_req1; struct fcoe_kwqe_conn_offload2 ofld_req2; @@ -223,7 +229,7 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port, ofld_req3.hdr.flags = (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); - ofld_req3.vlan_tag = hba->vlan_id << + ofld_req3.vlan_tag = interface->vlan_id << FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT; ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT; @@ -273,8 +279,20 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port, ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 
1 : 0) << FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT); + /* + * Info from PRLI response, this info is used for sequence level error + * recovery support + */ + if (tgt->dev_type == TYPE_TAPE) { + ofld_req3.flags |= 1 << + FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT; + ofld_req3.flags |= (((rdata->flags & FC_RP_FLAGS_REC_SUPPORTED) + ? 1 : 0) << + FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT); + } + /* vlan flag */ - ofld_req3.flags |= (hba->vlan_enabled << + ofld_req3.flags |= (interface->vlan_enabled << FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT); /* C2_VALID and ACK flags are not set as they are not suppported */ @@ -289,19 +307,20 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port, ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20; - ofld_req4.src_mac_addr_lo32[0] = port->data_src_addr[5]; + ofld_req4.src_mac_addr_lo[0] = port->data_src_addr[5]; /* local mac */ - ofld_req4.src_mac_addr_lo32[1] = port->data_src_addr[4]; - ofld_req4.src_mac_addr_lo32[2] = port->data_src_addr[3]; - ofld_req4.src_mac_addr_lo32[3] = port->data_src_addr[2]; - ofld_req4.src_mac_addr_hi16[0] = port->data_src_addr[1]; - ofld_req4.src_mac_addr_hi16[1] = port->data_src_addr[0]; - ofld_req4.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */ - ofld_req4.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4]; - ofld_req4.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3]; - ofld_req4.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2]; - ofld_req4.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1]; - ofld_req4.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0]; + ofld_req4.src_mac_addr_lo[1] = port->data_src_addr[4]; + ofld_req4.src_mac_addr_mid[0] = port->data_src_addr[3]; + ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2]; + ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1]; + ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0]; + ofld_req4.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5]; + /* fcf mac */ + ofld_req4.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4]; + ofld_req4.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3]; + ofld_req4.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2]; + ofld_req4.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1]; + ofld_req4.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0]; ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma; ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32); @@ -331,7 +350,8 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port, struct bnx2fc_rport *tgt) { struct kwqe *kwqe_arr[2]; - struct bnx2fc_hba *hba = port->priv; + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; struct fcoe_kwqe_conn_enable_disable enbl_req; struct fc_lport *lport = port->lport; struct fc_rport *rport = tgt->rport; @@ -345,20 +365,21 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port, enbl_req.hdr.flags = (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); - enbl_req.src_mac_addr_lo32[0] = port->data_src_addr[5]; + enbl_req.src_mac_addr_lo[0] = port->data_src_addr[5]; /* local mac */ - enbl_req.src_mac_addr_lo32[1] = port->data_src_addr[4]; - enbl_req.src_mac_addr_lo32[2] = port->data_src_addr[3]; - enbl_req.src_mac_addr_lo32[3] = port->data_src_addr[2]; - enbl_req.src_mac_addr_hi16[0] = port->data_src_addr[1]; - enbl_req.src_mac_addr_hi16[1] = port->data_src_addr[0]; - - enbl_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */ - enbl_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4]; - enbl_req.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3]; - enbl_req.dst_mac_addr_lo32[3] = 
hba->ctlr.dest_addr[2]; - enbl_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1]; - enbl_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0]; + enbl_req.src_mac_addr_lo[1] = port->data_src_addr[4]; + enbl_req.src_mac_addr_mid[0] = port->data_src_addr[3]; + enbl_req.src_mac_addr_mid[1] = port->data_src_addr[2]; + enbl_req.src_mac_addr_hi[0] = port->data_src_addr[1]; + enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0]; + memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN); + + enbl_req.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5]; + enbl_req.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4]; + enbl_req.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3]; + enbl_req.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2]; + enbl_req.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1]; + enbl_req.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0]; port_id = fc_host_port_id(lport->host); if (port_id != tgt->sid) { @@ -374,10 +395,10 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port, enbl_req.d_id[0] = (port_id & 0x000000FF); enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8; enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16; - enbl_req.vlan_tag = hba->vlan_id << + enbl_req.vlan_tag = interface->vlan_id << FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT; enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT; - enbl_req.vlan_flag = hba->vlan_enabled; + enbl_req.vlan_flag = interface->vlan_enabled; enbl_req.context_id = tgt->context_id; enbl_req.conn_id = tgt->fcoe_conn_id; @@ -397,7 +418,8 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port, int bnx2fc_send_session_disable_req(struct fcoe_port *port, struct bnx2fc_rport *tgt) { - struct bnx2fc_hba *hba = port->priv; + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; struct fcoe_kwqe_conn_enable_disable disable_req; struct kwqe *kwqe_arr[2]; struct fc_rport *rport = tgt->rport; @@ -411,18 +433,19 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port, disable_req.hdr.flags = (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); - disable_req.src_mac_addr_lo32[0] = port->data_src_addr[5]; - disable_req.src_mac_addr_lo32[2] = port->data_src_addr[3]; - disable_req.src_mac_addr_lo32[3] = port->data_src_addr[2]; - disable_req.src_mac_addr_hi16[0] = port->data_src_addr[1]; - disable_req.src_mac_addr_hi16[1] = port->data_src_addr[0]; + disable_req.src_mac_addr_lo[0] = tgt->src_addr[5]; + disable_req.src_mac_addr_lo[1] = tgt->src_addr[4]; + disable_req.src_mac_addr_mid[0] = tgt->src_addr[3]; + disable_req.src_mac_addr_mid[1] = tgt->src_addr[2]; + disable_req.src_mac_addr_hi[0] = tgt->src_addr[1]; + disable_req.src_mac_addr_hi[1] = tgt->src_addr[0]; - disable_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */ - disable_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4]; - disable_req.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3]; - disable_req.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2]; - disable_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1]; - disable_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0]; + disable_req.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5]; + disable_req.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4]; + disable_req.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3]; + disable_req.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2]; + disable_req.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1]; + disable_req.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0]; port_id = tgt->sid; disable_req.s_id[0] = (port_id & 
0x000000FF); @@ -436,11 +459,11 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port, disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16; disable_req.context_id = tgt->context_id; disable_req.conn_id = tgt->fcoe_conn_id; - disable_req.vlan_tag = hba->vlan_id << + disable_req.vlan_tag = interface->vlan_id << FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT; disable_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT; - disable_req.vlan_flag = hba->vlan_enabled; + disable_req.vlan_flag = interface->vlan_enabled; kwqe_arr[0] = (struct kwqe *) &disable_req; @@ -480,16 +503,36 @@ int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba, return rc; } +static bool is_valid_lport(struct bnx2fc_hba *hba, struct fc_lport *lport) +{ + struct bnx2fc_lport *blport; + + spin_lock_bh(&hba->hba_lock); + list_for_each_entry(blport, &hba->vports, list) { + if (blport->lport == lport) { + spin_unlock_bh(&hba->hba_lock); + return true; + } + } + spin_unlock_bh(&hba->hba_lock); + return false; + +} + + static void bnx2fc_unsol_els_work(struct work_struct *work) { struct bnx2fc_unsol_els *unsol_els; struct fc_lport *lport; + struct bnx2fc_hba *hba; struct fc_frame *fp; unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work); lport = unsol_els->lport; fp = unsol_els->fp; - fc_exch_recv(lport, fp); + hba = unsol_els->hba; + if (is_valid_lport(hba, lport)) + fc_exch_recv(lport, fp); kfree(unsol_els); } @@ -499,6 +542,7 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt, { struct fcoe_port *port = tgt->port; struct fc_lport *lport = port->lport; + struct bnx2fc_interface *interface = port->priv; struct bnx2fc_unsol_els *unsol_els; struct fc_frame_header *fh; struct fc_frame *fp; @@ -559,6 +603,7 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt, fr_eof(fp) = FC_EOF_T; fr_crc(fp) = cpu_to_le32(~crc); unsol_els->lport = lport; + unsol_els->hba = interface->hba; unsol_els->fp = fp; INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work); queue_work(bnx2fc_wq, &unsol_els->unsol_els_work); @@ -580,9 +625,12 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) u32 frame_len, len; struct bnx2fc_cmd *io_req = NULL; struct fcoe_task_ctx_entry *task, *task_page; - struct bnx2fc_hba *hba = tgt->port->priv; + struct bnx2fc_interface *interface = tgt->port->priv; + struct bnx2fc_hba *hba = interface->hba; int task_idx, index; int rc = 0; + u64 err_warn_bit_map; + u8 err_warn = 0xff; BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe); @@ -640,44 +688,48 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) xid = err_entry->fc_hdr.ox_id; BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid); BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n", - err_entry->err_warn_bitmap_hi, - err_entry->err_warn_bitmap_lo); + err_entry->data.err_warn_bitmap_hi, + err_entry->data.err_warn_bitmap_lo); BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n", - err_entry->tx_buf_off, err_entry->rx_buf_off); + err_entry->data.tx_buf_off, err_entry->data.rx_buf_off); - bnx2fc_return_rqe(tgt, 1); if (xid > BNX2FC_MAX_XID) { BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid); - spin_unlock_bh(&tgt->tgt_lock); - break; + goto ret_err_rqe; } task_idx = xid / BNX2FC_TASKS_PER_PAGE; index = xid % BNX2FC_TASKS_PER_PAGE; task_page = (struct fcoe_task_ctx_entry *) - hba->task_ctx[task_idx]; + hba->task_ctx[task_idx]; task = &(task_page[index]); io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid]; - if (!io_req) { - 
spin_unlock_bh(&tgt->tgt_lock); - break; - } + if (!io_req) + goto ret_err_rqe; if (io_req->cmd_type != BNX2FC_SCSI_CMD) { printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n"); - spin_unlock_bh(&tgt->tgt_lock); - break; + goto ret_err_rqe; } if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags)) { BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in " "progress.. ignore unsol err\n"); - spin_unlock_bh(&tgt->tgt_lock); - break; + goto ret_err_rqe; + } + + err_warn_bit_map = (u64) + ((u64)err_entry->data.err_warn_bitmap_hi << 32) | + (u64)err_entry->data.err_warn_bitmap_lo; + for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) { + if (err_warn_bit_map & (u64)((u64)1 << i)) { + err_warn = i; + break; + } } /* @@ -687,26 +739,61 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) * logging out the target, when the ABTS eventually * times out. */ - if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, - &io_req->req_flags)) { - /* - * Cancel the timeout_work, as we received IO - * completion with FW error. - */ - if (cancel_delayed_work(&io_req->timeout_work)) - kref_put(&io_req->refcount, - bnx2fc_cmd_release); /* timer hold */ - - rc = bnx2fc_initiate_abts(io_req); - if (rc != SUCCESS) { - BNX2FC_IO_DBG(io_req, "err_warn: initiate_abts " - "failed. issue cleanup\n"); - rc = bnx2fc_initiate_cleanup(io_req); - BUG_ON(rc); - } - } else + if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) { printk(KERN_ERR PFX "err_warn: io_req (0x%x) already " "in ABTS processing\n", xid); + goto ret_err_rqe; + } + BNX2FC_TGT_DBG(tgt, "err = 0x%x\n", err_warn); + if (tgt->dev_type != TYPE_TAPE) + goto skip_rec; + switch (err_warn) { + case FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION: + case FCOE_ERROR_CODE_DATA_OOO_RO: + case FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT: + case FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET: + case FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ: + case FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET: + BNX2FC_TGT_DBG(tgt, "REC TOV popped for xid - 0x%x\n", + xid); + memset(&io_req->err_entry, 0, + sizeof(struct fcoe_err_report_entry)); + memcpy(&io_req->err_entry, err_entry, + sizeof(struct fcoe_err_report_entry)); + if (!test_bit(BNX2FC_FLAG_SRR_SENT, + &io_req->req_flags)) { + spin_unlock_bh(&tgt->tgt_lock); + rc = bnx2fc_send_rec(io_req); + spin_lock_bh(&tgt->tgt_lock); + + if (rc) + goto skip_rec; + } else + printk(KERN_ERR PFX "SRR in progress\n"); + goto ret_err_rqe; + break; + default: + break; + } + +skip_rec: + set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags); + /* + * Cancel the timeout_work, as we received IO + * completion with FW error. + */ + if (cancel_delayed_work(&io_req->timeout_work)) + kref_put(&io_req->refcount, bnx2fc_cmd_release); + + rc = bnx2fc_initiate_abts(io_req); + if (rc != SUCCESS) { + printk(KERN_ERR PFX "err_warn: initiate_abts " + "failed xid = 0x%x. 
issue cleanup\n", + io_req->xid); + bnx2fc_initiate_cleanup(io_req); + } +ret_err_rqe: + bnx2fc_return_rqe(tgt, 1); spin_unlock_bh(&tgt->tgt_lock); break; @@ -722,11 +809,52 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) xid = cpu_to_be16(err_entry->fc_hdr.ox_id); BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid); BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x", - err_entry->err_warn_bitmap_hi, - err_entry->err_warn_bitmap_lo); + err_entry->data.err_warn_bitmap_hi, + err_entry->data.err_warn_bitmap_lo); BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x", - err_entry->tx_buf_off, err_entry->rx_buf_off); + err_entry->data.tx_buf_off, err_entry->data.rx_buf_off); + + if (xid > BNX2FC_MAX_XID) { + BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid); + goto ret_warn_rqe; + } + + err_warn_bit_map = (u64) + ((u64)err_entry->data.err_warn_bitmap_hi << 32) | + (u64)err_entry->data.err_warn_bitmap_lo; + for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) { + if (err_warn_bit_map & (u64) (1 << i)) { + err_warn = i; + break; + } + } + BNX2FC_TGT_DBG(tgt, "warn = 0x%x\n", err_warn); + + task_idx = xid / BNX2FC_TASKS_PER_PAGE; + index = xid % BNX2FC_TASKS_PER_PAGE; + task_page = (struct fcoe_task_ctx_entry *) + interface->hba->task_ctx[task_idx]; + task = &(task_page[index]); + io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid]; + if (!io_req) + goto ret_warn_rqe; + + if (io_req->cmd_type != BNX2FC_SCSI_CMD) { + printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n"); + goto ret_warn_rqe; + } + + memset(&io_req->err_entry, 0, + sizeof(struct fcoe_err_report_entry)); + memcpy(&io_req->err_entry, err_entry, + sizeof(struct fcoe_err_report_entry)); + if (err_warn == FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION) + /* REC_TOV is not a warning code */ + BUG_ON(1); + else + BNX2FC_TGT_DBG(tgt, "Unsolicited warning\n"); +ret_warn_rqe: bnx2fc_return_rqe(tgt, 1); spin_unlock_bh(&tgt->tgt_lock); break; @@ -742,7 +870,8 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe) struct fcoe_task_ctx_entry *task; struct fcoe_task_ctx_entry *task_page; struct fcoe_port *port = tgt->port; - struct bnx2fc_hba *hba = port->priv; + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; struct bnx2fc_cmd *io_req; int task_idx, index; u16 xid; @@ -753,7 +882,7 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe) spin_lock_bh(&tgt->tgt_lock); xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID; if (xid >= BNX2FC_MAX_TASKS) { - printk(KERN_ALERT PFX "ERROR:xid out of range\n"); + printk(KERN_ERR PFX "ERROR:xid out of range\n"); spin_unlock_bh(&tgt->tgt_lock); return; } @@ -762,9 +891,9 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe) task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx]; task = &(task_page[index]); - num_rq = ((task->rx_wr_tx_rd.rx_flags & - FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE) >> - FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT); + num_rq = ((task->rxwr_txrd.var_ctx.rx_flags & + FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >> + FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT); io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid]; @@ -777,22 +906,19 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe) /* Timestamp IO completion time */ cmd_type = io_req->cmd_type; - /* optimized completion path */ - if (cmd_type == BNX2FC_SCSI_CMD) { - rx_state = ((task->rx_wr_tx_rd.rx_flags & - FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE) >> - FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT); + rx_state = 
((task->rxwr_txrd.var_ctx.rx_flags & + FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE) >> + FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT); + /* Process other IO completion types */ + switch (cmd_type) { + case BNX2FC_SCSI_CMD: if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) { bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq); spin_unlock_bh(&tgt->tgt_lock); return; } - } - /* Process other IO completion types */ - switch (cmd_type) { - case BNX2FC_SCSI_CMD: if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED) bnx2fc_process_abts_compl(io_req, task, num_rq); else if (rx_state == @@ -819,8 +945,16 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe) break; case BNX2FC_ELS: - BNX2FC_IO_DBG(io_req, "cq_compl - call process_els_compl\n"); - bnx2fc_process_els_compl(io_req, task, num_rq); + if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) + bnx2fc_process_els_compl(io_req, task, num_rq); + else if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED) + bnx2fc_process_abts_compl(io_req, task, num_rq); + else if (rx_state == + FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED) + bnx2fc_process_cleanup_compl(io_req, task, num_rq); + else + printk(KERN_ERR PFX "Invalid rx state = %d\n", + rx_state); break; case BNX2FC_CLEANUP: @@ -828,6 +962,13 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe) kref_put(&io_req->refcount, bnx2fc_cmd_release); break; + case BNX2FC_SEQ_CLEANUP: + BNX2FC_IO_DBG(io_req, "cq_compl(0x%x) - seq cleanup resp\n", + io_req->xid); + bnx2fc_process_seq_cleanup_compl(io_req, task, rx_state); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + break; + default: printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type); break; @@ -835,6 +976,20 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe) spin_unlock_bh(&tgt->tgt_lock); } +void bnx2fc_arm_cq(struct bnx2fc_rport *tgt) +{ + struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db; + u32 msg; + + wmb(); + rx_db->doorbell_cq_cons = tgt->cq_cons_idx | (tgt->cq_curr_toggle_bit << + FCOE_CQE_TOGGLE_BIT_SHIFT); + msg = *((u32 *)rx_db); + writel(cpu_to_le32(msg), tgt->ctx_base); + mmiowb(); + +} + struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe) { struct bnx2fc_work *work; @@ -853,8 +1008,8 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt) struct fcoe_cqe *cq; u32 cq_cons; struct fcoe_cqe *cqe; + u32 num_free_sqes = 0; u16 wqe; - bool more_cqes_found = false; /* * cq_lock is a low contention lock used to protect @@ -872,62 +1027,53 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt) cq_cons = tgt->cq_cons_idx; cqe = &cq[cq_cons]; - do { - more_cqes_found ^= true; - - while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) == - (tgt->cq_curr_toggle_bit << - FCOE_CQE_TOGGLE_BIT_SHIFT)) { + while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) == + (tgt->cq_curr_toggle_bit << + FCOE_CQE_TOGGLE_BIT_SHIFT)) { - /* new entry on the cq */ - if (wqe & FCOE_CQE_CQE_TYPE) { - /* Unsolicited event notification */ - bnx2fc_process_unsol_compl(tgt, wqe); - } else { - struct bnx2fc_work *work = NULL; - struct bnx2fc_percpu_s *fps = NULL; - unsigned int cpu = wqe % num_possible_cpus(); - - fps = &per_cpu(bnx2fc_percpu, cpu); - spin_lock_bh(&fps->fp_work_lock); - if (unlikely(!fps->iothread)) - goto unlock; - - work = bnx2fc_alloc_work(tgt, wqe); - if (work) - list_add_tail(&work->list, - &fps->work_list); + /* new entry on the cq */ + if (wqe & FCOE_CQE_CQE_TYPE) { + /* Unsolicited event notification */ + bnx2fc_process_unsol_compl(tgt, wqe); + } else { + /* Pending work request completion */ + struct bnx2fc_work *work = 
NULL; + struct bnx2fc_percpu_s *fps = NULL; + unsigned int cpu = wqe % num_possible_cpus(); + + fps = &per_cpu(bnx2fc_percpu, cpu); + spin_lock_bh(&fps->fp_work_lock); + if (unlikely(!fps->iothread)) + goto unlock; + + work = bnx2fc_alloc_work(tgt, wqe); + if (work) + list_add_tail(&work->list, + &fps->work_list); unlock: - spin_unlock_bh(&fps->fp_work_lock); + spin_unlock_bh(&fps->fp_work_lock); - /* Pending work request completion */ - if (fps->iothread && work) - wake_up_process(fps->iothread); - else - bnx2fc_process_cq_compl(tgt, wqe); - } - cqe++; - tgt->cq_cons_idx++; - - if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) { - tgt->cq_cons_idx = 0; - cqe = cq; - tgt->cq_curr_toggle_bit = - 1 - tgt->cq_curr_toggle_bit; - } + /* Pending work request completion */ + if (fps->iothread && work) + wake_up_process(fps->iothread); + else + bnx2fc_process_cq_compl(tgt, wqe); } - /* Re-arm CQ */ - if (more_cqes_found) { - tgt->conn_db->cq_arm.lo = -1; - wmb(); + cqe++; + tgt->cq_cons_idx++; + num_free_sqes++; + + if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) { + tgt->cq_cons_idx = 0; + cqe = cq; + tgt->cq_curr_toggle_bit = + 1 - tgt->cq_curr_toggle_bit; } - } while (more_cqes_found); - - /* - * Commit tgt->cq_cons_idx change to the memory - * spin_lock implies full memory barrier, no need to smp_wmb - */ - + } + if (num_free_sqes) { + bnx2fc_arm_cq(tgt); + atomic_add(num_free_sqes, &tgt->free_sqes); + } spin_unlock_bh(&tgt->cq_lock); return 0; } @@ -947,7 +1093,7 @@ static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba, struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id]; if (!tgt) { - printk(KERN_ALERT PFX "conn_id 0x%x not valid\n", conn_id); + printk(KERN_ERR PFX "conn_id 0x%x not valid\n", conn_id); return; } @@ -968,6 +1114,7 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba, { struct bnx2fc_rport *tgt; struct fcoe_port *port; + struct bnx2fc_interface *interface; u32 conn_id; u32 context_id; int rc; @@ -982,8 +1129,9 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba, BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n", ofld_kcqe->fcoe_conn_context_id); port = tgt->port; - if (hba != tgt->port->priv) { - printk(KERN_ALERT PFX "ERROR:ofld_cmpl: HBA mis-match\n"); + interface = tgt->port->priv; + if (hba != interface->hba) { + printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n"); goto ofld_cmpl_err; } /* @@ -1004,7 +1152,7 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba, /* now enable the session */ rc = bnx2fc_send_session_enable_req(port, tgt); if (rc) { - printk(KERN_ALERT PFX "enable session failed\n"); + printk(KERN_ERR PFX "enable session failed\n"); goto ofld_cmpl_err; } } @@ -1027,6 +1175,7 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba, struct fcoe_kcqe *ofld_kcqe) { struct bnx2fc_rport *tgt; + struct bnx2fc_interface *interface; u32 conn_id; u32 context_id; @@ -1034,7 +1183,7 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba, conn_id = ofld_kcqe->fcoe_conn_id; tgt = hba->tgt_ofld_list[conn_id]; if (!tgt) { - printk(KERN_ALERT PFX "ERROR:enbl_cmpl: No pending ofld req\n"); + printk(KERN_ERR PFX "ERROR:enbl_cmpl: No pending ofld req\n"); return; } @@ -1046,16 +1195,17 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba, * and enable */ if (tgt->context_id != context_id) { - printk(KERN_ALERT PFX "context id mis-match\n"); + printk(KERN_ERR PFX "context id mis-match\n"); return; } - if (hba != tgt->port->priv) { - printk(KERN_ALERT PFX "bnx2fc-enbl_cmpl: HBA 
mis-match\n"); + interface = tgt->port->priv; + if (hba != interface->hba) { + printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n"); goto enbl_cmpl_err; } - if (ofld_kcqe->completion_status) { + if (ofld_kcqe->completion_status) goto enbl_cmpl_err; - } else { + else { /* enable successful - rport ready for issuing IOs */ set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags); set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); @@ -1078,14 +1228,14 @@ static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba, conn_id = disable_kcqe->fcoe_conn_id; tgt = hba->tgt_ofld_list[conn_id]; if (!tgt) { - printk(KERN_ALERT PFX "ERROR: disable_cmpl: No disable req\n"); + printk(KERN_ERR PFX "ERROR: disable_cmpl: No disable req\n"); return; } BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id); if (disable_kcqe->completion_status) { - printk(KERN_ALERT PFX "ERROR: Disable failed with cmpl status %d\n", + printk(KERN_ERR PFX "Disable failed with cmpl status %d\n", disable_kcqe->completion_status); return; } else { @@ -1107,14 +1257,14 @@ static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba, conn_id = destroy_kcqe->fcoe_conn_id; tgt = hba->tgt_ofld_list[conn_id]; if (!tgt) { - printk(KERN_ALERT PFX "destroy_cmpl: No destroy req\n"); + printk(KERN_ERR PFX "destroy_cmpl: No destroy req\n"); return; } BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id); if (destroy_kcqe->completion_status) { - printk(KERN_ALERT PFX "Destroy conn failed, cmpl status %d\n", + printk(KERN_ERR PFX "Destroy conn failed, cmpl status %d\n", destroy_kcqe->completion_status); return; } else { @@ -1141,7 +1291,12 @@ static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code) case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR: printk(KERN_ERR PFX "init_failure due to NIC error\n"); break; - + case FCOE_KCQE_COMPLETION_STATUS_ERROR: + printk(KERN_ERR PFX "init failure due to compl status err\n"); + break; + case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION: + printk(KERN_ERR PFX "init failure due to HSI mismatch\n"); + break; default: printk(KERN_ERR PFX "Unknown Error code %d\n", err_code); } @@ -1200,7 +1355,7 @@ void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[], } else { printk(KERN_ERR PFX "DESTROY success\n"); } - hba->flags |= BNX2FC_FLAG_DESTROY_CMPL; + set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags); wake_up_interruptible(&hba->destroy_wait); break; @@ -1222,7 +1377,7 @@ void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[], case FCOE_KCQE_OPCODE_FCOE_ERROR: /* fall thru */ default: - printk(KERN_ALERT PFX "unknown opcode 0x%x\n", + printk(KERN_ERR PFX "unknown opcode 0x%x\n", kcqe->op_code); } } @@ -1247,21 +1402,14 @@ void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid) void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt) { - struct b577xx_doorbell_set_prod ev_doorbell; + struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db; u32 msg; wmb(); - - memset(&ev_doorbell, 0, sizeof(struct b577xx_doorbell_set_prod)); - ev_doorbell.header.header = B577XX_DOORBELL_HDR_DB_TYPE; - - ev_doorbell.prod = tgt->sq_prod_idx | + sq_db->prod = tgt->sq_prod_idx | (tgt->sq_curr_toggle_bit << 15); - ev_doorbell.header.header |= B577XX_FCOE_CONNECTION_TYPE << - B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT; - msg = *((u32 *)&ev_doorbell); + msg = *((u32 *)sq_db); writel(cpu_to_le32(msg), tgt->ctx_base); - mmiowb(); } @@ -1272,7 +1420,8 @@ int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt) struct fcoe_port *port = tgt->port; u32 reg_off; resource_size_t reg_base; - struct bnx2fc_hba *hba = port->priv; + struct 
bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; reg_base = pci_resource_start(hba->pcidev, BNX2X_DOORBELL_PCI_BAR); @@ -1311,6 +1460,96 @@ void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items) tgt->conn_db->rq_prod = tgt->rq_prod_idx; } +void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req, + struct fcoe_task_ctx_entry *task, + struct bnx2fc_cmd *orig_io_req, + u32 offset) +{ + struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd; + struct bnx2fc_rport *tgt = seq_clnp_req->tgt; + struct bnx2fc_interface *interface = tgt->port->priv; + struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl; + struct fcoe_task_ctx_entry *orig_task; + struct fcoe_task_ctx_entry *task_page; + struct fcoe_ext_mul_sges_ctx *sgl; + u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP; + u8 orig_task_type; + u16 orig_xid = orig_io_req->xid; + u32 context_id = tgt->context_id; + u64 phys_addr = (u64)orig_io_req->bd_tbl->bd_tbl_dma; + u32 orig_offset = offset; + int bd_count; + int orig_task_idx, index; + int i; + + memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); + + if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) + orig_task_type = FCOE_TASK_TYPE_WRITE; + else + orig_task_type = FCOE_TASK_TYPE_READ; + + /* Tx flags */ + task->txwr_rxrd.const_ctx.tx_flags = + FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP << + FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT; + /* init flags */ + task->txwr_rxrd.const_ctx.init_flags = task_type << + FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; + task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << + FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT; + task->rxwr_txrd.const_ctx.init_flags = context_id << + FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT; + task->rxwr_txrd.const_ctx.init_flags = context_id << + FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT; + + task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid; + + task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_seq_cnt = 0; + task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_data_offset = offset; + + bd_count = orig_io_req->bd_tbl->bd_valid; + + /* obtain the appropriate bd entry from relative offset */ + for (i = 0; i < bd_count; i++) { + if (offset < bd[i].buf_len) + break; + offset -= bd[i].buf_len; + } + phys_addr += (i * sizeof(struct fcoe_bd_ctx)); + + if (orig_task_type == FCOE_TASK_TYPE_WRITE) { + task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo = + (u32)phys_addr; + task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi = + (u32)((u64)phys_addr >> 32); + task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = + bd_count; + task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_off = + offset; /* adjusted offset */ + task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i; + } else { + orig_task_idx = orig_xid / BNX2FC_TASKS_PER_PAGE; + index = orig_xid % BNX2FC_TASKS_PER_PAGE; + + task_page = (struct fcoe_task_ctx_entry *) + interface->hba->task_ctx[orig_task_idx]; + orig_task = &(task_page[index]); + + /* Multiple SGEs were used for this IO */ + sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl; + sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr; + sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)phys_addr >> 32); + sgl->mul_sgl.sgl_size = bd_count; + sgl->mul_sgl.cur_sge_off = offset; /*adjusted offset */ + sgl->mul_sgl.cur_sge_idx = i; + + memset(&task->rxwr_only.rx_seq_ctx, 0, + sizeof(struct fcoe_rx_seq_ctx)); + task->rxwr_only.rx_seq_ctx.low_exp_ro = orig_offset; + task->rxwr_only.rx_seq_ctx.high_exp_ro = orig_offset; + } +} void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req, struct fcoe_task_ctx_entry *task, 
u16 orig_xid) @@ -1322,18 +1561,31 @@ void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req, memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); /* Tx Write Rx Read */ - task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP << - FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT; - task->tx_wr_rx_rd.init_flags = task_type << - FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT; - task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 << - FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT; - /* Common */ - task->cmn.common_flags = context_id << - FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT; - task->cmn.general.cleanup_info.task_id = orig_xid; - - + /* init flags */ + task->txwr_rxrd.const_ctx.init_flags = task_type << + FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; + task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << + FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT; + if (tgt->dev_type == TYPE_TAPE) + task->txwr_rxrd.const_ctx.init_flags |= + FCOE_TASK_DEV_TYPE_TAPE << + FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; + else + task->txwr_rxrd.const_ctx.init_flags |= + FCOE_TASK_DEV_TYPE_DISK << + FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; + task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid; + + /* Tx flags */ + task->txwr_rxrd.const_ctx.tx_flags = + FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP << + FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT; + + /* Rx Read Tx Write */ + task->rxwr_txrd.const_ctx.init_flags = context_id << + FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT; + task->rxwr_txrd.var_ctx.rx_flags |= 1 << + FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT; } void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req, @@ -1342,6 +1594,7 @@ void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req, struct bnx2fc_mp_req *mp_req = &(io_req->mp_req); struct bnx2fc_rport *tgt = io_req->tgt; struct fc_frame_header *fc_hdr; + struct fcoe_ext_mul_sges_ctx *sgl; u8 task_type = 0; u64 *hdr; u64 temp_hdr[3]; @@ -1367,47 +1620,54 @@ void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req, /* Tx only */ if ((task_type == FCOE_TASK_TYPE_MIDPATH) || (task_type == FCOE_TASK_TYPE_UNSOLICITED)) { - task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo = + task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo = (u32)mp_req->mp_req_bd_dma; - task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi = + task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi = (u32)((u64)mp_req->mp_req_bd_dma >> 32); - task->tx_wr_only.sgl_ctx.mul_sges.sgl_size = 1; - BNX2FC_IO_DBG(io_req, "init_mp_task - bd_dma = 0x%llx\n", - (unsigned long long)mp_req->mp_req_bd_dma); + task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1; } /* Tx Write Rx Read */ - task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_INIT << - FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT; - task->tx_wr_rx_rd.init_flags = task_type << - FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT; - task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK << - FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT; - task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 << - FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT; - - /* Common */ - task->cmn.data_2_trns = io_req->data_xfer_len; - context_id = tgt->context_id; - task->cmn.common_flags = context_id << - FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT; - task->cmn.common_flags |= 1 << - FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT; - task->cmn.common_flags |= 1 << - FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT; + /* init flags */ + task->txwr_rxrd.const_ctx.init_flags = task_type << + FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; + if (tgt->dev_type == TYPE_TAPE) + 
task->txwr_rxrd.const_ctx.init_flags |= + FCOE_TASK_DEV_TYPE_TAPE << + FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; + else + task->txwr_rxrd.const_ctx.init_flags |= + FCOE_TASK_DEV_TYPE_DISK << + FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; + task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << + FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT; + + /* tx flags */ + task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_INIT << + FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT; /* Rx Write Tx Read */ + task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len; + + /* rx flags */ + task->rxwr_txrd.var_ctx.rx_flags |= 1 << + FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT; + + context_id = tgt->context_id; + task->rxwr_txrd.const_ctx.init_flags = context_id << + FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT; + fc_hdr = &(mp_req->req_fc_hdr); if (task_type == FCOE_TASK_TYPE_MIDPATH) { fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid); fc_hdr->fh_rx_id = htons(0xffff); - task->rx_wr_tx_rd.rx_id = 0xffff; + task->rxwr_txrd.var_ctx.rx_id = 0xffff; } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) { fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid); } /* Fill FC Header into middle path buffer */ - hdr = (u64 *) &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr; + hdr = (u64 *) &task->txwr_rxrd.union_ctx.tx_frame.fc_hdr; memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr)); hdr[0] = cpu_to_be64(temp_hdr[0]); hdr[1] = cpu_to_be64(temp_hdr[1]); @@ -1415,12 +1675,12 @@ void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req, /* Rx Only */ if (task_type == FCOE_TASK_TYPE_MIDPATH) { + sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl; - task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo = - (u32)mp_req->mp_resp_bd_dma; - task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi = + sgl->mul_sgl.cur_sge_addr.lo = (u32)mp_req->mp_resp_bd_dma; + sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)mp_req->mp_resp_bd_dma >> 32); - task->rx_wr_only.sgl_ctx.mul_sges.sgl_size = 1; + sgl->mul_sgl.sgl_size = 1; } } @@ -1431,6 +1691,9 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req, struct scsi_cmnd *sc_cmd = io_req->sc_cmd; struct io_bdt *bd_tbl = io_req->bd_tbl; struct bnx2fc_rport *tgt = io_req->tgt; + struct fcoe_cached_sge_ctx *cached_sge; + struct fcoe_ext_mul_sges_ctx *sgl; + int dev_type = tgt->dev_type; u64 *fcp_cmnd; u64 tmp_fcp_cmnd[4]; u32 context_id; @@ -1448,48 +1711,54 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req, task_type = FCOE_TASK_TYPE_READ; /* Tx only */ + bd_count = bd_tbl->bd_valid; if (task_type == FCOE_TASK_TYPE_WRITE) { - task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo = - (u32)bd_tbl->bd_tbl_dma; - task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi = - (u32)((u64)bd_tbl->bd_tbl_dma >> 32); - task->tx_wr_only.sgl_ctx.mul_sges.sgl_size = - bd_tbl->bd_valid; + if ((dev_type == TYPE_DISK) && (bd_count == 1)) { + struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl; + + task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.lo = + fcoe_bd_tbl->buf_addr_lo; + task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.hi = + fcoe_bd_tbl->buf_addr_hi; + task->txwr_only.sgl_ctx.cached_sge.cur_buf_rem = + fcoe_bd_tbl->buf_len; + + task->txwr_rxrd.const_ctx.init_flags |= 1 << + FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT; + } else { + task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo = + (u32)bd_tbl->bd_tbl_dma; + task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi = + (u32)((u64)bd_tbl->bd_tbl_dma >> 32); + task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = + bd_tbl->bd_valid; + } } /*Tx Write Rx Read */ /* Init state to NORMAL */ - 
task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_NORMAL << - FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT; - task->tx_wr_rx_rd.init_flags = task_type << - FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT; - task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK << - FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT; - task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 << - FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT; - - /* Common */ - task->cmn.data_2_trns = io_req->data_xfer_len; - context_id = tgt->context_id; - task->cmn.common_flags = context_id << - FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT; - task->cmn.common_flags |= 1 << - FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT; - task->cmn.common_flags |= 1 << - FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT; - - /* Set initiative ownership */ - task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT; + task->txwr_rxrd.const_ctx.init_flags |= task_type << + FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; + if (dev_type == TYPE_TAPE) + task->txwr_rxrd.const_ctx.init_flags |= + FCOE_TASK_DEV_TYPE_TAPE << + FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; + else + task->txwr_rxrd.const_ctx.init_flags |= + FCOE_TASK_DEV_TYPE_DISK << + FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; + task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << + FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT; + /* tx flags */ + task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_NORMAL << + FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT; /* Set initial seq counter */ - task->cmn.tx_low_seq_cnt = 1; - - /* Set state to "waiting for the first packet" */ - task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME; + task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1; /* Fill FCP_CMND IU */ fcp_cmnd = (u64 *) - task->cmn.general.cmd_info.fcp_cmd_payload.opaque; + task->txwr_rxrd.union_ctx.fcp_cmd.opaque; bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd); /* swap fcp_cmnd */ @@ -1501,33 +1770,61 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req, } /* Rx Write Tx Read */ - task->rx_wr_tx_rd.rx_id = 0xffff; + task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len; - /* Rx Only */ - if (task_type == FCOE_TASK_TYPE_READ) { + context_id = tgt->context_id; + task->rxwr_txrd.const_ctx.init_flags = context_id << + FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT; + + /* rx flags */ + /* Set state to "waiting for the first packet" */ + task->rxwr_txrd.var_ctx.rx_flags |= 1 << + FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT; + + task->rxwr_txrd.var_ctx.rx_id = 0xffff; - bd_count = bd_tbl->bd_valid; + /* Rx Only */ + cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge; + sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl; + bd_count = bd_tbl->bd_valid; + if (task_type == FCOE_TASK_TYPE_READ && + dev_type == TYPE_DISK) { if (bd_count == 1) { struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl; - task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.lo = - fcoe_bd_tbl->buf_addr_lo; - task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.hi = - fcoe_bd_tbl->buf_addr_hi; - task->rx_wr_only.sgl_ctx.single_sge.cur_buf_rem = - fcoe_bd_tbl->buf_len; - task->tx_wr_rx_rd.init_flags |= 1 << - FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT; + cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo; + cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi; + cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len; + task->txwr_rxrd.const_ctx.init_flags |= 1 << + FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT; + } else if (bd_count == 2) { + struct fcoe_bd_ctx *fcoe_bd_tbl = 
bd_tbl->bd_tbl; + + cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo; + cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi; + cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len; + + fcoe_bd_tbl++; + cached_sge->second_buf_addr.lo = + fcoe_bd_tbl->buf_addr_lo; + cached_sge->second_buf_addr.hi = + fcoe_bd_tbl->buf_addr_hi; + cached_sge->second_buf_rem = fcoe_bd_tbl->buf_len; + task->txwr_rxrd.const_ctx.init_flags |= 1 << + FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT; } else { - task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo = - (u32)bd_tbl->bd_tbl_dma; - task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi = + sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma; + sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)bd_tbl->bd_tbl_dma >> 32); - task->rx_wr_only.sgl_ctx.mul_sges.sgl_size = - bd_tbl->bd_valid; + sgl->mul_sgl.sgl_size = bd_count; } + } else { + sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma; + sgl->mul_sgl.cur_sge_addr.hi = + (u32)((u64)bd_tbl->bd_tbl_dma >> 32); + sgl->mul_sgl.sgl_size = bd_count; } } diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index b5b5c346d77..6cc3789075b 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -1,7 +1,7 @@ /* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver. * IO manager and SCSI IO processing. * - * Copyright (c) 2008 - 2010 Broadcom Corporation + * Copyright (c) 2008 - 2011 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -18,8 +18,6 @@ static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len, int bd_index); static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req); static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req); -static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt, - struct bnx2fc_cmd *io_req); static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req); static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req); static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req, @@ -29,10 +27,11 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req, void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req, unsigned int timer_msec) { - struct bnx2fc_hba *hba = io_req->port->priv; + struct bnx2fc_interface *interface = io_req->port->priv; - if (queue_delayed_work(hba->timer_work_queue, &io_req->timeout_work, - msecs_to_jiffies(timer_msec))) + if (queue_delayed_work(interface->timer_work_queue, + &io_req->timeout_work, + msecs_to_jiffies(timer_msec))) kref_get(&io_req->refcount); } @@ -217,6 +216,11 @@ static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code) return; BNX2FC_IO_DBG(io_req, "scsi_done. 
err_code = 0x%x\n", err_code); + if (test_bit(BNX2FC_FLAG_CMD_LOST, &io_req->req_flags)) { + /* Do not call scsi done for this IO */ + return; + } + bnx2fc_unmap_sg_list(io_req); io_req->sc_cmd = NULL; if (!sc_cmd) { @@ -419,12 +423,13 @@ free_cmgr: struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type) { struct fcoe_port *port = tgt->port; - struct bnx2fc_hba *hba = port->priv; - struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr; + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr; struct bnx2fc_cmd *io_req; struct list_head *listp; struct io_bdt *bd_tbl; int index = RESERVE_FREE_LIST_INDEX; + u32 free_sqes; u32 max_sqes; u16 xid; @@ -445,8 +450,10 @@ struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type) * cmgr lock */ spin_lock_bh(&cmd_mgr->free_list_lock[index]); + free_sqes = atomic_read(&tgt->free_sqes); if ((list_empty(&(cmd_mgr->free_list[index]))) || - (tgt->num_active_ios.counter >= max_sqes)) { + (tgt->num_active_ios.counter >= max_sqes) || + (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) { BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available " "ios(%d):sqes(%d)\n", tgt->num_active_ios.counter, tgt->max_sqes); @@ -463,6 +470,7 @@ struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type) xid = io_req->xid; cmd_mgr->cmds[xid] = io_req; atomic_inc(&tgt->num_active_ios); + atomic_dec(&tgt->free_sqes); spin_unlock_bh(&cmd_mgr->free_list_lock[index]); INIT_LIST_HEAD(&io_req->link); @@ -481,14 +489,16 @@ struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type) kref_init(&io_req->refcount); return io_req; } -static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt) + +struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt) { struct fcoe_port *port = tgt->port; - struct bnx2fc_hba *hba = port->priv; - struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr; + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr; struct bnx2fc_cmd *io_req; struct list_head *listp; struct io_bdt *bd_tbl; + u32 free_sqes; u32 max_sqes; u16 xid; int index = get_cpu(); @@ -499,8 +509,10 @@ static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt) * cmgr lock */ spin_lock_bh(&cmd_mgr->free_list_lock[index]); + free_sqes = atomic_read(&tgt->free_sqes); if ((list_empty(&cmd_mgr->free_list[index])) || - (tgt->num_active_ios.counter >= max_sqes)) { + (tgt->num_active_ios.counter >= max_sqes) || + (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) { spin_unlock_bh(&cmd_mgr->free_list_lock[index]); put_cpu(); return NULL; @@ -513,6 +525,7 @@ static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt) xid = io_req->xid; cmd_mgr->cmds[xid] = io_req; atomic_inc(&tgt->num_active_ios); + atomic_dec(&tgt->free_sqes); spin_unlock_bh(&cmd_mgr->free_list_lock[index]); put_cpu(); @@ -562,7 +575,8 @@ void bnx2fc_cmd_release(struct kref *ref) static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req) { struct bnx2fc_mp_req *mp_req = &(io_req->mp_req); - struct bnx2fc_hba *hba = io_req->port->priv; + struct bnx2fc_interface *interface = io_req->port->priv; + struct bnx2fc_hba *hba = interface->hba; size_t sz = sizeof(struct fcoe_bd_ctx); /* clear tm flags */ @@ -598,7 +612,8 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req) struct bnx2fc_mp_req *mp_req; struct fcoe_bd_ctx *mp_req_bd; struct fcoe_bd_ctx *mp_resp_bd; - struct bnx2fc_hba *hba = io_req->port->priv; + struct bnx2fc_interface *interface = io_req->port->priv; + struct 
bnx2fc_hba *hba = interface->hba; dma_addr_t addr; size_t sz; @@ -674,7 +689,7 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags) struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); struct fc_rport_libfc_priv *rp = rport->dd_data; struct fcoe_port *port; - struct bnx2fc_hba *hba; + struct bnx2fc_interface *interface; struct bnx2fc_rport *tgt; struct bnx2fc_cmd *io_req; struct bnx2fc_mp_req *tm_req; @@ -691,10 +706,10 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags) lport = shost_priv(host); port = lport_priv(lport); - hba = port->priv; + interface = port->priv; if (rport == NULL) { - printk(KERN_ALERT PFX "device_reset: rport is NULL\n"); + printk(KERN_ERR PFX "device_reset: rport is NULL\n"); rc = FAILED; goto tmf_err; } @@ -737,7 +752,9 @@ retry_tmf: rc = bnx2fc_init_mp_req(io_req); if (rc == FAILED) { printk(KERN_ERR PFX "Task mgmt MP request init failed\n"); + spin_lock_bh(&tgt->tgt_lock); kref_put(&io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); goto tmf_err; } @@ -766,7 +783,8 @@ retry_tmf: index = xid % BNX2FC_TASKS_PER_PAGE; /* Initialize task context for this IO request */ - task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx]; + task_page = (struct fcoe_task_ctx_entry *) + interface->hba->task_ctx[task_idx]; task = &(task_page[index]); bnx2fc_init_mp_task(io_req, task); @@ -798,10 +816,10 @@ retry_tmf: spin_unlock_bh(&tgt->tgt_lock); if (!rc) { - printk(KERN_ERR PFX "task mgmt command failed...\n"); + BNX2FC_TGT_DBG(tgt, "task mgmt command failed...\n"); rc = FAILED; } else { - printk(KERN_ERR PFX "task mgmt command success...\n"); + BNX2FC_TGT_DBG(tgt, "task mgmt command success...\n"); rc = SUCCESS; } tmf_err: @@ -814,7 +832,7 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req) struct bnx2fc_rport *tgt = io_req->tgt; struct fc_rport *rport = tgt->rport; struct fc_rport_priv *rdata = tgt->rdata; - struct bnx2fc_hba *hba; + struct bnx2fc_interface *interface; struct fcoe_port *port; struct bnx2fc_cmd *abts_io_req; struct fcoe_task_ctx_entry *task; @@ -831,7 +849,7 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req) BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n"); port = io_req->port; - hba = port->priv; + interface = port->priv; lport = port->lport; if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { @@ -841,7 +859,7 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req) } if (rport == NULL) { - printk(KERN_ALERT PFX "initiate_abts: rport is NULL\n"); + printk(KERN_ERR PFX "initiate_abts: rport is NULL\n"); rc = FAILED; goto abts_err; } @@ -873,7 +891,7 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req) /* Obtain oxid and rxid for the original exchange to be aborted */ fc_hdr->fh_ox_id = htons(io_req->xid); - fc_hdr->fh_rx_id = htons(io_req->task->rx_wr_tx_rd.rx_id); + fc_hdr->fh_rx_id = htons(io_req->task->rxwr_txrd.var_ctx.rx_id); sid = tgt->sid; did = rport->port_id; @@ -888,7 +906,8 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req) index = xid % BNX2FC_TASKS_PER_PAGE; /* Initialize task context for this IO request */ - task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx]; + task_page = (struct fcoe_task_ctx_entry *) + interface->hba->task_ctx[task_idx]; task = &(task_page[index]); bnx2fc_init_mp_task(abts_io_req, task); @@ -916,11 +935,81 @@ abts_err: return rc; } +int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset, + enum fc_rctl r_ctl) +{ + struct fc_lport *lport; + struct bnx2fc_rport *tgt = 
orig_io_req->tgt; + struct bnx2fc_interface *interface; + struct fcoe_port *port; + struct bnx2fc_cmd *seq_clnp_req; + struct fcoe_task_ctx_entry *task; + struct fcoe_task_ctx_entry *task_page; + struct bnx2fc_els_cb_arg *cb_arg = NULL; + int task_idx, index; + u16 xid; + int rc = 0; + + BNX2FC_IO_DBG(orig_io_req, "bnx2fc_initiate_seq_cleanup xid = 0x%x\n", + orig_io_req->xid); + kref_get(&orig_io_req->refcount); + + port = orig_io_req->port; + interface = port->priv; + lport = port->lport; + + cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC); + if (!cb_arg) { + printk(KERN_ERR PFX "Unable to alloc cb_arg for seq clnup\n"); + rc = -ENOMEM; + goto cleanup_err; + } + + seq_clnp_req = bnx2fc_elstm_alloc(tgt, BNX2FC_SEQ_CLEANUP); + if (!seq_clnp_req) { + printk(KERN_ERR PFX "cleanup: couldnt allocate cmd\n"); + rc = -ENOMEM; + kfree(cb_arg); + goto cleanup_err; + } + /* Initialize rest of io_req fields */ + seq_clnp_req->sc_cmd = NULL; + seq_clnp_req->port = port; + seq_clnp_req->tgt = tgt; + seq_clnp_req->data_xfer_len = 0; /* No data transfer for cleanup */ + + xid = seq_clnp_req->xid; + + task_idx = xid/BNX2FC_TASKS_PER_PAGE; + index = xid % BNX2FC_TASKS_PER_PAGE; + + /* Initialize task context for this IO request */ + task_page = (struct fcoe_task_ctx_entry *) + interface->hba->task_ctx[task_idx]; + task = &(task_page[index]); + cb_arg->aborted_io_req = orig_io_req; + cb_arg->io_req = seq_clnp_req; + cb_arg->r_ctl = r_ctl; + cb_arg->offset = offset; + seq_clnp_req->cb_arg = cb_arg; + + printk(KERN_ERR PFX "call init_seq_cleanup_task\n"); + bnx2fc_init_seq_cleanup_task(seq_clnp_req, task, orig_io_req, offset); + + /* Obtain free SQ entry */ + bnx2fc_add_2_sq(tgt, xid); + + /* Ring doorbell */ + bnx2fc_ring_doorbell(tgt); +cleanup_err: + return rc; +} + int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req) { struct fc_lport *lport; struct bnx2fc_rport *tgt = io_req->tgt; - struct bnx2fc_hba *hba; + struct bnx2fc_interface *interface; struct fcoe_port *port; struct bnx2fc_cmd *cleanup_io_req; struct fcoe_task_ctx_entry *task; @@ -933,7 +1022,7 @@ int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req) BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n"); port = io_req->port; - hba = port->priv; + interface = port->priv; lport = port->lport; cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP); @@ -955,7 +1044,8 @@ int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req) index = xid % BNX2FC_TASKS_PER_PAGE; /* Initialize task context for this IO request */ - task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx]; + task_page = (struct fcoe_task_ctx_entry *) + interface->hba->task_ctx[task_idx]; task = &(task_page[index]); orig_xid = io_req->xid; @@ -1023,7 +1113,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd) lport = shost_priv(sc_cmd->device->host); if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) { - printk(KERN_ALERT PFX "eh_abort: link not ready\n"); + printk(KERN_ERR PFX "eh_abort: link not ready\n"); return rc; } @@ -1054,7 +1144,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd) * io_req is no longer in the active_q. 
*/ if (tgt->flush_in_prog) { - printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) " + printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) " "flush in progress\n", io_req->xid); kref_put(&io_req->refcount, bnx2fc_cmd_release); spin_unlock_bh(&tgt->tgt_lock); @@ -1062,7 +1152,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd) } if (io_req->on_active_queue == 0) { - printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) " + printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) " "not on active_q\n", io_req->xid); /* * This condition can happen only due to the FW bug, @@ -1100,7 +1190,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd) set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags); rc = bnx2fc_initiate_abts(io_req); } else { - printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) " + printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) " "already in abts processing\n", io_req->xid); kref_put(&io_req->refcount, bnx2fc_cmd_release); spin_unlock_bh(&tgt->tgt_lock); @@ -1141,6 +1231,42 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd) return rc; } +void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnp_req, + struct fcoe_task_ctx_entry *task, + u8 rx_state) +{ + struct bnx2fc_els_cb_arg *cb_arg = seq_clnp_req->cb_arg; + struct bnx2fc_cmd *orig_io_req = cb_arg->aborted_io_req; + u32 offset = cb_arg->offset; + enum fc_rctl r_ctl = cb_arg->r_ctl; + int rc = 0; + struct bnx2fc_rport *tgt = orig_io_req->tgt; + + BNX2FC_IO_DBG(orig_io_req, "Entered process_cleanup_compl xid = 0x%x" + "cmd_type = %d\n", + seq_clnp_req->xid, seq_clnp_req->cmd_type); + + if (rx_state == FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP) { + printk(KERN_ERR PFX "seq cleanup ignored - xid = 0x%x\n", + seq_clnp_req->xid); + goto free_cb_arg; + } + kref_get(&orig_io_req->refcount); + + spin_unlock_bh(&tgt->tgt_lock); + rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl); + spin_lock_bh(&tgt->tgt_lock); + + if (rc) + printk(KERN_ERR PFX "clnup_compl: Unable to send SRR" + " IO will abort\n"); + seq_clnp_req->cb_arg = NULL; + kref_put(&orig_io_req->refcount, bnx2fc_cmd_release); +free_cb_arg: + kfree(cb_arg); + return; +} + void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req, struct fcoe_task_ctx_entry *task, u8 num_rq) @@ -1189,7 +1315,7 @@ void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req, kref_put(&io_req->refcount, bnx2fc_cmd_release); /* drop timer hold */ - r_ctl = task->cmn.general.rsp_info.abts_rsp.r_ctl; + r_ctl = (u8)task->rxwr_only.union_ctx.comp_info.abts_rsp.r_ctl; switch (r_ctl) { case FC_RCTL_BA_ACC: @@ -1344,12 +1470,13 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req, fc_hdr = &(tm_req->resp_fc_hdr); hdr = (u64 *)fc_hdr; temp_hdr = (u64 *) - &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr; + &task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr; hdr[0] = cpu_to_be64(temp_hdr[0]); hdr[1] = cpu_to_be64(temp_hdr[1]); hdr[2] = cpu_to_be64(temp_hdr[2]); - tm_req->resp_len = task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_off; + tm_req->resp_len = + task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len; rsp_buf = tm_req->resp_buf; @@ -1369,7 +1496,7 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req, fc_hdr->fh_r_ctl); } if (!sc_cmd->SCp.ptr) { - printk(KERN_ALERT PFX "tm_compl: SCp.ptr is NULL\n"); + printk(KERN_ERR PFX "tm_compl: SCp.ptr is NULL\n"); return; } switch (io_req->fcp_status) { @@ -1401,7 +1528,7 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req, io_req->on_tmf_queue = 0; } else { - printk(KERN_ALERT PFX "Command not on active_cmd_queue!\n"); + printk(KERN_ERR PFX "Command 
not on active_cmd_queue!\n"); return; } @@ -1588,7 +1715,7 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req, if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) { /* Invalid sense sense length. */ - printk(KERN_ALERT PFX "invalid sns length %d\n", + printk(KERN_ERR PFX "invalid sns length %d\n", rq_buff_len); /* reset rq_buff_len */ rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ; @@ -1724,7 +1851,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req, /* Fetch fcp_rsp from task context and perform cmd completion */ fcp_rsp = (struct fcoe_fcp_rsp_payload *) - &(task->cmn.general.rsp_info.fcp_rsp.payload); + &(task->rxwr_only.union_ctx.comp_info.fcp_rsp.payload); /* parse fcp_rsp and obtain sense data from RQ if available */ bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq); @@ -1734,7 +1861,6 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req, printk(KERN_ERR PFX "SCp.ptr is NULL\n"); return; } - io_req->sc_cmd = NULL; if (io_req->on_active_queue) { list_del_init(&io_req->link); @@ -1754,6 +1880,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req, } bnx2fc_unmap_sg_list(io_req); + io_req->sc_cmd = NULL; switch (io_req->fcp_status) { case FC_GOOD: @@ -1771,7 +1898,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req, scsi_set_resid(sc_cmd, io_req->fcp_resid); break; default: - printk(KERN_ALERT PFX "scsi_cmd_compl: fcp_status = %d\n", + printk(KERN_ERR PFX "scsi_cmd_compl: fcp_status = %d\n", io_req->fcp_status); break; } @@ -1780,14 +1907,15 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req, kref_put(&io_req->refcount, bnx2fc_cmd_release); } -static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt, +int bnx2fc_post_io_req(struct bnx2fc_rport *tgt, struct bnx2fc_cmd *io_req) { struct fcoe_task_ctx_entry *task; struct fcoe_task_ctx_entry *task_page; struct scsi_cmnd *sc_cmd = io_req->sc_cmd; struct fcoe_port *port = tgt->port; - struct bnx2fc_hba *hba = port->priv; + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; struct fc_lport *lport = port->lport; struct fcoe_dev_stats *stats; int task_idx, index; @@ -1845,7 +1973,8 @@ static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt, } /* Time IO req */ - bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT); + if (tgt->io_timeout) + bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT); /* Obtain free SQ entry */ bnx2fc_add_2_sq(tgt, xid); diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c index a2e3830bd26..d5311b577cc 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c +++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c @@ -2,7 +2,7 @@ * Handles operations such as session offload/upload etc, and manages * session resources such as connection id and qp resources. 
* - * Copyright (c) 2008 - 2010 Broadcom Corporation + * Copyright (c) 2008 - 2011 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -65,7 +65,8 @@ static void bnx2fc_offload_session(struct fcoe_port *port, { struct fc_lport *lport = rdata->local_port; struct fc_rport *rport = rdata->rport; - struct bnx2fc_hba *hba = port->priv; + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; int rval; int i = 0; @@ -133,6 +134,8 @@ retry_ofld: /* upload will take care of cleaning up sess resc */ lport->tt.rport_logoff(rdata); } + /* Arm CQ */ + bnx2fc_arm_cq(tgt); return; ofld_err: @@ -235,7 +238,8 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt) static void bnx2fc_upload_session(struct fcoe_port *port, struct bnx2fc_rport *tgt) { - struct bnx2fc_hba *hba = port->priv; + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; BNX2FC_TGT_DBG(tgt, "upload_session: active_ios = %d\n", tgt->num_active_ios.counter); @@ -314,7 +318,10 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt, { struct fc_rport *rport = rdata->rport; - struct bnx2fc_hba *hba = port->priv; + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; + struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db; + struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db; tgt->rport = rport; tgt->rdata = rdata; @@ -335,6 +342,7 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt, tgt->max_sqes = BNX2FC_SQ_WQES_MAX; tgt->max_rqes = BNX2FC_RQ_WQES_MAX; tgt->max_cqes = BNX2FC_CQ_WQES_MAX; + atomic_set(&tgt->free_sqes, BNX2FC_SQ_WQES_MAX); /* Initialize the toggle bit */ tgt->sq_curr_toggle_bit = 1; @@ -345,7 +353,25 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt, tgt->rq_cons_idx = 0; atomic_set(&tgt->num_active_ios, 0); - tgt->work_time_slice = 2; + if (rdata->flags & FC_RP_FLAGS_RETRY) { + tgt->dev_type = TYPE_TAPE; + tgt->io_timeout = 0; /* use default ULP timeout */ + } else { + tgt->dev_type = TYPE_DISK; + tgt->io_timeout = BNX2FC_IO_TIMEOUT; + } + + /* initialize sq doorbell */ + sq_db->header.header = B577XX_DOORBELL_HDR_DB_TYPE; + sq_db->header.header |= B577XX_FCOE_CONNECTION_TYPE << + B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT; + /* initialize rx doorbell */ + rx_db->hdr.header = ((0x1 << B577XX_DOORBELL_HDR_RX_SHIFT) | + (0x1 << B577XX_DOORBELL_HDR_DB_TYPE_SHIFT) | + (B577XX_FCOE_CONNECTION_TYPE << + B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT)); + rx_db->params = (0x2 << B577XX_FCOE_RX_DOORBELL_NEGATIVE_ARM_SHIFT) | + (0x3 << B577XX_FCOE_RX_DOORBELL_OPCODE_SHIFT); spin_lock_init(&tgt->tgt_lock); spin_lock_init(&tgt->cq_lock); @@ -377,7 +403,8 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport, enum fc_rport_event event) { struct fcoe_port *port = lport_priv(lport); - struct bnx2fc_hba *hba = port->priv; + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; struct fc_rport *rport = rdata->rport; struct fc_rport_libfc_priv *rp; struct bnx2fc_rport *tgt; @@ -388,7 +415,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport, switch (event) { case RPORT_EV_READY: if (!rport) { - printk(KERN_ALERT PFX "rport is NULL: ERROR!\n"); + printk(KERN_ERR PFX "rport is NULL: ERROR!\n"); break; } @@ -400,7 +427,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport, * We should not come here, as lport will * take care of fabric login */ - printk(KERN_ALERT PFX "%x 
- rport_event_handler ERROR\n", + printk(KERN_ERR PFX "%x - rport_event_handler ERROR\n", rdata->ids.port_id); break; } @@ -468,7 +495,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport, break; if (!rport) { - printk(KERN_ALERT PFX "%x - rport not created Yet!!\n", + printk(KERN_INFO PFX "%x - rport not created Yet!!\n", port_id); break; } @@ -522,7 +549,8 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport, struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port, u32 port_id) { - struct bnx2fc_hba *hba = port->priv; + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; struct bnx2fc_rport *tgt; struct fc_rport_priv *rdata; int i; @@ -537,7 +565,7 @@ struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port, "obtained\n"); return tgt; } else { - printk(KERN_ERR PFX "rport 0x%x " + BNX2FC_TGT_DBG(tgt, "rport 0x%x " "is in DELETED state\n", rdata->ids.port_id); return NULL; @@ -618,7 +646,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size, &tgt->sq_dma, GFP_KERNEL); if (!tgt->sq) { - printk(KERN_ALERT PFX "unable to allocate SQ memory %d\n", + printk(KERN_ERR PFX "unable to allocate SQ memory %d\n", tgt->sq_mem_size); goto mem_alloc_failure; } @@ -631,7 +659,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size, &tgt->cq_dma, GFP_KERNEL); if (!tgt->cq) { - printk(KERN_ALERT PFX "unable to allocate CQ memory %d\n", + printk(KERN_ERR PFX "unable to allocate CQ memory %d\n", tgt->cq_mem_size); goto mem_alloc_failure; } @@ -644,7 +672,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size, &tgt->rq_dma, GFP_KERNEL); if (!tgt->rq) { - printk(KERN_ALERT PFX "unable to allocate RQ memory %d\n", + printk(KERN_ERR PFX "unable to allocate RQ memory %d\n", tgt->rq_mem_size); goto mem_alloc_failure; } @@ -656,7 +684,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, &tgt->rq_pbl_dma, GFP_KERNEL); if (!tgt->rq_pbl) { - printk(KERN_ALERT PFX "unable to allocate RQ PBL %d\n", + printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n", tgt->rq_pbl_size); goto mem_alloc_failure; } @@ -682,7 +710,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size, &tgt->xferq_dma, GFP_KERNEL); if (!tgt->xferq) { - printk(KERN_ALERT PFX "unable to allocate XFERQ %d\n", + printk(KERN_ERR PFX "unable to allocate XFERQ %d\n", tgt->xferq_mem_size); goto mem_alloc_failure; } @@ -696,7 +724,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size, &tgt->confq_dma, GFP_KERNEL); if (!tgt->confq) { - printk(KERN_ALERT PFX "unable to allocate CONFQ %d\n", + printk(KERN_ERR PFX "unable to allocate CONFQ %d\n", tgt->confq_mem_size); goto mem_alloc_failure; } @@ -711,7 +739,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, tgt->confq_pbl_size, &tgt->confq_pbl_dma, GFP_KERNEL); if (!tgt->confq_pbl) { - printk(KERN_ALERT PFX "unable to allocate CONFQ PBL %d\n", + printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n", tgt->confq_pbl_size); goto mem_alloc_failure; } @@ -736,7 +764,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, tgt->conn_db_mem_size, &tgt->conn_db_dma, 
GFP_KERNEL); if (!tgt->conn_db) { - printk(KERN_ALERT PFX "unable to allocate conn_db %d\n", + printk(KERN_ERR PFX "unable to allocate conn_db %d\n", tgt->conn_db_mem_size); goto mem_alloc_failure; } @@ -752,14 +780,12 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, &tgt->lcq_dma, GFP_KERNEL); if (!tgt->lcq) { - printk(KERN_ALERT PFX "unable to allocate lcq %d\n", + printk(KERN_ERR PFX "unable to allocate lcq %d\n", tgt->lcq_mem_size); goto mem_alloc_failure; } memset(tgt->lcq, 0, tgt->lcq_mem_size); - /* Arm CQ */ - tgt->conn_db->cq_arm.lo = -1; tgt->conn_db->rq_prod = 0x8000; return 0; @@ -787,6 +813,8 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba, iounmap(tgt->ctx_base); tgt->ctx_base = NULL; } + + spin_lock_bh(&tgt->cq_lock); /* Free LCQ */ if (tgt->lcq) { dma_free_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, @@ -828,17 +856,16 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba, tgt->rq = NULL; } /* Free CQ */ - spin_lock_bh(&tgt->cq_lock); if (tgt->cq) { dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size, tgt->cq, tgt->cq_dma); tgt->cq = NULL; } - spin_unlock_bh(&tgt->cq_lock); /* Free SQ */ if (tgt->sq) { dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size, tgt->sq, tgt->sq_dma); tgt->sq = NULL; } + spin_unlock_bh(&tgt->cq_lock); } diff --git a/drivers/scsi/bnx2i/57xx_iscsi_constants.h b/drivers/scsi/bnx2i/57xx_iscsi_constants.h index 30e6bdbd65a..57515f1f169 100644 --- a/drivers/scsi/bnx2i/57xx_iscsi_constants.h +++ b/drivers/scsi/bnx2i/57xx_iscsi_constants.h @@ -1,6 +1,6 @@ /* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI * - * Copyright (c) 2006 - 2010 Broadcom Corporation + * Copyright (c) 2006 - 2011 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -125,7 +125,7 @@ /* SQ/RQ/CQ DB structure sizes */ #define ISCSI_SQ_DB_SIZE (16) -#define ISCSI_RQ_DB_SIZE (16) +#define ISCSI_RQ_DB_SIZE (64) #define ISCSI_CQ_DB_SIZE (80) #define ISCSI_SQN_TO_NOTIFY_NOT_VALID 0xFFFF diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h index dad6c8a3431..72118db89a2 100644 --- a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h +++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h @@ -1,6 +1,6 @@ /* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI. 
* - * Copyright (c) 2006 - 2010 Broadcom Corporation + * Copyright (c) 2006 - 2011 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -707,8 +707,10 @@ struct iscsi_kwqe_conn_update { #define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2 #define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3) #define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3 -#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4) -#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4 +#define ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE (0x3<<4) +#define ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE_SHIFT 4 +#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0x3<<6) +#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 6 #elif defined(__LITTLE_ENDIAN) u8 conn_flags; #define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0) @@ -719,8 +721,10 @@ struct iscsi_kwqe_conn_update { #define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2 #define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3) #define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3 -#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4) -#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4 +#define ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE (0x3<<4) +#define ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE_SHIFT 4 +#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0x3<<6) +#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 6 u8 reserved2; u8 max_outstanding_r2ts; u8 session_error_recovery_level; diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h index 6bdd25a93db..dc5700765db 100644 --- a/drivers/scsi/bnx2i/bnx2i.h +++ b/drivers/scsi/bnx2i/bnx2i.h @@ -1,6 +1,6 @@ /* bnx2i.h: Broadcom NetXtreme II iSCSI driver. * - * Copyright (c) 2006 - 2010 Broadcom Corporation + * Copyright (c) 2006 - 2011 Broadcom Corporation * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. 
* Copyright (c) 2007, 2008 Mike Christie * @@ -22,11 +22,14 @@ #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/interrupt.h> +#include <linux/delay.h> #include <linux/sched.h> #include <linux/in.h> #include <linux/kfifo.h> #include <linux/netdevice.h> #include <linux/completion.h> +#include <linux/kthread.h> +#include <linux/cpu.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> @@ -202,10 +205,13 @@ struct io_bdt { /** * bnx2i_cmd - iscsi command structure * + * @hdr: iSCSI header + * @conn: iscsi_conn pointer * @scsi_cmd: SCSI-ML task pointer corresponding to this iscsi cmd * @sg: SG list * @io_tbl: buffer descriptor (BD) table * @bd_tbl_dma: buffer descriptor (BD) table's dma address + * @req: bnx2i specific command request struct */ struct bnx2i_cmd { struct iscsi_hdr hdr; @@ -229,6 +235,7 @@ struct bnx2i_cmd { * @gen_pdu: login/nopout/logout pdu resources * @violation_notified: bit mask used to track iscsi error/warning messages * already printed out + * @work_cnt: keeps track of the number of outstanding work * * iSCSI connection structure */ @@ -252,6 +259,8 @@ struct bnx2i_conn { */ struct generic_pdu_resc gen_pdu; u64 violation_notified; + + atomic_t work_cnt; }; @@ -478,7 +487,7 @@ struct bnx2i_5771x_cq_db { struct bnx2i_5771x_sq_rq_db { u16 prod_idx; - u8 reserved0[14]; /* Pad structure size to 16 bytes */ + u8 reserved0[62]; /* Pad structure size to 64 bytes */ }; @@ -661,7 +670,6 @@ enum { * @hba: adapter to which this connection belongs * @conn: iscsi connection this EP is linked to * @cls_ep: associated iSCSI endpoint pointer - * @sess: iscsi session this EP is linked to * @cm_sk: cnic sock struct * @hba_age: age to detect if 'iscsid' issues ep_disconnect() * after HBA reset is completed by bnx2i/cnic/bnx2 @@ -687,7 +695,7 @@ struct bnx2i_endpoint { u32 hba_age; u32 state; unsigned long timestamp; - int num_active_cmds; + atomic_t num_active_cmds; u32 ec_shift; struct qp_info qp; @@ -700,6 +708,19 @@ struct bnx2i_endpoint { }; +struct bnx2i_work { + struct list_head list; + struct iscsi_session *session; + struct bnx2i_conn *bnx2i_conn; + struct cqe cqe; +}; + +struct bnx2i_percpu_s { + struct task_struct *iothread; + struct list_head work_list; + spinlock_t p_work_lock; +}; + /* Global variables */ extern unsigned int error_mask1, error_mask2; @@ -783,7 +804,7 @@ extern struct bnx2i_endpoint *bnx2i_find_ep_in_destroy_list( struct bnx2i_hba *hba, u32 iscsi_cid); extern int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep); -extern void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action); +extern int bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action); extern int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep); @@ -793,4 +814,8 @@ extern void bnx2i_print_active_cmd_queue(struct bnx2i_conn *conn); extern void bnx2i_print_xmit_pdu_queue(struct bnx2i_conn *conn); extern void bnx2i_print_recv_state(struct bnx2i_conn *conn); +extern int bnx2i_percpu_io_thread(void *arg); +extern int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session, + struct bnx2i_conn *bnx2i_conn, + struct cqe *cqe); #endif diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index 5c54a2d9b83..9ae80cd5953 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -1,6 +1,6 @@ /* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver. * - * Copyright (c) 2006 - 2010 Broadcom Corporation + * Copyright (c) 2006 - 2011 Broadcom Corporation * Copyright (c) 2007, 2008 Red Hat, Inc. 
All rights reserved. * Copyright (c) 2007, 2008 Mike Christie * @@ -17,6 +17,8 @@ #include <scsi/libiscsi.h> #include "bnx2i.h" +DECLARE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu); + /** * bnx2i_get_cid_num - get cid from ep * @ep: endpoint pointer @@ -131,16 +133,16 @@ static void bnx2i_iscsi_license_error(struct bnx2i_hba *hba, u32 error_code) * the driver. EQ event is generated CQ index is hit or at least 1 CQ is * outstanding and on chip timer expires */ -void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action) +int bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action) { struct bnx2i_5771x_cq_db *cq_db; u16 cq_index; - u16 next_index; + u16 next_index = 0; u32 num_active_cmds; /* Coalesce CQ entries only on 10G devices */ if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) - return; + return 0; /* Do not update CQ DB multiple times before firmware writes * '0xFFFF' to CQDB->SQN field. Deviation may cause spurious @@ -150,16 +152,17 @@ void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action) if (action != CNIC_ARM_CQE_FP) if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF) - return; + return 0; if (action == CNIC_ARM_CQE || action == CNIC_ARM_CQE_FP) { - num_active_cmds = ep->num_active_cmds; + num_active_cmds = atomic_read(&ep->num_active_cmds); if (num_active_cmds <= event_coal_min) next_index = 1; - else - next_index = event_coal_min + - ((num_active_cmds - event_coal_min) >> - ep->ec_shift); + else { + next_index = num_active_cmds >> ep->ec_shift; + if (next_index > num_active_cmds - event_coal_min) + next_index = num_active_cmds - event_coal_min; + } if (!next_index) next_index = 1; cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1; @@ -170,6 +173,7 @@ void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action) cq_db->sqn[0] = cq_index; } + return next_index; } @@ -265,7 +269,7 @@ static void bnx2i_ring_sq_dbell(struct bnx2i_conn *bnx2i_conn, int count) struct bnx2i_5771x_sq_rq_db *sq_db; struct bnx2i_endpoint *ep = bnx2i_conn->ep; - ep->num_active_cmds++; + atomic_inc(&ep->num_active_cmds); wmb(); /* flush SQ WQE memory before the doorbell is rung */ if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { sq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.sq_pgtbl_virt; @@ -328,11 +332,11 @@ int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn, { struct bnx2i_cmd *bnx2i_cmd; struct bnx2i_login_request *login_wqe; - struct iscsi_login *login_hdr; + struct iscsi_login_req *login_hdr; u32 dword; bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data; - login_hdr = (struct iscsi_login *)task->hdr; + login_hdr = (struct iscsi_login_req *)task->hdr; login_wqe = (struct bnx2i_login_request *) bnx2i_conn->ep->qp.sq_prod_qe; @@ -430,7 +434,7 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn, default: tmfabort_wqe->ref_itt = RESERVED_ITT; } - memcpy(scsi_lun, tmfabort_hdr->lun, sizeof(struct scsi_lun)); + memcpy(scsi_lun, &tmfabort_hdr->lun, sizeof(struct scsi_lun)); tmfabort_wqe->lun[0] = be32_to_cpu(scsi_lun[0]); tmfabort_wqe->lun[1] = be32_to_cpu(scsi_lun[1]); @@ -547,7 +551,7 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn, nopout_wqe->op_code = nopout_hdr->opcode; nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL; - memcpy(nopout_wqe->lun, nopout_hdr->lun, 8); + memcpy(nopout_wqe->lun, &nopout_hdr->lun, 8); if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { u32 tmp = nopout_wqe->lun[0]; @@ -1331,24 +1335,25 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba) /** * bnx2i_process_scsi_cmd_resp - 
this function handles scsi cmd completion. - * @conn: iscsi connection + * @session: iscsi session + * @bnx2i_conn: bnx2i connection * @cqe: pointer to newly DMA'ed CQE entry for processing * * process SCSI CMD Response CQE & complete the request to SCSI-ML */ -static int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session, - struct bnx2i_conn *bnx2i_conn, - struct cqe *cqe) +int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session, + struct bnx2i_conn *bnx2i_conn, + struct cqe *cqe) { struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; struct bnx2i_cmd_response *resp_cqe; struct bnx2i_cmd *bnx2i_cmd; struct iscsi_task *task; - struct iscsi_cmd_rsp *hdr; + struct iscsi_scsi_rsp *hdr; u32 datalen = 0; resp_cqe = (struct bnx2i_cmd_response *)cqe; - spin_lock(&session->lock); + spin_lock_bh(&session->lock); task = iscsi_itt_to_task(conn, resp_cqe->itt & ISCSI_CMD_RESPONSE_INDEX); if (!task) @@ -1371,7 +1376,7 @@ static int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session, } bnx2i_iscsi_unmap_sg_list(bnx2i_cmd); - hdr = (struct iscsi_cmd_rsp *)task->hdr; + hdr = (struct iscsi_scsi_rsp *)task->hdr; resp_cqe = (struct bnx2i_cmd_response *)cqe; hdr->opcode = resp_cqe->op_code; hdr->max_cmdsn = cpu_to_be32(resp_cqe->max_cmd_sn); @@ -1409,7 +1414,7 @@ done: __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, conn->data, datalen); fail: - spin_unlock(&session->lock); + spin_unlock_bh(&session->lock); return 0; } @@ -1711,7 +1716,7 @@ static int bnx2i_process_nopin_mesg(struct iscsi_session *session, hdr->flags = ISCSI_FLAG_CMD_FINAL; hdr->itt = task->hdr->itt; hdr->ttt = cpu_to_be32(nop_in->ttt); - memcpy(hdr->lun, nop_in->lun, 8); + memcpy(&hdr->lun, nop_in->lun, 8); } done: __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); @@ -1754,7 +1759,7 @@ static void bnx2i_process_async_mesg(struct iscsi_session *session, resp_hdr->opcode = async_cqe->op_code; resp_hdr->flags = 0x80; - memcpy(resp_hdr->lun, async_cqe->lun, 8); + memcpy(&resp_hdr->lun, async_cqe->lun, 8); resp_hdr->exp_cmdsn = cpu_to_be32(async_cqe->exp_cmd_sn); resp_hdr->max_cmdsn = cpu_to_be32(async_cqe->max_cmd_sn); @@ -1836,21 +1841,136 @@ static void bnx2i_process_cmd_cleanup_resp(struct iscsi_session *session, } +/** + * bnx2i_percpu_io_thread - thread per cpu for ios + * + * @arg: ptr to bnx2i_percpu_info structure + */ +int bnx2i_percpu_io_thread(void *arg) +{ + struct bnx2i_percpu_s *p = arg; + struct bnx2i_work *work, *tmp; + LIST_HEAD(work_list); + + set_user_nice(current, -20); + + while (!kthread_should_stop()) { + spin_lock_bh(&p->p_work_lock); + while (!list_empty(&p->work_list)) { + list_splice_init(&p->work_list, &work_list); + spin_unlock_bh(&p->p_work_lock); + + list_for_each_entry_safe(work, tmp, &work_list, list) { + list_del_init(&work->list); + /* work allocated in the bh, freed here */ + bnx2i_process_scsi_cmd_resp(work->session, + work->bnx2i_conn, + &work->cqe); + atomic_dec(&work->bnx2i_conn->work_cnt); + kfree(work); + } + spin_lock_bh(&p->p_work_lock); + } + set_current_state(TASK_INTERRUPTIBLE); + spin_unlock_bh(&p->p_work_lock); + schedule(); + } + __set_current_state(TASK_RUNNING); + + return 0; +} + + +/** + * bnx2i_queue_scsi_cmd_resp - queue cmd completion to the percpu thread + * @bnx2i_conn: bnx2i connection + * + * this function is called by generic KCQ handler to queue all pending cmd + * completion CQEs + * + * The implementation is to queue the cmd response based on the + * last recorded command for the given connection. The + * cpu_id gets recorded upon task_xmit. 
No out-of-order completion! + */ +static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session, + struct bnx2i_conn *bnx2i_conn, + struct bnx2i_nop_in_msg *cqe) +{ + struct bnx2i_work *bnx2i_work = NULL; + struct bnx2i_percpu_s *p = NULL; + struct iscsi_task *task; + struct scsi_cmnd *sc; + int rc = 0; + int cpu; + + spin_lock(&session->lock); + task = iscsi_itt_to_task(bnx2i_conn->cls_conn->dd_data, + cqe->itt & ISCSI_CMD_RESPONSE_INDEX); + if (!task) { + spin_unlock(&session->lock); + return -EINVAL; + } + sc = task->sc; + spin_unlock(&session->lock); + + if (!blk_rq_cpu_valid(sc->request)) + cpu = smp_processor_id(); + else + cpu = sc->request->cpu; + + p = &per_cpu(bnx2i_percpu, cpu); + spin_lock(&p->p_work_lock); + if (unlikely(!p->iothread)) { + rc = -EINVAL; + goto err; + } + /* Alloc and copy to the cqe */ + bnx2i_work = kzalloc(sizeof(struct bnx2i_work), GFP_ATOMIC); + if (bnx2i_work) { + INIT_LIST_HEAD(&bnx2i_work->list); + bnx2i_work->session = session; + bnx2i_work->bnx2i_conn = bnx2i_conn; + memcpy(&bnx2i_work->cqe, cqe, sizeof(struct cqe)); + list_add_tail(&bnx2i_work->list, &p->work_list); + atomic_inc(&bnx2i_conn->work_cnt); + wake_up_process(p->iothread); + spin_unlock(&p->p_work_lock); + goto done; + } else + rc = -ENOMEM; +err: + spin_unlock(&p->p_work_lock); + bnx2i_process_scsi_cmd_resp(session, bnx2i_conn, (struct cqe *)cqe); +done: + return rc; +} + /** * bnx2i_process_new_cqes - process newly DMA'ed CQE's - * @bnx2i_conn: iscsi connection + * @bnx2i_conn: bnx2i connection * * this function is called by generic KCQ handler to process all pending CQE's */ -static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn) +static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn) { struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; struct iscsi_session *session = conn->session; - struct qp_info *qp = &bnx2i_conn->ep->qp; + struct qp_info *qp; struct bnx2i_nop_in_msg *nopin; int tgt_async_msg; + int cqe_cnt = 0; + + if (bnx2i_conn->ep == NULL) + return 0; + + qp = &bnx2i_conn->ep->qp; + if (!qp->cq_virt) { + printk(KERN_ALERT "bnx2i (%s): cq resr freed in bh execution!", + bnx2i_conn->hba->netdev->name); + goto out; + } while (1) { nopin = (struct bnx2i_nop_in_msg *) qp->cq_cons_qe; if (nopin->cq_req_sn != qp->cqe_exp_seq_sn) @@ -1873,8 +1993,9 @@ static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn) switch (nopin->op_code) { case ISCSI_OP_SCSI_CMD_RSP: case ISCSI_OP_SCSI_DATA_IN: - bnx2i_process_scsi_cmd_resp(session, bnx2i_conn, - qp->cq_cons_qe); + /* Run the kthread engine only for data cmds + All other cmds will be completed in this bh! */ + bnx2i_queue_scsi_cmd_resp(session, bnx2i_conn, nopin); break; case ISCSI_OP_LOGIN_RSP: bnx2i_process_login_resp(session, bnx2i_conn, @@ -1918,13 +2039,21 @@ static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn) printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n", nopin->op_code); } - if (!tgt_async_msg) - bnx2i_conn->ep->num_active_cmds--; + if (!tgt_async_msg) { + if (!atomic_read(&bnx2i_conn->ep->num_active_cmds)) + printk(KERN_ALERT "bnx2i (%s): no active cmd! 
" + "op 0x%x\n", + bnx2i_conn->hba->netdev->name, + nopin->op_code); + else + atomic_dec(&bnx2i_conn->ep->num_active_cmds); + } cqe_out: /* clear out in production version only, till beta keep opcode * field intact, will be helpful in debugging (context dump) * nopin->op_code = 0; */ + cqe_cnt++; qp->cqe_exp_seq_sn++; if (qp->cqe_exp_seq_sn == (qp->cqe_size * 2 + 1)) qp->cqe_exp_seq_sn = ISCSI_INITIAL_SN; @@ -1937,6 +2066,8 @@ cqe_out: qp->cq_cons_idx++; } } +out: + return cqe_cnt; } /** @@ -1952,6 +2083,7 @@ static void bnx2i_fastpath_notification(struct bnx2i_hba *hba, { struct bnx2i_conn *bnx2i_conn; u32 iscsi_cid; + int nxt_idx; iscsi_cid = new_cqe_kcqe->iscsi_conn_id; bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid); @@ -1964,9 +2096,12 @@ static void bnx2i_fastpath_notification(struct bnx2i_hba *hba, printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid); return; } + bnx2i_process_new_cqes(bnx2i_conn); - bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE_FP); - bnx2i_process_new_cqes(bnx2i_conn); + nxt_idx = bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, + CNIC_ARM_CQE_FP); + if (nxt_idx && nxt_idx == bnx2i_process_new_cqes(bnx2i_conn)) + bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE_FP); } @@ -2312,7 +2447,7 @@ static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba, printk(KERN_ALERT "bnx2i (%s): ofld1 cmpl - invalid " "opcode\n", hba->netdev->name); else if (ofld_kcqe->completion_status == - ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY) + ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY) /* error status code valid only for 5771x chipset */ ep->state = EP_STATE_OFLD_FAILED_CID_BUSY; else @@ -2386,14 +2521,20 @@ static void bnx2i_indicate_kcqe(void *context, struct kcqe *kcqe[], * bnx2i_indicate_netevent - Generic netdev event handler * @context: adapter structure pointer * @event: event type + * @vlan_id: vlans id - associated vlan id with this event * * Handles four netdev events, NETDEV_UP, NETDEV_DOWN, * NETDEV_GOING_DOWN and NETDEV_CHANGE */ -static void bnx2i_indicate_netevent(void *context, unsigned long event) +static void bnx2i_indicate_netevent(void *context, unsigned long event, + u16 vlan_id) { struct bnx2i_hba *hba = context; + /* Ignore all netevent coming from vlans */ + if (vlan_id != 0) + return; + switch (event) { case NETDEV_UP: if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) @@ -2511,7 +2652,7 @@ static void bnx2i_cm_remote_abort(struct cnic_sock *cm_sk) static int bnx2i_send_nl_mesg(void *context, u32 msg_type, - char *buf, u16 buflen) + char *buf, u16 buflen) { struct bnx2i_hba *hba = context; int rc; diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c index 6adbdc34a9a..1a947f1b972 100644 --- a/drivers/scsi/bnx2i/bnx2i_init.c +++ b/drivers/scsi/bnx2i/bnx2i_init.c @@ -1,6 +1,6 @@ /* bnx2i.c: Broadcom NetXtreme II iSCSI driver. * - * Copyright (c) 2006 - 2010 Broadcom Corporation + * Copyright (c) 2006 - 2011 Broadcom Corporation * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. 
* Copyright (c) 2007, 2008 Mike Christie * @@ -18,8 +18,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list); static u32 adapter_count; #define DRV_MODULE_NAME "bnx2i" -#define DRV_MODULE_VERSION "2.6.2.3" -#define DRV_MODULE_RELDATE "Dec 31, 2010" +#define DRV_MODULE_VERSION "2.7.0.3" +#define DRV_MODULE_RELDATE "Jun 15, 2011" static char version[] __devinitdata = "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \ @@ -30,7 +30,7 @@ MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com> and " "Eddie Wai <eddie.wai@broadcom.com>"); MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/57710/57711/57712" - " iSCSI Driver"); + "/57800/57810/57840 iSCSI Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); @@ -40,7 +40,7 @@ unsigned int event_coal_min = 24; module_param(event_coal_min, int, 0664); MODULE_PARM_DESC(event_coal_min, "Event Coalescing Minimum Commands"); -unsigned int event_coal_div = 1; +unsigned int event_coal_div = 2; module_param(event_coal_div, int, 0664); MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor"); @@ -66,6 +66,15 @@ MODULE_PARM_DESC(rq_size, "Configure RQ size"); u64 iscsi_error_mask = 0x00; +DEFINE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu); + +static int bnx2i_cpu_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu); +/* notification function for CPU hotplug events */ +static struct notifier_block bnx2i_cpu_notifier = { + .notifier_call = bnx2i_cpu_callback, +}; + /** * bnx2i_identify_device - identifies NetXtreme II device type @@ -88,11 +97,20 @@ void bnx2i_identify_device(struct bnx2i_hba *hba) (hba->pci_did == PCI_DEVICE_ID_NX2_5709S)) { set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type); hba->mail_queue_access = BNX2I_MQ_BIN_MODE; - } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 || - hba->pci_did == PCI_DEVICE_ID_NX2_57711 || - hba->pci_did == PCI_DEVICE_ID_NX2_57711E || - hba->pci_did == PCI_DEVICE_ID_NX2_57712 || - hba->pci_did == PCI_DEVICE_ID_NX2_57712E) + } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 || + hba->pci_did == PCI_DEVICE_ID_NX2_57711 || + hba->pci_did == PCI_DEVICE_ID_NX2_57711E || + hba->pci_did == PCI_DEVICE_ID_NX2_57712 || + hba->pci_did == PCI_DEVICE_ID_NX2_57712E || + hba->pci_did == PCI_DEVICE_ID_NX2_57800 || + hba->pci_did == PCI_DEVICE_ID_NX2_57800_MF || + hba->pci_did == PCI_DEVICE_ID_NX2_57800_VF || + hba->pci_did == PCI_DEVICE_ID_NX2_57810 || + hba->pci_did == PCI_DEVICE_ID_NX2_57810_MF || + hba->pci_did == PCI_DEVICE_ID_NX2_57810_VF || + hba->pci_did == PCI_DEVICE_ID_NX2_57840 || + hba->pci_did == PCI_DEVICE_ID_NX2_57840_MF || + hba->pci_did == PCI_DEVICE_ID_NX2_57840_VF) set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type); else printk(KERN_ALERT "bnx2i: unknown device, 0x%x\n", @@ -163,21 +181,14 @@ void bnx2i_start(void *handle) struct bnx2i_hba *hba = handle; int i = HZ; - if (!hba->cnic->max_iscsi_conn) { - printk(KERN_ALERT "bnx2i: dev %s does not support " - "iSCSI\n", hba->netdev->name); + /* + * We should never register devices that don't support iSCSI + * (see bnx2i_init_one), so something is wrong if we try to + * start a iSCSI adapter on hardware with 0 supported iSCSI + * connections + */ + BUG_ON(!hba->cnic->max_iscsi_conn); - if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { - mutex_lock(&bnx2i_dev_lock); - list_del_init(&hba->link); - adapter_count--; - hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI); - clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); - mutex_unlock(&bnx2i_dev_lock); - 
bnx2i_free_hba(hba); - } - return; - } bnx2i_send_fw_iscsi_init_msg(hba); while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--) msleep(BNX2I_INIT_POLL_TIME); @@ -281,6 +292,13 @@ static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic) int rc; mutex_lock(&bnx2i_dev_lock); + if (!cnic->max_iscsi_conn) { + printk(KERN_ALERT "bnx2i: dev %s does not support " + "iSCSI\n", hba->netdev->name); + rc = -EOPNOTSUPP; + goto out; + } + hba->cnic = cnic; rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba); if (!rc) { @@ -298,6 +316,7 @@ static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic) else printk(KERN_ERR "bnx2i dev reg, unknown error, %d\n", rc); +out: mutex_unlock(&bnx2i_dev_lock); return rc; @@ -362,6 +381,91 @@ void bnx2i_ulp_exit(struct cnic_dev *dev) /** + * bnx2i_percpu_thread_create - Create a receive thread for an + * online CPU + * + * @cpu: cpu index for the online cpu + */ +static void bnx2i_percpu_thread_create(unsigned int cpu) +{ + struct bnx2i_percpu_s *p; + struct task_struct *thread; + + p = &per_cpu(bnx2i_percpu, cpu); + + thread = kthread_create(bnx2i_percpu_io_thread, (void *)p, + "bnx2i_thread/%d", cpu); + /* bind thread to the cpu */ + if (likely(!IS_ERR(thread))) { + kthread_bind(thread, cpu); + p->iothread = thread; + wake_up_process(thread); + } +} + + +static void bnx2i_percpu_thread_destroy(unsigned int cpu) +{ + struct bnx2i_percpu_s *p; + struct task_struct *thread; + struct bnx2i_work *work, *tmp; + + /* Prevent any new work from being queued for this CPU */ + p = &per_cpu(bnx2i_percpu, cpu); + spin_lock_bh(&p->p_work_lock); + thread = p->iothread; + p->iothread = NULL; + + /* Free all work in the list */ + list_for_each_entry_safe(work, tmp, &p->work_list, list) { + list_del_init(&work->list); + bnx2i_process_scsi_cmd_resp(work->session, + work->bnx2i_conn, &work->cqe); + kfree(work); + } + + spin_unlock_bh(&p->p_work_lock); + if (thread) + kthread_stop(thread); +} + + +/** + * bnx2i_cpu_callback - Handler for CPU hotplug events + * + * @nfb: The callback data block + * @action: The event triggering the callback + * @hcpu: The index of the CPU that the event is for + * + * This creates or destroys per-CPU data for iSCSI + * + * Returns NOTIFY_OK always. 
+ */ +static int bnx2i_cpu_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + unsigned cpu = (unsigned long)hcpu; + + switch (action) { + case CPU_ONLINE: + case CPU_ONLINE_FROZEN: + printk(KERN_INFO "bnx2i: CPU %x online: Create Rx thread\n", + cpu); + bnx2i_percpu_thread_create(cpu); + break; + case CPU_DEAD: + case CPU_DEAD_FROZEN: + printk(KERN_INFO "CPU %x offline: Remove Rx thread\n", cpu); + bnx2i_percpu_thread_destroy(cpu); + break; + default: + break; + } + return NOTIFY_OK; +} + + +/** * bnx2i_mod_init - module init entry point * * initialize any driver wide global data structures such as endpoint pool, @@ -371,6 +475,8 @@ void bnx2i_ulp_exit(struct cnic_dev *dev) static int __init bnx2i_mod_init(void) { int err; + unsigned cpu = 0; + struct bnx2i_percpu_s *p; printk(KERN_INFO "%s", version); @@ -393,6 +499,20 @@ static int __init bnx2i_mod_init(void) goto unreg_xport; } + /* Create percpu kernel threads to handle iSCSI I/O completions */ + for_each_possible_cpu(cpu) { + p = &per_cpu(bnx2i_percpu, cpu); + INIT_LIST_HEAD(&p->work_list); + spin_lock_init(&p->p_work_lock); + p->iothread = NULL; + } + + for_each_online_cpu(cpu) + bnx2i_percpu_thread_create(cpu); + + /* Initialize per CPU interrupt thread */ + register_hotcpu_notifier(&bnx2i_cpu_notifier); + return 0; unreg_xport: @@ -413,6 +533,7 @@ out: static void __exit bnx2i_mod_exit(void) { struct bnx2i_hba *hba; + unsigned cpu = 0; mutex_lock(&bnx2i_dev_lock); while (!list_empty(&adapter_list)) { @@ -430,6 +551,11 @@ static void __exit bnx2i_mod_exit(void) } mutex_unlock(&bnx2i_dev_lock); + unregister_hotcpu_notifier(&bnx2i_cpu_notifier); + + for_each_online_cpu(cpu) + bnx2i_percpu_thread_destroy(cpu); + iscsi_unregister_transport(&bnx2i_iscsi_transport); cnic_unregister_driver(CNIC_ULP_ISCSI); } diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index 041928b23cb..cffd4d75df5 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -1,7 +1,7 @@ /* * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver. * - * Copyright (c) 2006 - 2010 Broadcom Corporation + * Copyright (c) 2006 - 2011 Broadcom Corporation * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. 
* Copyright (c) 2007, 2008 Mike Christie * @@ -27,6 +27,7 @@ static struct scsi_host_template bnx2i_host_template; */ static DEFINE_SPINLOCK(bnx2i_resc_lock); /* protects global resources */ +DECLARE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu); static int bnx2i_adapter_ready(struct bnx2i_hba *hba) { @@ -1212,9 +1213,10 @@ static int bnx2i_task_xmit(struct iscsi_task *task) struct bnx2i_conn *bnx2i_conn = conn->dd_data; struct scsi_cmnd *sc = task->sc; struct bnx2i_cmd *cmd = task->dd_data; - struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr; + struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr; - if (bnx2i_conn->ep->num_active_cmds + 1 > hba->max_sqes) + if (atomic_read(&bnx2i_conn->ep->num_active_cmds) + 1 > + hba->max_sqes) return -ENOMEM; /* @@ -1354,6 +1356,9 @@ bnx2i_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid) bnx2i_conn = conn->dd_data; bnx2i_conn->cls_conn = cls_conn; bnx2i_conn->hba = hba; + + atomic_set(&bnx2i_conn->work_cnt, 0); + /* 'ep' ptr will be assigned in bind() call */ bnx2i_conn->ep = NULL; init_completion(&bnx2i_conn->cmd_cleanup_cmpl); @@ -1457,11 +1462,34 @@ static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn) struct bnx2i_conn *bnx2i_conn = conn->dd_data; struct Scsi_Host *shost; struct bnx2i_hba *hba; + struct bnx2i_work *work, *tmp; + unsigned cpu = 0; + struct bnx2i_percpu_s *p; shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn)); hba = iscsi_host_priv(shost); bnx2i_conn_free_login_resources(hba, bnx2i_conn); + + if (atomic_read(&bnx2i_conn->work_cnt)) { + for_each_online_cpu(cpu) { + p = &per_cpu(bnx2i_percpu, cpu); + spin_lock_bh(&p->p_work_lock); + list_for_each_entry_safe(work, tmp, + &p->work_list, list) { + if (work->session == conn->session && + work->bnx2i_conn == bnx2i_conn) { + list_del_init(&work->list); + kfree(work); + if (!atomic_dec_and_test( + &bnx2i_conn->work_cnt)) + break; + } + } + spin_unlock_bh(&p->p_work_lock); + } + } + iscsi_conn_teardown(cls_conn); } @@ -1769,7 +1797,7 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost, } bnx2i_ep = ep->dd_data; - bnx2i_ep->num_active_cmds = 0; + atomic_set(&bnx2i_ep->num_active_cmds, 0); iscsi_cid = bnx2i_alloc_iscsi_cid(hba); if (iscsi_cid == -1) { printk(KERN_ALERT "bnx2i (%s): alloc_ep - unable to allocate " @@ -2163,9 +2191,9 @@ static struct scsi_host_template bnx2i_host_template = { .eh_device_reset_handler = iscsi_eh_device_reset, .eh_target_reset_handler = iscsi_eh_recover_target, .change_queue_depth = iscsi_change_queue_depth, - .can_queue = 1024, + .can_queue = 2048, .max_sectors = 127, - .cmd_per_lun = 24, + .cmd_per_lun = 128, .this_id = -1, .use_clustering = ENABLE_CLUSTERING, .sg_tablesize = ISCSI_MAX_BDS_PER_CMD, diff --git a/drivers/scsi/bnx2i/bnx2i_sysfs.c b/drivers/scsi/bnx2i/bnx2i_sysfs.c index 9174196d903..83a77f7244d 100644 --- a/drivers/scsi/bnx2i/bnx2i_sysfs.c +++ b/drivers/scsi/bnx2i/bnx2i_sysfs.c @@ -1,6 +1,6 @@ /* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver. 
* - * Copyright (c) 2004 - 2010 Broadcom Corporation + * Copyright (c) 2004 - 2011 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index fc2cdb62f53..bd22041e278 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c @@ -985,7 +985,7 @@ static int init_act_open(struct cxgbi_sock *csk) csk->saddr.sin_addr.s_addr = chba->ipv4addr; csk->rss_qid = 0; - csk->l2t = t3_l2t_get(t3dev, dst->neighbour, ndev); + csk->l2t = t3_l2t_get(t3dev, dst_get_neighbour(dst), ndev); if (!csk->l2t) { pr_err("NO l2t available.\n"); return -EINVAL; @@ -1245,7 +1245,7 @@ static int cxgb3i_ddp_init(struct cxgbi_device *cdev) struct cxgbi_ddp_info *ddp = tdev->ulp_iscsi; struct ulp_iscsi_info uinfo; unsigned int pgsz_factor[4]; - int err; + int i, err; if (ddp) { kref_get(&ddp->refcnt); @@ -1271,6 +1271,8 @@ static int cxgb3i_ddp_init(struct cxgbi_device *cdev) uinfo.tagmask = ddp->idx_mask << PPOD_IDX_SHIFT; cxgbi_ddp_page_size_factor(pgsz_factor); + for (i = 0; i < 4; i++) + uinfo.pgsz_factor[i] = pgsz_factor[i]; uinfo.ulimit = uinfo.llimit + (ddp->nppods << PPOD_SIZE_SHIFT); err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo); diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index f3a4cd7cf78..ae13c4993aa 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -1160,7 +1160,7 @@ static int init_act_open(struct cxgbi_sock *csk) cxgbi_sock_set_flag(csk, CTPF_HAS_ATID); cxgbi_sock_get(csk); - csk->l2t = cxgb4_l2t_get(lldi->l2t, csk->dst->neighbour, ndev, 0); + csk->l2t = cxgb4_l2t_get(lldi->l2t, dst_get_neighbour(csk->dst), ndev, 0); if (!csk->l2t) { pr_err("%s, cannot alloc l2t.\n", ndev->name); goto rel_resource; diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index a2a9c7c6c64..77ac217ad5c 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -492,7 +492,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) goto err_out; } dst = &rt->dst; - ndev = dst->neighbour->dev; + ndev = dst_get_neighbour(dst)->dev; if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { pr_info("multi-cast route %pI4, port %u, dev %s.\n", @@ -506,7 +506,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr); mtu = ndev->mtu; pr_info("rt dev %s, loopback -> %s, mtu %u.\n", - dst->neighbour->dev->name, ndev->name, mtu); + dst_get_neighbour(dst)->dev->name, ndev->name, mtu); } cdev = cxgbi_device_find_by_netdev(ndev, &port); diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index e7fc70d6b47..27c9d65d54a 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -35,7 +35,7 @@ * mode page were taken from the LSI RDAC 2.4 GPL'd * driver, and then converted to Linux conventions. 
*/ -#define RDAC_QUIESCENCE_TIME 20; +#define RDAC_QUIESCENCE_TIME 20 /* * Page Codes */ @@ -128,25 +128,7 @@ struct c4_inquiry { u8 reserved[2]; }; -struct rdac_controller { - u8 subsys_id[SUBSYS_ID_LEN]; - u8 slot_id[SLOT_ID_LEN]; - int use_ms10; - struct kref kref; - struct list_head node; /* list of all controllers */ - union { - struct rdac_pg_legacy legacy; - struct rdac_pg_expanded expanded; - } mode_select; - u8 index; - u8 array_name[ARRAY_LABEL_LEN]; - spinlock_t ms_lock; - int ms_queued; - struct work_struct ms_work; - struct scsi_device *ms_sdev; - struct list_head ms_head; -}; - +#define UNIQUE_ID_LEN 16 struct c8_inquiry { u8 peripheral_info; u8 page_code; /* 0xC8 */ @@ -159,12 +141,31 @@ struct c8_inquiry { u8 vol_user_label_len; u8 vol_user_label[60]; u8 array_uniq_id_len; - u8 array_unique_id[16]; + u8 array_unique_id[UNIQUE_ID_LEN]; u8 array_user_label_len; u8 array_user_label[60]; u8 lun[8]; }; +struct rdac_controller { + u8 array_id[UNIQUE_ID_LEN]; + int use_ms10; + struct kref kref; + struct list_head node; /* list of all controllers */ + union { + struct rdac_pg_legacy legacy; + struct rdac_pg_expanded expanded; + } mode_select; + u8 index; + u8 array_name[ARRAY_LABEL_LEN]; + struct Scsi_Host *host; + spinlock_t ms_lock; + int ms_queued; + struct work_struct ms_work; + struct scsi_device *ms_sdev; + struct list_head ms_head; +}; + struct c2_inquiry { u8 peripheral_info; u8 page_code; /* 0xC2 */ @@ -369,16 +370,17 @@ static void release_controller(struct kref *kref) kfree(ctlr); } -static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id, - char *array_name) +static struct rdac_controller *get_controller(int index, char *array_name, + u8 *array_id, struct scsi_device *sdev) { struct rdac_controller *ctlr, *tmp; spin_lock(&list_lock); list_for_each_entry(tmp, &ctlr_list, node) { - if ((memcmp(tmp->subsys_id, subsys_id, SUBSYS_ID_LEN) == 0) && - (memcmp(tmp->slot_id, slot_id, SLOT_ID_LEN) == 0)) { + if ((memcmp(tmp->array_id, array_id, UNIQUE_ID_LEN) == 0) && + (tmp->index == index) && + (tmp->host == sdev->host)) { kref_get(&tmp->kref); spin_unlock(&list_lock); return tmp; @@ -389,16 +391,11 @@ static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id, goto done; /* initialize fields of controller */ - memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN); - memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN); + memcpy(ctlr->array_id, array_id, UNIQUE_ID_LEN); + ctlr->index = index; + ctlr->host = sdev->host; memcpy(ctlr->array_name, array_name, ARRAY_LABEL_LEN); - /* update the controller index */ - if (slot_id[1] == 0x31) - ctlr->index = 0; - else - ctlr->index = 1; - kref_init(&ctlr->kref); ctlr->use_ms10 = -1; ctlr->ms_queued = 0; @@ -444,7 +441,7 @@ done: } static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h, - char *array_name) + char *array_name, u8 *array_id) { int err, i; struct c8_inquiry *inqp; @@ -463,6 +460,8 @@ static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h, *(array_name+i) = inqp->array_user_label[(2*i)+1]; *(array_name+ARRAY_LABEL_LEN-1) = '\0'; + memset(array_id, 0, UNIQUE_ID_LEN); + memcpy(array_id, inqp->array_unique_id, inqp->array_uniq_id_len); } return err; } @@ -504,16 +503,20 @@ static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h) } static int initialize_controller(struct scsi_device *sdev, - struct rdac_dh_data *h, char *array_name) + struct rdac_dh_data *h, char *array_name, u8 *array_id) { - int err; + int err, index; struct c4_inquiry *inqp; err = 
submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h); if (err == SCSI_DH_OK) { inqp = &h->inq.c4; - h->ctlr = get_controller(inqp->subsys_id, inqp->slot_id, - array_name); + /* get the controller index */ + if (inqp->slot_id[1] == 0x31) + index = 0; + else + index = 1; + h->ctlr = get_controller(index, array_name, array_id, sdev); if (!h->ctlr) err = SCSI_DH_RES_TEMP_UNAVAIL; } @@ -835,6 +838,7 @@ static int rdac_bus_attach(struct scsi_device *sdev) unsigned long flags; int err; char array_name[ARRAY_LABEL_LEN]; + char array_id[UNIQUE_ID_LEN]; scsi_dh_data = kzalloc(sizeof(*scsi_dh_data) + sizeof(*h) , GFP_KERNEL); @@ -849,11 +853,11 @@ static int rdac_bus_attach(struct scsi_device *sdev) h->lun = UNINITIALIZED_LUN; h->state = RDAC_STATE_ACTIVE; - err = get_lun_info(sdev, h, array_name); + err = get_lun_info(sdev, h, array_name, array_id); if (err != SCSI_DH_OK) goto failed; - err = initialize_controller(sdev, h, array_name); + err = initialize_controller(sdev, h, array_name, array_id); if (err != SCSI_DH_OK) goto failed; diff --git a/drivers/scsi/dpt/dpti_i2o.h b/drivers/scsi/dpt/dpti_i2o.h index 179ad77f6cc..bd9e31e1624 100644 --- a/drivers/scsi/dpt/dpti_i2o.h +++ b/drivers/scsi/dpt/dpti_i2o.h @@ -22,7 +22,7 @@ #include <linux/i2o-dev.h> #include <linux/notifier.h> -#include <asm/atomic.h> +#include <linux/atomic.h> /* diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 155d7b9bdea..ba710e350ac 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -99,7 +99,8 @@ static void fcoe_destroy_work(struct work_struct *); static int fcoe_ddp_setup(struct fc_lport *, u16, struct scatterlist *, unsigned int); static int fcoe_ddp_done(struct fc_lport *, u16); - +static int fcoe_ddp_target(struct fc_lport *, u16, struct scatterlist *, + unsigned int); static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *); static bool fcoe_match(struct net_device *netdev); @@ -143,6 +144,7 @@ static struct libfc_function_template fcoe_libfc_fcn_templ = { .frame_send = fcoe_xmit, .ddp_setup = fcoe_ddp_setup, .ddp_done = fcoe_ddp_done, + .ddp_target = fcoe_ddp_target, .elsct_send = fcoe_elsct_send, .get_lesb = fcoe_get_lesb, .lport_set_port_id = fcoe_set_port_id, @@ -429,21 +431,6 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe) struct fcoe_ctlr *fip = &fcoe->ctlr; u8 flogi_maddr[ETH_ALEN]; const struct net_device_ops *ops; - struct fcoe_port *port = lport_priv(fcoe->ctlr.lp); - - FCOE_NETDEV_DBG(netdev, "Destroying interface\n"); - - /* Logout of the fabric */ - fc_fabric_logoff(fcoe->ctlr.lp); - - /* Cleanup the fc_lport */ - fc_lport_destroy(fcoe->ctlr.lp); - - /* Stop the transmit retry timer */ - del_timer_sync(&port->timer); - - /* Free existing transmit skbs */ - fcoe_clean_pending_queue(fcoe->ctlr.lp); /* * Don't listen for Ethernet packets anymore. 
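The fcoe hunk just above strips the lport-level teardown (fabric logoff, lport destroy, timer stop, queue flush) out of fcoe_interface_cleanup(); later hunks re-home that work in fcoe_if_destroy() and have fcoe_destroy_work() run the two stages in order, skipping the interface stage for NPIV vports that share it. A compressed sketch of that ordering, with made-up names rather than the driver's:

struct model_lport;
struct model_fcoe;

struct model_port {
	struct model_lport *lport;
	struct model_fcoe *fcoe;
	int is_npiv_vport;
};

static void model_if_destroy(struct model_lport *lport)
{
	/* per-lport stage: log off the fabric, destroy the lport, flush pending tx */
}

static void model_interface_cleanup(struct model_fcoe *fcoe)
{
	/* per-netdev stage: stop packet handlers, restore MACs, drop the self-reference */
}

static void model_destroy_work(struct model_port *port)
{
	model_if_destroy(port->lport);			/* always runs first */
	if (!port->is_npiv_vport)			/* vports share the underlying interface */
		model_interface_cleanup(port->fcoe);
}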
@@ -466,9 +453,6 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe) } else dev_mc_del(netdev, FIP_ALL_ENODE_MACS); - if (!is_zero_ether_addr(port->data_src_addr)) - dev_uc_del(netdev, port->data_src_addr); - /* Tell the LLD we are done w/ FCoE */ ops = netdev->netdev_ops; if (ops->ndo_fcoe_disable) { @@ -476,6 +460,8 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe) FCOE_NETDEV_DBG(netdev, "Failed to disable FCoE" " specific feature for LLD.\n"); } + + /* Release the self-reference taken during fcoe_interface_create() */ fcoe_interface_put(fcoe); } @@ -501,6 +487,19 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev, } /** + * fcoe_port_send() - Send an Ethernet-encapsulated FIP/FCoE frame + * @port: The FCoE port + * @skb: The FIP/FCoE packet to be sent + */ +static void fcoe_port_send(struct fcoe_port *port, struct sk_buff *skb) +{ + if (port->fcoe_pending_queue.qlen) + fcoe_check_wait_queue(port->lport, skb); + else if (fcoe_start_io(skb)) + fcoe_check_wait_queue(port->lport, skb); +} + +/** * fcoe_fip_send() - Send an Ethernet-encapsulated FIP frame * @fip: The FCoE controller * @skb: The FIP packet to be sent @@ -508,7 +507,7 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev, static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) { skb->dev = fcoe_from_ctlr(fip)->netdev; - dev_queue_xmit(skb); + fcoe_port_send(lport_priv(fip->lp), skb); } /** @@ -749,12 +748,27 @@ static int fcoe_shost_config(struct fc_lport *lport, struct device *dev) * The offload EM that this routine is associated with will handle any * packets that are for SCSI read requests. * + * This has been enhanced to work when FCoE stack is operating in target + * mode. + * * Returns: True for read types I/O, otherwise returns false. 
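fcoe_port_send(), added above, gives the FIP control path and (in a later fcoe_xmit() hunk) the FCoE data path a single queue-or-send decision, so FIP frames no longer bypass a backed-up transmit queue through dev_queue_xmit(). A self-contained sketch of that decision, using stand-in names:

struct sketch_port {
	unsigned int backlog;			/* frames already parked on the wait queue */
};

static int sketch_start_io(void *frame)
{
	return 0;				/* stands in for fcoe_start_io(): 0 means handed to the netdev */
}

static void sketch_defer(struct sketch_port *port, void *frame)
{
	port->backlog++;			/* stands in for fcoe_check_wait_queue() */
}

static void sketch_port_send(struct sketch_port *port, void *frame)
{
	if (port->backlog)
		sketch_defer(port, frame);	/* keep ordering behind the existing backlog */
	else if (sketch_start_io(frame))
		sketch_defer(port, frame);	/* immediate send failed, park the frame */
}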
*/ bool fcoe_oem_match(struct fc_frame *fp) { - return fc_fcp_is_read(fr_fsp(fp)) && - (fr_fsp(fp)->data_len > fcoe_ddp_min); + struct fc_frame_header *fh = fc_frame_header_get(fp); + struct fcp_cmnd *fcp; + + if (fc_fcp_is_read(fr_fsp(fp)) && + (fr_fsp(fp)->data_len > fcoe_ddp_min)) + return true; + else if (!(ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)) { + fcp = fc_frame_payload_get(fp, sizeof(*fcp)); + if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN && + fcp && (ntohl(fcp->fc_dl) > fcoe_ddp_min) && + (fcp->fc_flags & FCP_CFL_WRDATA)) + return true; + } + return false; } /** @@ -844,6 +858,32 @@ skip_oem: */ static void fcoe_if_destroy(struct fc_lport *lport) { + struct fcoe_port *port = lport_priv(lport); + struct fcoe_interface *fcoe = port->priv; + struct net_device *netdev = fcoe->netdev; + + FCOE_NETDEV_DBG(netdev, "Destroying interface\n"); + + /* Logout of the fabric */ + fc_fabric_logoff(lport); + + /* Cleanup the fc_lport */ + fc_lport_destroy(lport); + + /* Stop the transmit retry timer */ + del_timer_sync(&port->timer); + + /* Free existing transmit skbs */ + fcoe_clean_pending_queue(lport); + + rtnl_lock(); + if (!is_zero_ether_addr(port->data_src_addr)) + dev_uc_del(netdev, port->data_src_addr); + rtnl_unlock(); + + /* Release reference held in fcoe_if_create() */ + fcoe_interface_put(fcoe); + /* Free queued packets for the per-CPU receive threads */ fcoe_percpu_clean(lport); @@ -887,6 +927,28 @@ static int fcoe_ddp_setup(struct fc_lport *lport, u16 xid, } /** + * fcoe_ddp_target() - Call a LLD's ddp_target through the net device + * @lport: The local port to setup DDP for + * @xid: The exchange ID for this DDP transfer + * @sgl: The scatterlist describing this transfer + * @sgc: The number of sg items + * + * Returns: 0 if the DDP context was not configured + */ +static int fcoe_ddp_target(struct fc_lport *lport, u16 xid, + struct scatterlist *sgl, unsigned int sgc) +{ + struct net_device *netdev = fcoe_netdev(lport); + + if (netdev->netdev_ops->ndo_fcoe_ddp_target) + return netdev->netdev_ops->ndo_fcoe_ddp_target(netdev, xid, + sgl, sgc); + + return 0; +} + + +/** * fcoe_ddp_done() - Call a LLD's ddp_done through the net device * @lport: The local port to complete DDP on * @xid: The exchange ID for this DDP transfer @@ -1206,6 +1268,26 @@ static int fcoe_cpu_callback(struct notifier_block *nfb, } /** + * fcoe_select_cpu() - Selects CPU to handle post-processing of incoming + * command. + * + * This routine selects next CPU based on cpumask to distribute + * incoming requests in round robin. + * + * Returns: int CPU number + */ +static inline unsigned int fcoe_select_cpu(void) +{ + static unsigned int selected_cpu; + + selected_cpu = cpumask_next(selected_cpu, cpu_online_mask); + if (selected_cpu >= nr_cpu_ids) + selected_cpu = cpumask_first(cpu_online_mask); + + return selected_cpu; +} + +/** * fcoe_rcv() - Receive packets from a net device * @skb: The received packet * @netdev: The net device that the packet was received on @@ -1271,18 +1353,25 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, fr = fcoe_dev_from_skb(skb); fr->fr_dev = lport; - fr->ptype = ptype; /* * In case the incoming frame's exchange is originated from * the initiator, then received frame's exchange id is ANDed * with fc_cpu_mask bits to get the same cpu on which exchange - * was originated, otherwise just use the current cpu. + * was originated, otherwise select cpu using rx exchange id + * or fcoe_select_cpu(). 
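The comment just above (and the reworked branch that follows) describes the new receive-side CPU selection: replies to exchanges this host originated follow OX_ID, frames on an already-assigned responder exchange follow RX_ID, and brand-new exchanges are spread round robin. A compact stand-alone model of that policy; FC_XID_UNKNOWN and fc_cpu_mask are the driver's symbols, everything below is a stand-in:

enum { SKETCH_XID_UNKNOWN = 0xffff };		/* stands in for FC_XID_UNKNOWN */

static unsigned int sketch_next_rr_cpu(unsigned int nr_cpus)
{
	static unsigned int last;

	last = (last + 1) % nr_cpus;		/* assumes nr_cpus > 0; the driver walks cpu_online_mask */
	return last;
}

static unsigned int sketch_pick_rx_cpu(int sender_is_responder, unsigned int ox_id,
					unsigned int rx_id, unsigned int fc_cpu_mask,
					unsigned int nr_cpus)
{
	if (sender_is_responder)		/* FC_FC_EX_CTX: reply to an exchange we originated */
		return ox_id & fc_cpu_mask;	/* stay on the CPU that originated it */
	if (rx_id != SKETCH_XID_UNKNOWN)	/* responder exchange already assigned */
		return rx_id & fc_cpu_mask;
	return sketch_next_rr_cpu(nr_cpus);	/* new exchange: distribute round robin */
}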
*/ if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX) cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask; - else - cpu = smp_processor_id(); + else { + if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN) + cpu = fcoe_select_cpu(); + else + cpu = ntohs(fh->fh_rx_id) & fc_cpu_mask; + } + + if (cpu >= nr_cpu_ids) + goto err; fps = &per_cpu(fcoe_percpu, cpu); spin_lock_bh(&fps->fcoe_rx_list.lock); @@ -1482,11 +1571,7 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp) /* send down to lld */ fr_dev(fp) = lport; - if (port->fcoe_pending_queue.qlen) - fcoe_check_wait_queue(lport, skb); - else if (fcoe_start_io(skb)) - fcoe_check_wait_queue(lport, skb); - + fcoe_port_send(port, skb); return 0; } @@ -1733,7 +1818,6 @@ static int fcoe_device_notification(struct notifier_block *notifier, case NETDEV_UNREGISTER: list_del(&fcoe->list); port = lport_priv(fcoe->ctlr.lp); - fcoe_interface_cleanup(fcoe); queue_work(fcoe_wq, &port->destroy_work); goto out; break; @@ -1827,22 +1911,22 @@ static int fcoe_destroy(struct net_device *netdev) { struct fcoe_interface *fcoe; struct fc_lport *lport; + struct fcoe_port *port; int rc = 0; mutex_lock(&fcoe_config_mutex); rtnl_lock(); fcoe = fcoe_hostlist_lookup_port(netdev); if (!fcoe) { - rtnl_unlock(); rc = -ENODEV; goto out_nodev; } lport = fcoe->ctlr.lp; + port = lport_priv(lport); list_del(&fcoe->list); - fcoe_interface_cleanup(fcoe); - rtnl_unlock(); - fcoe_if_destroy(lport); + queue_work(fcoe_wq, &port->destroy_work); out_nodev: + rtnl_unlock(); mutex_unlock(&fcoe_config_mutex); return rc; } @@ -1854,10 +1938,25 @@ out_nodev: static void fcoe_destroy_work(struct work_struct *work) { struct fcoe_port *port; + struct fcoe_interface *fcoe; + int npiv = 0; port = container_of(work, struct fcoe_port, destroy_work); mutex_lock(&fcoe_config_mutex); + + /* set if this is an NPIV port */ + npiv = port->lport->vport ? 1 : 0; + + fcoe = port->priv; fcoe_if_destroy(port->lport); + + /* Do not tear down the fcoe interface for NPIV port */ + if (!npiv) { + rtnl_lock(); + fcoe_interface_cleanup(fcoe); + rtnl_unlock(); + } + mutex_unlock(&fcoe_config_mutex); } @@ -1886,7 +1985,7 @@ static bool fcoe_match(struct net_device *netdev) */ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode) { - int rc; + int rc = 0; struct fcoe_interface *fcoe; struct fc_lport *lport; @@ -1911,7 +2010,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode) netdev->name); rc = -EIO; fcoe_interface_cleanup(fcoe); - goto out_free; + goto out_nodev; } /* Make this the "master" N_Port */ @@ -1926,17 +2025,6 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode) if (!fcoe_link_ok(lport)) fcoe_ctlr_link_up(&fcoe->ctlr); - /* - * Release from init in fcoe_interface_create(), on success lport - * should be holding a reference taken in fcoe_if_create(). 
- */ - fcoe_interface_put(fcoe); - rtnl_unlock(); - mutex_unlock(&fcoe_config_mutex); - - return 0; -out_free: - fcoe_interface_put(fcoe); out_nodev: rtnl_unlock(); mutex_unlock(&fcoe_config_mutex); @@ -2218,7 +2306,6 @@ static void __exit fcoe_exit(void) list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) { list_del(&fcoe->list); port = lport_priv(fcoe->ctlr.lp); - fcoe_interface_cleanup(fcoe); queue_work(fcoe_wq, &port->destroy_work); } rtnl_unlock(); diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index 671cde9d406..95a5ba29320 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h @@ -37,7 +37,7 @@ #define DRV_NAME "fnic" #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" -#define DRV_VERSION "1.5.0.1" +#define DRV_VERSION "1.5.0.2" #define PFX DRV_NAME ": " #define DFX DRV_NAME "%d: " diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index bb63f1a1f80..fc98eb61e76 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -388,17 +388,6 @@ static void fnic_iounmap(struct fnic *fnic) iounmap(fnic->bar0.vaddr); } -/* - * Allocate element for mempools requiring GFP_DMA flag. - * Otherwise, checks in kmem_flagcheck() hit BUG_ON(). - */ -static void *fnic_alloc_slab_dma(gfp_t gfp_mask, void *pool_data) -{ - struct kmem_cache *mem = pool_data; - - return kmem_cache_alloc(mem, gfp_mask | GFP_ATOMIC | GFP_DMA); -} - /** * fnic_get_mac() - get assigned data MAC address for FIP code. * @lport: local port. @@ -603,14 +592,12 @@ static int __devinit fnic_probe(struct pci_dev *pdev, if (!fnic->io_req_pool) goto err_out_free_resources; - pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab, - fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); + pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); if (!pool) goto err_out_free_ioreq_pool; fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool; - pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab, - fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); + pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); if (!pool) goto err_out_free_dflt_pool; fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool; @@ -876,7 +863,7 @@ static int __init fnic_init_module(void) len = sizeof(struct fnic_dflt_sgl_list); fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create ("fnic_sgl_dflt", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN, - SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, + SLAB_HWCACHE_ALIGN, NULL); if (!fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]) { printk(KERN_ERR PFX "failed to create fnic dflt sgl slab\n"); @@ -888,7 +875,7 @@ static int __init fnic_init_module(void) len = sizeof(struct fnic_sgl_list); fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create ("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN, - SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, + SLAB_HWCACHE_ALIGN, NULL); if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) { printk(KERN_ERR PFX "failed to create fnic max sgl slab\n"); diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index 538b31c2cf5..c40ce52ed7c 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -406,7 +406,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ if (sg_count) { io_req->sgl_list = mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type], - GFP_ATOMIC | GFP_DMA); + GFP_ATOMIC); if (!io_req->sgl_list) { ret = SCSI_MLQUEUE_HOST_BUSY; scsi_dma_unmap(sc); diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index c6c0434d803..ec61bdb833a 100644 --- 
a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -46,7 +46,7 @@ #include <linux/cciss_ioctl.h> #include <linux/string.h> #include <linux/bitmap.h> -#include <asm/atomic.h> +#include <linux/atomic.h> #include <linux/kthread.h> #include "hpsa_cmd.h" #include "hpsa.h" @@ -1037,6 +1037,7 @@ static void complete_scsi_command(struct CommandList *cp) unsigned char sense_key; unsigned char asc; /* additional sense code */ unsigned char ascq; /* additional sense code qualifier */ + unsigned long sense_data_size; ei = cp->err_info; cmd = (struct scsi_cmnd *) cp->scsi_cmd; @@ -1051,10 +1052,14 @@ static void complete_scsi_command(struct CommandList *cp) cmd->result |= ei->ScsiStatus; /* copy the sense data whether we need to or not. */ - memcpy(cmd->sense_buffer, ei->SenseInfo, - ei->SenseLen > SCSI_SENSE_BUFFERSIZE ? - SCSI_SENSE_BUFFERSIZE : - ei->SenseLen); + if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo)) + sense_data_size = SCSI_SENSE_BUFFERSIZE; + else + sense_data_size = sizeof(ei->SenseInfo); + if (ei->SenseLen < sense_data_size) + sense_data_size = ei->SenseLen; + + memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size); scsi_set_resid(cmd, ei->ResidualCnt); if (ei->CommandStatus == 0) { @@ -1214,8 +1219,8 @@ static void complete_scsi_command(struct CommandList *cp) dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp); break; case CMD_UNSOLICITED_ABORT: - cmd->result = DID_RESET << 16; - dev_warn(&h->pdev->dev, "cp %p aborted do to an unsolicited " + cmd->result = DID_SOFT_ERROR << 16; /* retry the command */ + dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited " "abort\n", cp); break; case CMD_TIMEOUT: @@ -2580,7 +2585,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) c->SG[0].Ext = 0; /* we are not chaining*/ } hpsa_scsi_do_simple_cmd_core(h, c); - hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL); + if (iocommand.buf_size > 0) + hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL); check_ioctl_unit_attention(h, c); /* Copy the error information out */ diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index 6d8dcd4dd06..7f53ceaa723 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h @@ -214,7 +214,7 @@ static void SA5_submit_command(struct ctlr_info *h, dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr, c->Header.Tag.lower); writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); - (void) readl(h->vaddr + SA5_REQUEST_PORT_OFFSET); + (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); h->commands_outstanding++; if (h->commands_outstanding > h->max_outstanding) h->max_outstanding = h->commands_outstanding; diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index b7650613b8c..bdfa223a7db 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -4306,8 +4306,8 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost) spin_lock_irqsave(vhost->host->host_lock, flags); if (rc == H_CLOSED) vio_enable_interrupts(to_vio_dev(vhost->dev)); - else if (rc || (rc = ibmvfc_send_crq_init(vhost)) || - (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) { + if (rc || (rc = ibmvfc_send_crq_init(vhost)) || + (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) { ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc); } diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 888086c4e70..8d636301e32 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -8778,14 +8778,14 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev, if (rc 
!= PCIBIOS_SUCCESSFUL) { dev_err(&pdev->dev, "Failed to save PCI config space\n"); rc = -EIO; - goto cleanup_nomem; + goto out_msi_disable; } if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg))) - goto cleanup_nomem; + goto out_msi_disable; if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg))) - goto cleanup_nomem; + goto out_msi_disable; if (ioa_cfg->sis64) ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64) @@ -8800,7 +8800,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev, if (rc < 0) { dev_err(&pdev->dev, "Couldn't allocate enough memory for device driver!\n"); - goto cleanup_nomem; + goto out_msi_disable; } /* @@ -8845,10 +8845,10 @@ out: cleanup_nolog: ipr_free_mem(ioa_cfg); -cleanup_nomem: - iounmap(ipr_regs); out_msi_disable: pci_disable_msi(pdev); +cleanup_nomem: + iounmap(ipr_regs); out_release_regions: pci_release_regions(pdev); out_scsi_host_put: diff --git a/drivers/scsi/isci/Makefile b/drivers/scsi/isci/Makefile new file mode 100644 index 00000000000..3359e10e0d8 --- /dev/null +++ b/drivers/scsi/isci/Makefile @@ -0,0 +1,8 @@ +obj-$(CONFIG_SCSI_ISCI) += isci.o +isci-objs := init.o phy.o request.o \ + remote_device.o port.o \ + host.o task.o probe_roms.o \ + remote_node_context.o \ + remote_node_table.o \ + unsolicited_frame_control.o \ + port_config.o \ diff --git a/drivers/scsi/isci/firmware/Makefile b/drivers/scsi/isci/firmware/Makefile new file mode 100644 index 00000000000..5f54461cabc --- /dev/null +++ b/drivers/scsi/isci/firmware/Makefile @@ -0,0 +1,19 @@ +# Makefile for create_fw +# +CC=gcc +CFLAGS=-c -Wall -O2 -g +LDFLAGS= +SOURCES=create_fw.c +OBJECTS=$(SOURCES:.cpp=.o) +EXECUTABLE=create_fw + +all: $(SOURCES) $(EXECUTABLE) + +$(EXECUTABLE): $(OBJECTS) + $(CC) $(LDFLAGS) $(OBJECTS) -o $@ + +.c.o: + $(CC) $(CFLAGS) $< -O $@ + +clean: + rm -f *.o $(EXECUTABLE) diff --git a/drivers/scsi/isci/firmware/README b/drivers/scsi/isci/firmware/README new file mode 100644 index 00000000000..8056d2bd233 --- /dev/null +++ b/drivers/scsi/isci/firmware/README @@ -0,0 +1,36 @@ +This defines the temporary binary blow we are to pass to the SCU +driver to emulate the binary firmware that we will eventually be +able to access via NVRAM on the SCU controller. + +The current size of the binary blob is expected to be 149 bytes or larger + +Header Types: +0x1: Phy Masks +0x2: Phy Gens +0x3: SAS Addrs +0xff: End of Data + +ID string - u8[12]: "#SCU MAGIC#\0" +Version - u8: 1 +SubVersion - u8: 0 + +Header Type - u8: 0x1 +Size - u8: 8 +Phy Mask - u32[8] + +Header Type - u8: 0x2 +Size - u8: 8 +Phy Gen - u32[8] + +Header Type - u8: 0x3 +Size - u8: 8 +Sas Addr - u64[8] + +Header Type - u8: 0xf + + +============================================================================== + +Place isci_firmware.bin in /lib/firmware +Be sure to recreate the initramfs image to include the firmware. 
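The layout this README spells out (a 12-byte ID string, version and sub-version bytes, then typed records terminated by an end-of-data header) is small enough to sketch; the real generator is the create_fw tool that follows, which works from the isci_orom structures in probe_roms.h. Everything named below is made up for the illustration:

#include <stdint.h>
#include <stdio.h>

static int sketch_write_blob(FILE *f)
{
	static const char id[12] = "#SCU MAGIC#";	/* 11 characters plus the trailing '\0' */
	uint8_t version = 1, subversion = 0;
	uint8_t phy_mask_hdr[2] = { 0x1, 8 };		/* header type 0x1, eight entries */
	uint32_t phy_mask[8] = { 0 };
	uint8_t end_of_data = 0xff;

	if (fwrite(id, sizeof(id), 1, f) != 1 ||
	    fwrite(&version, 1, 1, f) != 1 ||
	    fwrite(&subversion, 1, 1, f) != 1 ||
	    fwrite(phy_mask_hdr, sizeof(phy_mask_hdr), 1, f) != 1 ||
	    fwrite(phy_mask, sizeof(phy_mask), 1, f) != 1 ||
	    fwrite(&end_of_data, 1, 1, f) != 1)
		return -1;

	/* the Phy Gen (0x2) and SAS Addr (0x3) records follow the same type/size/payload pattern */
	return 0;
}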
+ diff --git a/drivers/scsi/isci/firmware/create_fw.c b/drivers/scsi/isci/firmware/create_fw.c new file mode 100644 index 00000000000..c7a2887a7e9 --- /dev/null +++ b/drivers/scsi/isci/firmware/create_fw.c @@ -0,0 +1,99 @@ +#include <stdio.h> +#include <stdlib.h> +#include <unistd.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <fcntl.h> +#include <string.h> +#include <errno.h> +#include <asm/types.h> +#include <strings.h> +#include <stdint.h> + +#include "create_fw.h" +#include "../probe_roms.h" + +int write_blob(struct isci_orom *isci_orom) +{ + FILE *fd; + int err; + size_t count; + + fd = fopen(blob_name, "w+"); + if (!fd) { + perror("Open file for write failed"); + fclose(fd); + return -EIO; + } + + count = fwrite(isci_orom, sizeof(struct isci_orom), 1, fd); + if (count != 1) { + perror("Write data failed"); + fclose(fd); + return -EIO; + } + + fclose(fd); + + return 0; +} + +void set_binary_values(struct isci_orom *isci_orom) +{ + int ctrl_idx, phy_idx, port_idx; + + /* setting OROM signature */ + strncpy(isci_orom->hdr.signature, sig, strlen(sig)); + isci_orom->hdr.version = version; + isci_orom->hdr.total_block_length = sizeof(struct isci_orom); + isci_orom->hdr.hdr_length = sizeof(struct sci_bios_oem_param_block_hdr); + isci_orom->hdr.num_elements = num_elements; + + for (ctrl_idx = 0; ctrl_idx < 2; ctrl_idx++) { + isci_orom->ctrl[ctrl_idx].controller.mode_type = mode_type; + isci_orom->ctrl[ctrl_idx].controller.max_concurrent_dev_spin_up = + max_num_concurrent_dev_spin_up; + isci_orom->ctrl[ctrl_idx].controller.do_enable_ssc = + enable_ssc; + + for (port_idx = 0; port_idx < 4; port_idx++) + isci_orom->ctrl[ctrl_idx].ports[port_idx].phy_mask = + phy_mask[ctrl_idx][port_idx]; + + for (phy_idx = 0; phy_idx < 4; phy_idx++) { + isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.high = + (__u32)(sas_addr[ctrl_idx][phy_idx] >> 32); + isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.low = + (__u32)(sas_addr[ctrl_idx][phy_idx]); + + isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control0 = + afe_tx_amp_control0; + isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control1 = + afe_tx_amp_control1; + isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control2 = + afe_tx_amp_control2; + isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control3 = + afe_tx_amp_control3; + } + } +} + +int main(void) +{ + int err; + struct isci_orom *isci_orom; + + isci_orom = malloc(sizeof(struct isci_orom)); + memset(isci_orom, 0, sizeof(struct isci_orom)); + + set_binary_values(isci_orom); + + err = write_blob(isci_orom); + if (err < 0) { + free(isci_orom); + return err; + } + + free(isci_orom); + return 0; +} diff --git a/drivers/scsi/isci/firmware/create_fw.h b/drivers/scsi/isci/firmware/create_fw.h new file mode 100644 index 00000000000..5f298828d22 --- /dev/null +++ b/drivers/scsi/isci/firmware/create_fw.h @@ -0,0 +1,77 @@ +#ifndef _CREATE_FW_H_ +#define _CREATE_FW_H_ +#include "../probe_roms.h" + + +/* we are configuring for 2 SCUs */ +static const int num_elements = 2; + +/* + * For all defined arrays: + * elements 0-3 are for SCU0, ports 0-3 + * elements 4-7 are for SCU1, ports 0-3 + * + * valid configurations for one SCU are: + * P0 P1 P2 P3 + * ---------------- + * 0xF,0x0,0x0,0x0 # 1 x4 port + * 0x3,0x0,0x4,0x8 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are each x1 + * # ports + * 0x1,0x2,0xC,0x0 # Phys 0 and 1 are each x1 ports, phy 2 and phy 3 are a x2 + * # port + * 0x3,0x0,0xC,0x0 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are a x2 port + * 0x1,0x2,0x4,0x8 # Each phy 
is a x1 port (this is the default configuration) + * + * if there is a port/phy on which you do not wish to override the default + * values, use the value assigned to UNINIT_PARAM (255). + */ + +/* discovery mode type (port auto config mode by default ) */ + +/* + * if there is a port/phy on which you do not wish to override the default + * values, use the value "0000000000000000". SAS address of zero's is + * considered invalid and will not be used. + */ +#ifdef MPC +static const int mode_type = SCIC_PORT_MANUAL_CONFIGURATION_MODE; +static const __u8 phy_mask[2][4] = { {1, 2, 4, 8}, + {1, 2, 4, 8} }; +static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFFF0000001ULL, + 0x5FCFFFFFF0000002ULL, + 0x5FCFFFFFF0000003ULL, + 0x5FCFFFFFF0000004ULL }, + { 0x5FCFFFFFF0000005ULL, + 0x5FCFFFFFF0000006ULL, + 0x5FCFFFFFF0000007ULL, + 0x5FCFFFFFF0000008ULL } }; +#else /* APC (default) */ +static const int mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE; +static const __u8 phy_mask[2][4]; +static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFF00000001ULL, + 0x5FCFFFFF00000001ULL, + 0x5FCFFFFF00000001ULL, + 0x5FCFFFFF00000001ULL }, + { 0x5FCFFFFF00000002ULL, + 0x5FCFFFFF00000002ULL, + 0x5FCFFFFF00000002ULL, + 0x5FCFFFFF00000002ULL } }; +#endif + +/* Maximum number of concurrent device spin up */ +static const int max_num_concurrent_dev_spin_up = 1; + +/* enable of ssc operation */ +static const int enable_ssc; + +/* AFE_TX_AMP_CONTROL */ +static const unsigned int afe_tx_amp_control0 = 0x000bdd08; +static const unsigned int afe_tx_amp_control1 = 0x000ffc00; +static const unsigned int afe_tx_amp_control2 = 0x000b7c09; +static const unsigned int afe_tx_amp_control3 = 0x000afc6e; + +static const char blob_name[] = "isci_firmware.bin"; +static const char sig[] = "ISCUOEMB"; +static const unsigned char version = 0x10; + +#endif diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c new file mode 100644 index 00000000000..26072f1e985 --- /dev/null +++ b/drivers/scsi/isci/host.c @@ -0,0 +1,2751 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include <linux/circ_buf.h> +#include <linux/device.h> +#include <scsi/sas.h> +#include "host.h" +#include "isci.h" +#include "port.h" +#include "host.h" +#include "probe_roms.h" +#include "remote_device.h" +#include "request.h" +#include "scu_completion_codes.h" +#include "scu_event_codes.h" +#include "registers.h" +#include "scu_remote_node_context.h" +#include "scu_task_context.h" + +#define SCU_CONTEXT_RAM_INIT_STALL_TIME 200 + +#define smu_max_ports(dcc_value) \ + (\ + (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \ + >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \ + ) + +#define smu_max_task_contexts(dcc_value) \ + (\ + (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \ + >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \ + ) + +#define smu_max_rncs(dcc_value) \ + (\ + (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \ + >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \ + ) + +#define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT 100 + +/** + * + * + * The number of milliseconds to wait while a given phy is consuming power + * before allowing another set of phys to consume power. Ultimately, this will + * be specified by OEM parameter. + */ +#define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500 + +/** + * NORMALIZE_PUT_POINTER() - + * + * This macro will normalize the completion queue put pointer so its value can + * be used as an array inde + */ +#define NORMALIZE_PUT_POINTER(x) \ + ((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK) + + +/** + * NORMALIZE_EVENT_POINTER() - + * + * This macro will normalize the completion queue event entry so its value can + * be used as an index. 
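An aside on the cycle-bit convention that these NORMALIZE_*() helpers and COMPLETION_QUEUE_CYCLE_BIT() (just below) encode, and that the has-entries check further down applies: the producer stamps every entry it writes with its current cycle value and flips that value each time it wraps the ring, so the consumer can tell fresh entries from stale ones without reading a producer index. A stand-alone illustration with made-up sizes, not the SCU register layout:

#define MODEL_CQ_ENTRIES 16		/* power of two; stands in for SCU_MAX_COMPLETION_QUEUE_ENTRIES */
#define MODEL_CYCLE_BIT  0x80000000u	/* stands in for the SMU cycle bit */

struct model_cq {
	unsigned int entry[MODEL_CQ_ENTRIES];	/* written by the producer */
	unsigned int get;			/* low bits: next index, top bit: expected cycle */
};

static int model_cq_entry_valid(const struct model_cq *cq)
{
	unsigned int idx = cq->get & (MODEL_CQ_ENTRIES - 1);

	/* an entry is consumable only while its cycle bit matches the expected cycle */
	return (cq->get & MODEL_CYCLE_BIT) == (cq->entry[idx] & MODEL_CYCLE_BIT);
}

static void model_cq_advance(struct model_cq *cq)
{
	unsigned int idx = (cq->get & (MODEL_CQ_ENTRIES - 1)) + 1;

	if (idx == MODEL_CQ_ENTRIES)
		cq->get = (cq->get ^ MODEL_CYCLE_BIT) & MODEL_CYCLE_BIT;	/* wrap: index 0, flip expected cycle */
	else
		cq->get = (cq->get & MODEL_CYCLE_BIT) | idx;
}

The driver additionally packs an event-queue pointer, its own cycle bit and enable flags into the same get register, which is what the NORMALIZE_*() masks and shifts recover.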
+ */ +#define NORMALIZE_EVENT_POINTER(x) \ + (\ + ((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \ + >> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \ + ) + +/** + * NORMALIZE_GET_POINTER() - + * + * This macro will normalize the completion queue get pointer so its value can + * be used as an index into an array + */ +#define NORMALIZE_GET_POINTER(x) \ + ((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK) + +/** + * NORMALIZE_GET_POINTER_CYCLE_BIT() - + * + * This macro will normalize the completion queue cycle pointer so it matches + * the completion queue cycle bit + */ +#define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \ + ((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT)) + +/** + * COMPLETION_QUEUE_CYCLE_BIT() - + * + * This macro will return the cycle bit of the completion queue entry + */ +#define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000) + +/* Init the state machine and call the state entry function (if any) */ +void sci_init_sm(struct sci_base_state_machine *sm, + const struct sci_base_state *state_table, u32 initial_state) +{ + sci_state_transition_t handler; + + sm->initial_state_id = initial_state; + sm->previous_state_id = initial_state; + sm->current_state_id = initial_state; + sm->state_table = state_table; + + handler = sm->state_table[initial_state].enter_state; + if (handler) + handler(sm); +} + +/* Call the state exit fn, update the current state, call the state entry fn */ +void sci_change_state(struct sci_base_state_machine *sm, u32 next_state) +{ + sci_state_transition_t handler; + + handler = sm->state_table[sm->current_state_id].exit_state; + if (handler) + handler(sm); + + sm->previous_state_id = sm->current_state_id; + sm->current_state_id = next_state; + + handler = sm->state_table[sm->current_state_id].enter_state; + if (handler) + handler(sm); +} + +static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost) +{ + u32 get_value = ihost->completion_queue_get; + u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK; + + if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) == + COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index])) + return true; + + return false; +} + +static bool sci_controller_isr(struct isci_host *ihost) +{ + if (sci_controller_completion_queue_has_entries(ihost)) { + return true; + } else { + /* + * we have a spurious interrupt it could be that we have already + * emptied the completion queue from a previous interrupt */ + writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status); + + /* + * There is a race in the hardware that could cause us not to be notified + * of an interrupt completion if we do not take this step. We will mask + * then unmask the interrupts so if there is another interrupt pending + * the clearing of the interrupt source we get the next interrupt message. 
*/ + writel(0xFF000000, &ihost->smu_registers->interrupt_mask); + writel(0, &ihost->smu_registers->interrupt_mask); + } + + return false; +} + +irqreturn_t isci_msix_isr(int vec, void *data) +{ + struct isci_host *ihost = data; + + if (sci_controller_isr(ihost)) + tasklet_schedule(&ihost->completion_tasklet); + + return IRQ_HANDLED; +} + +static bool sci_controller_error_isr(struct isci_host *ihost) +{ + u32 interrupt_status; + + interrupt_status = + readl(&ihost->smu_registers->interrupt_status); + interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND); + + if (interrupt_status != 0) { + /* + * There is an error interrupt pending so let it through and handle + * in the callback */ + return true; + } + + /* + * There is a race in the hardware that could cause us not to be notified + * of an interrupt completion if we do not take this step. We will mask + * then unmask the error interrupts so if there was another interrupt + * pending we will be notified. + * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? */ + writel(0xff, &ihost->smu_registers->interrupt_mask); + writel(0, &ihost->smu_registers->interrupt_mask); + + return false; +} + +static void sci_controller_task_completion(struct isci_host *ihost, u32 ent) +{ + u32 index = SCU_GET_COMPLETION_INDEX(ent); + struct isci_request *ireq = ihost->reqs[index]; + + /* Make sure that we really want to process this IO request */ + if (test_bit(IREQ_ACTIVE, &ireq->flags) && + ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG && + ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index]) + /* Yep this is a valid io request pass it along to the + * io request handler + */ + sci_io_request_tc_completion(ireq, ent); +} + +static void sci_controller_sdma_completion(struct isci_host *ihost, u32 ent) +{ + u32 index; + struct isci_request *ireq; + struct isci_remote_device *idev; + + index = SCU_GET_COMPLETION_INDEX(ent); + + switch (scu_get_command_request_type(ent)) { + case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC: + case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC: + ireq = ihost->reqs[index]; + dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n", + __func__, ent, ireq); + /* @todo For a post TC operation we need to fail the IO + * request + */ + break; + case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC: + case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC: + case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC: + idev = ihost->device_table[index]; + dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n", + __func__, ent, idev); + /* @todo For a port RNC operation we need to fail the + * device + */ + break; + default: + dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n", + __func__, ent); + break; + } +} + +static void sci_controller_unsolicited_frame(struct isci_host *ihost, u32 ent) +{ + u32 index; + u32 frame_index; + + struct scu_unsolicited_frame_header *frame_header; + struct isci_phy *iphy; + struct isci_remote_device *idev; + + enum sci_status result = SCI_FAILURE; + + frame_index = SCU_GET_FRAME_INDEX(ent); + + frame_header = ihost->uf_control.buffers.array[frame_index].header; + ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE; + + if (SCU_GET_FRAME_ERROR(ent)) { + /* + * / @todo If the IAF frame or SIGNATURE FIS frame has an error will + * / this cause a problem? We expect the phy initialization will + * / fail if there is an error in the frame. 
*/ + sci_controller_release_frame(ihost, frame_index); + return; + } + + if (frame_header->is_address_frame) { + index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent); + iphy = &ihost->phys[index]; + result = sci_phy_frame_handler(iphy, frame_index); + } else { + + index = SCU_GET_COMPLETION_INDEX(ent); + + if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) { + /* + * This is a signature fis or a frame from a direct attached SATA + * device that has not yet been created. In either case forwared + * the frame to the PE and let it take care of the frame data. */ + index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent); + iphy = &ihost->phys[index]; + result = sci_phy_frame_handler(iphy, frame_index); + } else { + if (index < ihost->remote_node_entries) + idev = ihost->device_table[index]; + else + idev = NULL; + + if (idev != NULL) + result = sci_remote_device_frame_handler(idev, frame_index); + else + sci_controller_release_frame(ihost, frame_index); + } + } + + if (result != SCI_SUCCESS) { + /* + * / @todo Is there any reason to report some additional error message + * / when we get this failure notifiction? */ + } +} + +static void sci_controller_event_completion(struct isci_host *ihost, u32 ent) +{ + struct isci_remote_device *idev; + struct isci_request *ireq; + struct isci_phy *iphy; + u32 index; + + index = SCU_GET_COMPLETION_INDEX(ent); + + switch (scu_get_event_type(ent)) { + case SCU_EVENT_TYPE_SMU_COMMAND_ERROR: + /* / @todo The driver did something wrong and we need to fix the condtion. */ + dev_err(&ihost->pdev->dev, + "%s: SCIC Controller 0x%p received SMU command error " + "0x%x\n", + __func__, + ihost, + ent); + break; + + case SCU_EVENT_TYPE_SMU_PCQ_ERROR: + case SCU_EVENT_TYPE_SMU_ERROR: + case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR: + /* + * / @todo This is a hardware failure and its likely that we want to + * / reset the controller. */ + dev_err(&ihost->pdev->dev, + "%s: SCIC Controller 0x%p received fatal controller " + "event 0x%x\n", + __func__, + ihost, + ent); + break; + + case SCU_EVENT_TYPE_TRANSPORT_ERROR: + ireq = ihost->reqs[index]; + sci_io_request_event_handler(ireq, ent); + break; + + case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT: + switch (scu_get_event_specifier(ent)) { + case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE: + case SCU_EVENT_SPECIFIC_TASK_TIMEOUT: + ireq = ihost->reqs[index]; + if (ireq != NULL) + sci_io_request_event_handler(ireq, ent); + else + dev_warn(&ihost->pdev->dev, + "%s: SCIC Controller 0x%p received " + "event 0x%x for io request object " + "that doesnt exist.\n", + __func__, + ihost, + ent); + + break; + + case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT: + idev = ihost->device_table[index]; + if (idev != NULL) + sci_remote_device_event_handler(idev, ent); + else + dev_warn(&ihost->pdev->dev, + "%s: SCIC Controller 0x%p received " + "event 0x%x for remote device object " + "that doesnt exist.\n", + __func__, + ihost, + ent); + + break; + } + break; + + case SCU_EVENT_TYPE_BROADCAST_CHANGE: + /* + * direct the broadcast change event to the phy first and then let + * the phy redirect the broadcast change to the port object */ + case SCU_EVENT_TYPE_ERR_CNT_EVENT: + /* + * direct error counter event to the phy object since that is where + * we get the event notification. This is a type 4 event. 
*/ + case SCU_EVENT_TYPE_OSSP_EVENT: + index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent); + iphy = &ihost->phys[index]; + sci_phy_event_handler(iphy, ent); + break; + + case SCU_EVENT_TYPE_RNC_SUSPEND_TX: + case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: + case SCU_EVENT_TYPE_RNC_OPS_MISC: + if (index < ihost->remote_node_entries) { + idev = ihost->device_table[index]; + + if (idev != NULL) + sci_remote_device_event_handler(idev, ent); + } else + dev_err(&ihost->pdev->dev, + "%s: SCIC Controller 0x%p received event 0x%x " + "for remote device object 0x%0x that doesnt " + "exist.\n", + __func__, + ihost, + ent, + index); + + break; + + default: + dev_warn(&ihost->pdev->dev, + "%s: SCIC Controller received unknown event code %x\n", + __func__, + ent); + break; + } +} + +static void sci_controller_process_completions(struct isci_host *ihost) +{ + u32 completion_count = 0; + u32 ent; + u32 get_index; + u32 get_cycle; + u32 event_get; + u32 event_cycle; + + dev_dbg(&ihost->pdev->dev, + "%s: completion queue begining get:0x%08x\n", + __func__, + ihost->completion_queue_get); + + /* Get the component parts of the completion queue */ + get_index = NORMALIZE_GET_POINTER(ihost->completion_queue_get); + get_cycle = SMU_CQGR_CYCLE_BIT & ihost->completion_queue_get; + + event_get = NORMALIZE_EVENT_POINTER(ihost->completion_queue_get); + event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & ihost->completion_queue_get; + + while ( + NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle) + == COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index]) + ) { + completion_count++; + + ent = ihost->completion_queue[get_index]; + + /* increment the get pointer and check for rollover to toggle the cycle bit */ + get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) << + (SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT - SCU_MAX_COMPLETION_QUEUE_SHIFT); + get_index = (get_index+1) & (SCU_MAX_COMPLETION_QUEUE_ENTRIES-1); + + dev_dbg(&ihost->pdev->dev, + "%s: completion queue entry:0x%08x\n", + __func__, + ent); + + switch (SCU_GET_COMPLETION_TYPE(ent)) { + case SCU_COMPLETION_TYPE_TASK: + sci_controller_task_completion(ihost, ent); + break; + + case SCU_COMPLETION_TYPE_SDMA: + sci_controller_sdma_completion(ihost, ent); + break; + + case SCU_COMPLETION_TYPE_UFI: + sci_controller_unsolicited_frame(ihost, ent); + break; + + case SCU_COMPLETION_TYPE_EVENT: + case SCU_COMPLETION_TYPE_NOTIFY: { + event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) << + (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT); + event_get = (event_get+1) & (SCU_MAX_EVENTS-1); + + sci_controller_event_completion(ihost, ent); + break; + } + default: + dev_warn(&ihost->pdev->dev, + "%s: SCIC Controller received unknown " + "completion type %x\n", + __func__, + ent); + break; + } + } + + /* Update the get register if we completed one or more entries */ + if (completion_count > 0) { + ihost->completion_queue_get = + SMU_CQGR_GEN_BIT(ENABLE) | + SMU_CQGR_GEN_BIT(EVENT_ENABLE) | + event_cycle | + SMU_CQGR_GEN_VAL(EVENT_POINTER, event_get) | + get_cycle | + SMU_CQGR_GEN_VAL(POINTER, get_index); + + writel(ihost->completion_queue_get, + &ihost->smu_registers->completion_queue_get); + + } + + dev_dbg(&ihost->pdev->dev, + "%s: completion queue ending get:0x%08x\n", + __func__, + ihost->completion_queue_get); + +} + +static void sci_controller_error_handler(struct isci_host *ihost) +{ + u32 interrupt_status; + + interrupt_status = + readl(&ihost->smu_registers->interrupt_status); + + if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) && + 
sci_controller_completion_queue_has_entries(ihost)) { + + sci_controller_process_completions(ihost); + writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status); + } else { + dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__, + interrupt_status); + + sci_change_state(&ihost->sm, SCIC_FAILED); + + return; + } + + /* If we dont process any completions I am not sure that we want to do this. + * We are in the middle of a hardware fault and should probably be reset. + */ + writel(0, &ihost->smu_registers->interrupt_mask); +} + +irqreturn_t isci_intx_isr(int vec, void *data) +{ + irqreturn_t ret = IRQ_NONE; + struct isci_host *ihost = data; + + if (sci_controller_isr(ihost)) { + writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status); + tasklet_schedule(&ihost->completion_tasklet); + ret = IRQ_HANDLED; + } else if (sci_controller_error_isr(ihost)) { + spin_lock(&ihost->scic_lock); + sci_controller_error_handler(ihost); + spin_unlock(&ihost->scic_lock); + ret = IRQ_HANDLED; + } + + return ret; +} + +irqreturn_t isci_error_isr(int vec, void *data) +{ + struct isci_host *ihost = data; + + if (sci_controller_error_isr(ihost)) + sci_controller_error_handler(ihost); + + return IRQ_HANDLED; +} + +/** + * isci_host_start_complete() - This function is called by the core library, + * through the ISCI Module, to indicate controller start status. + * @isci_host: This parameter specifies the ISCI host object + * @completion_status: This parameter specifies the completion status from the + * core library. + * + */ +static void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status) +{ + if (completion_status != SCI_SUCCESS) + dev_info(&ihost->pdev->dev, + "controller start timed out, continuing...\n"); + isci_host_change_state(ihost, isci_ready); + clear_bit(IHOST_START_PENDING, &ihost->flags); + wake_up(&ihost->eventq); +} + +int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time) +{ + struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha; + + if (test_bit(IHOST_START_PENDING, &ihost->flags)) + return 0; + + /* todo: use sas_flush_discovery once it is upstream */ + scsi_flush_work(shost); + + scsi_flush_work(shost); + + dev_dbg(&ihost->pdev->dev, + "%s: ihost->status = %d, time = %ld\n", + __func__, isci_host_get_state(ihost), time); + + return 1; + +} + +/** + * sci_controller_get_suggested_start_timeout() - This method returns the + * suggested sci_controller_start() timeout amount. The user is free to + * use any timeout value, but this method provides the suggested minimum + * start timeout value. The returned value is based upon empirical + * information determined as a result of interoperability testing. + * @controller: the handle to the controller object for which to return the + * suggested start timeout. + * + * This method returns the number of milliseconds for the suggested start + * operation timeout. + */ +static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost) +{ + /* Validate the user supplied parameters. */ + if (!ihost) + return 0; + + /* + * The suggested minimum timeout value for a controller start operation: + * + * Signature FIS Timeout + * + Phy Start Timeout + * + Number of Phy Spin Up Intervals + * --------------------------------- + * Number of milliseconds for the controller start operation. + * + * NOTE: The number of phy spin up intervals will be equivalent + * to the number of phys divided by the number phys allowed + * per interval - 1 (once OEM parameters are supported). 
+ * Currently we assume only 1 phy per interval. */ + + return SCIC_SDS_SIGNATURE_FIS_TIMEOUT + + SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT + + ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL); +} + +static void sci_controller_enable_interrupts(struct isci_host *ihost) +{ + BUG_ON(ihost->smu_registers == NULL); + writel(0, &ihost->smu_registers->interrupt_mask); +} + +void sci_controller_disable_interrupts(struct isci_host *ihost) +{ + BUG_ON(ihost->smu_registers == NULL); + writel(0xffffffff, &ihost->smu_registers->interrupt_mask); +} + +static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost) +{ + u32 port_task_scheduler_value; + + port_task_scheduler_value = + readl(&ihost->scu_registers->peg0.ptsg.control); + port_task_scheduler_value |= + (SCU_PTSGCR_GEN_BIT(ETM_ENABLE) | + SCU_PTSGCR_GEN_BIT(PTSG_ENABLE)); + writel(port_task_scheduler_value, + &ihost->scu_registers->peg0.ptsg.control); +} + +static void sci_controller_assign_task_entries(struct isci_host *ihost) +{ + u32 task_assignment; + + /* + * Assign all the TCs to function 0 + * TODO: Do we actually need to read this register to write it back? + */ + + task_assignment = + readl(&ihost->smu_registers->task_context_assignment[0]); + + task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) | + (SMU_TCA_GEN_VAL(ENDING, ihost->task_context_entries - 1)) | + (SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE)); + + writel(task_assignment, + &ihost->smu_registers->task_context_assignment[0]); + +} + +static void sci_controller_initialize_completion_queue(struct isci_host *ihost) +{ + u32 index; + u32 completion_queue_control_value; + u32 completion_queue_get_value; + u32 completion_queue_put_value; + + ihost->completion_queue_get = 0; + + completion_queue_control_value = + (SMU_CQC_QUEUE_LIMIT_SET(SCU_MAX_COMPLETION_QUEUE_ENTRIES - 1) | + SMU_CQC_EVENT_LIMIT_SET(SCU_MAX_EVENTS - 1)); + + writel(completion_queue_control_value, + &ihost->smu_registers->completion_queue_control); + + + /* Set the completion queue get pointer and enable the queue */ + completion_queue_get_value = ( + (SMU_CQGR_GEN_VAL(POINTER, 0)) + | (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0)) + | (SMU_CQGR_GEN_BIT(ENABLE)) + | (SMU_CQGR_GEN_BIT(EVENT_ENABLE)) + ); + + writel(completion_queue_get_value, + &ihost->smu_registers->completion_queue_get); + + /* Set the completion queue put pointer */ + completion_queue_put_value = ( + (SMU_CQPR_GEN_VAL(POINTER, 0)) + | (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0)) + ); + + writel(completion_queue_put_value, + &ihost->smu_registers->completion_queue_put); + + /* Initialize the cycle bit of the completion queue entries */ + for (index = 0; index < SCU_MAX_COMPLETION_QUEUE_ENTRIES; index++) { + /* + * If get.cycle_bit != completion_queue.cycle_bit + * its not a valid completion queue entry + * so at system start all entries are invalid */ + ihost->completion_queue[index] = 0x80000000; + } +} + +static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost) +{ + u32 frame_queue_control_value; + u32 frame_queue_get_value; + u32 frame_queue_put_value; + + /* Write the queue size */ + frame_queue_control_value = + SCU_UFQC_GEN_VAL(QUEUE_SIZE, SCU_MAX_UNSOLICITED_FRAMES); + + writel(frame_queue_control_value, + &ihost->scu_registers->sdma.unsolicited_frame_queue_control); + + /* Setup the get pointer for the unsolicited frame queue */ + frame_queue_get_value = ( + SCU_UFQGP_GEN_VAL(POINTER, 0) + | SCU_UFQGP_GEN_BIT(ENABLE_BIT) + ); + + writel(frame_queue_get_value, + 
&ihost->scu_registers->sdma.unsolicited_frame_get_pointer); + /* Setup the put pointer for the unsolicited frame queue */ + frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0); + writel(frame_queue_put_value, + &ihost->scu_registers->sdma.unsolicited_frame_put_pointer); +} + +static void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status) +{ + if (ihost->sm.current_state_id == SCIC_STARTING) { + /* + * We move into the ready state, because some of the phys/ports + * may be up and operational. + */ + sci_change_state(&ihost->sm, SCIC_READY); + + isci_host_start_complete(ihost, status); + } +} + +static bool is_phy_starting(struct isci_phy *iphy) +{ + enum sci_phy_states state; + + state = iphy->sm.current_state_id; + switch (state) { + case SCI_PHY_STARTING: + case SCI_PHY_SUB_INITIAL: + case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN: + case SCI_PHY_SUB_AWAIT_IAF_UF: + case SCI_PHY_SUB_AWAIT_SAS_POWER: + case SCI_PHY_SUB_AWAIT_SATA_POWER: + case SCI_PHY_SUB_AWAIT_SATA_PHY_EN: + case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN: + case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: + case SCI_PHY_SUB_FINAL: + return true; + default: + return false; + } +} + +/** + * sci_controller_start_next_phy - start phy + * @scic: controller + * + * If all the phys have been started, then attempt to transition the + * controller to the READY state and inform the user + * (sci_cb_controller_start_complete()). + */ +static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost) +{ + struct sci_oem_params *oem = &ihost->oem_parameters; + struct isci_phy *iphy; + enum sci_status status; + + status = SCI_SUCCESS; + + if (ihost->phy_startup_timer_pending) + return status; + + if (ihost->next_phy_to_start >= SCI_MAX_PHYS) { + bool is_controller_start_complete = true; + u32 state; + u8 index; + + for (index = 0; index < SCI_MAX_PHYS; index++) { + iphy = &ihost->phys[index]; + state = iphy->sm.current_state_id; + + if (!phy_get_non_dummy_port(iphy)) + continue; + + /* The controller start operation is complete iff: + * - all links have been given an opportunity to start + * - have no indication of a connected device + * - have an indication of a connected device and it has + * finished the link training process. + */ + if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) || + (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) || + (iphy->is_in_link_training == true && is_phy_starting(iphy))) { + is_controller_start_complete = false; + break; + } + } + + /* + * The controller has successfully finished the start process. + * Inform the SCI Core user and transition to the READY state. 
*/ + if (is_controller_start_complete == true) { + sci_controller_transition_to_ready(ihost, SCI_SUCCESS); + sci_del_timer(&ihost->phy_timer); + ihost->phy_startup_timer_pending = false; + } + } else { + iphy = &ihost->phys[ihost->next_phy_to_start]; + + if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) { + if (phy_get_non_dummy_port(iphy) == NULL) { + ihost->next_phy_to_start++; + + /* Caution recursion ahead be forwarned + * + * The PHY was never added to a PORT in MPC mode + * so start the next phy in sequence This phy + * will never go link up and will not draw power + * the OEM parameters either configured the phy + * incorrectly for the PORT or it was never + * assigned to a PORT + */ + return sci_controller_start_next_phy(ihost); + } + } + + status = sci_phy_start(iphy); + + if (status == SCI_SUCCESS) { + sci_mod_timer(&ihost->phy_timer, + SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT); + ihost->phy_startup_timer_pending = true; + } else { + dev_warn(&ihost->pdev->dev, + "%s: Controller stop operation failed " + "to stop phy %d because of status " + "%d.\n", + __func__, + ihost->phys[ihost->next_phy_to_start].phy_index, + status); + } + + ihost->next_phy_to_start++; + } + + return status; +} + +static void phy_startup_timeout(unsigned long data) +{ + struct sci_timer *tmr = (struct sci_timer *)data; + struct isci_host *ihost = container_of(tmr, typeof(*ihost), phy_timer); + unsigned long flags; + enum sci_status status; + + spin_lock_irqsave(&ihost->scic_lock, flags); + + if (tmr->cancel) + goto done; + + ihost->phy_startup_timer_pending = false; + + do { + status = sci_controller_start_next_phy(ihost); + } while (status != SCI_SUCCESS); + +done: + spin_unlock_irqrestore(&ihost->scic_lock, flags); +} + +static u16 isci_tci_active(struct isci_host *ihost) +{ + return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS); +} + +static enum sci_status sci_controller_start(struct isci_host *ihost, + u32 timeout) +{ + enum sci_status result; + u16 index; + + if (ihost->sm.current_state_id != SCIC_INITIALIZED) { + dev_warn(&ihost->pdev->dev, + "SCIC Controller start operation requested in " + "invalid state\n"); + return SCI_FAILURE_INVALID_STATE; + } + + /* Build the TCi free pool */ + BUILD_BUG_ON(SCI_MAX_IO_REQUESTS > 1 << sizeof(ihost->tci_pool[0]) * 8); + ihost->tci_head = 0; + ihost->tci_tail = 0; + for (index = 0; index < ihost->task_context_entries; index++) + isci_tci_free(ihost, index); + + /* Build the RNi free pool */ + sci_remote_node_table_initialize(&ihost->available_remote_nodes, + ihost->remote_node_entries); + + /* + * Before anything else lets make sure we will not be + * interrupted by the hardware. 
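+	 * (They are re-enabled from isci_host_scan_start() once
+	 * sci_controller_start() returns.)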
+ */ + sci_controller_disable_interrupts(ihost); + + /* Enable the port task scheduler */ + sci_controller_enable_port_task_scheduler(ihost); + + /* Assign all the task entries to ihost physical function */ + sci_controller_assign_task_entries(ihost); + + /* Now initialize the completion queue */ + sci_controller_initialize_completion_queue(ihost); + + /* Initialize the unsolicited frame queue for use */ + sci_controller_initialize_unsolicited_frame_queue(ihost); + + /* Start all of the ports on this controller */ + for (index = 0; index < ihost->logical_port_entries; index++) { + struct isci_port *iport = &ihost->ports[index]; + + result = sci_port_start(iport); + if (result) + return result; + } + + sci_controller_start_next_phy(ihost); + + sci_mod_timer(&ihost->timer, timeout); + + sci_change_state(&ihost->sm, SCIC_STARTING); + + return SCI_SUCCESS; +} + +void isci_host_scan_start(struct Scsi_Host *shost) +{ + struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha; + unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost); + + set_bit(IHOST_START_PENDING, &ihost->flags); + + spin_lock_irq(&ihost->scic_lock); + sci_controller_start(ihost, tmo); + sci_controller_enable_interrupts(ihost); + spin_unlock_irq(&ihost->scic_lock); +} + +static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status) +{ + isci_host_change_state(ihost, isci_stopped); + sci_controller_disable_interrupts(ihost); + clear_bit(IHOST_STOP_PENDING, &ihost->flags); + wake_up(&ihost->eventq); +} + +static void sci_controller_completion_handler(struct isci_host *ihost) +{ + /* Empty out the completion queue */ + if (sci_controller_completion_queue_has_entries(ihost)) + sci_controller_process_completions(ihost); + + /* Clear the interrupt and enable all interrupts again */ + writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status); + /* Could we write the value of SMU_ISR_COMPLETION? */ + writel(0xFF000000, &ihost->smu_registers->interrupt_mask); + writel(0, &ihost->smu_registers->interrupt_mask); +} + +/** + * isci_host_completion_routine() - This function is the delayed service + * routine that calls the sci core library's completion handler. It's + * scheduled as a tasklet from the interrupt service routine when interrupts + * in use, or set as the timeout function in polled mode. + * @data: This parameter specifies the ISCI host object + * + */ +static void isci_host_completion_routine(unsigned long data) +{ + struct isci_host *ihost = (struct isci_host *)data; + struct list_head completed_request_list; + struct list_head errored_request_list; + struct list_head *current_position; + struct list_head *next_position; + struct isci_request *request; + struct isci_request *next_request; + struct sas_task *task; + + INIT_LIST_HEAD(&completed_request_list); + INIT_LIST_HEAD(&errored_request_list); + + spin_lock_irq(&ihost->scic_lock); + + sci_controller_completion_handler(ihost); + + /* Take the lists of completed I/Os from the host. */ + + list_splice_init(&ihost->requests_to_complete, + &completed_request_list); + + /* Take the list of errored I/Os from the host. */ + list_splice_init(&ihost->requests_to_errorback, + &errored_request_list); + + spin_unlock_irq(&ihost->scic_lock); + + /* Process any completions in the lists. 
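+	 * Completed requests are returned to libsas via task_done() and have
+	 * their tags freed; errored requests are routed into the
+	 * sas_task_abort() path.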
*/ + list_for_each_safe(current_position, next_position, + &completed_request_list) { + + request = list_entry(current_position, struct isci_request, + completed_node); + task = isci_request_access_task(request); + + /* Normal notification (task_done) */ + dev_dbg(&ihost->pdev->dev, + "%s: Normal - request/task = %p/%p\n", + __func__, + request, + task); + + /* Return the task to libsas */ + if (task != NULL) { + + task->lldd_task = NULL; + if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) { + + /* If the task is already in the abort path, + * the task_done callback cannot be called. + */ + task->task_done(task); + } + } + + spin_lock_irq(&ihost->scic_lock); + isci_free_tag(ihost, request->io_tag); + spin_unlock_irq(&ihost->scic_lock); + } + list_for_each_entry_safe(request, next_request, &errored_request_list, + completed_node) { + + task = isci_request_access_task(request); + + /* Use sas_task_abort */ + dev_warn(&ihost->pdev->dev, + "%s: Error - request/task = %p/%p\n", + __func__, + request, + task); + + if (task != NULL) { + + /* Put the task into the abort path if it's not there + * already. + */ + if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) + sas_task_abort(task); + + } else { + /* This is a case where the request has completed with a + * status such that it needed further target servicing, + * but the sas_task reference has already been removed + * from the request. Since it was errored, it was not + * being aborted, so there is nothing to do except free + * it. + */ + + spin_lock_irq(&ihost->scic_lock); + /* Remove the request from the remote device's list + * of pending requests. + */ + list_del_init(&request->dev_node); + isci_free_tag(ihost, request->io_tag); + spin_unlock_irq(&ihost->scic_lock); + } + } + +} + +/** + * sci_controller_stop() - This method will stop an individual controller + * object.This method will invoke the associated user callback upon + * completion. The completion callback is called when the following + * conditions are met: -# the method return status is SCI_SUCCESS. -# the + * controller has been quiesced. This method will ensure that all IO + * requests are quiesced, phys are stopped, and all additional operation by + * the hardware is halted. + * @controller: the handle to the controller object to stop. + * @timeout: This parameter specifies the number of milliseconds in which the + * stop operation should complete. + * + * The controller must be in the STARTED or STOPPED state. Indicate if the + * controller stop method succeeded or failed in some way. SCI_SUCCESS if the + * stop operation successfully began. SCI_WARNING_ALREADY_IN_STATE if the + * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the + * controller is not either in the STARTED or STOPPED states. + */ +static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout) +{ + if (ihost->sm.current_state_id != SCIC_READY) { + dev_warn(&ihost->pdev->dev, + "SCIC Controller stop operation requested in " + "invalid state\n"); + return SCI_FAILURE_INVALID_STATE; + } + + sci_mod_timer(&ihost->timer, timeout); + sci_change_state(&ihost->sm, SCIC_STOPPING); + return SCI_SUCCESS; +} + +/** + * sci_controller_reset() - This method will reset the supplied core + * controller regardless of the state of said controller. This operation is + * considered destructive. In other words, all current operations are wiped + * out. No IO completions for outstanding devices occur. 
Outstanding IO + * requests are not aborted or completed at the actual remote device. + * @controller: the handle to the controller object to reset. + * + * Indicate if the controller reset method succeeded or failed in some way. + * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if + * the controller reset operation is unable to complete. + */ +static enum sci_status sci_controller_reset(struct isci_host *ihost) +{ + switch (ihost->sm.current_state_id) { + case SCIC_RESET: + case SCIC_READY: + case SCIC_STOPPED: + case SCIC_FAILED: + /* + * The reset operation is not a graceful cleanup, just + * perform the state transition. + */ + sci_change_state(&ihost->sm, SCIC_RESETTING); + return SCI_SUCCESS; + default: + dev_warn(&ihost->pdev->dev, + "SCIC Controller reset operation requested in " + "invalid state\n"); + return SCI_FAILURE_INVALID_STATE; + } +} + +void isci_host_deinit(struct isci_host *ihost) +{ + int i; + + isci_host_change_state(ihost, isci_stopping); + for (i = 0; i < SCI_MAX_PORTS; i++) { + struct isci_port *iport = &ihost->ports[i]; + struct isci_remote_device *idev, *d; + + list_for_each_entry_safe(idev, d, &iport->remote_dev_list, node) { + if (test_bit(IDEV_ALLOCATED, &idev->flags)) + isci_remote_device_stop(ihost, idev); + } + } + + set_bit(IHOST_STOP_PENDING, &ihost->flags); + + spin_lock_irq(&ihost->scic_lock); + sci_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT); + spin_unlock_irq(&ihost->scic_lock); + + wait_for_stop(ihost); + sci_controller_reset(ihost); + + /* Cancel any/all outstanding port timers */ + for (i = 0; i < ihost->logical_port_entries; i++) { + struct isci_port *iport = &ihost->ports[i]; + del_timer_sync(&iport->timer.timer); + } + + /* Cancel any/all outstanding phy timers */ + for (i = 0; i < SCI_MAX_PHYS; i++) { + struct isci_phy *iphy = &ihost->phys[i]; + del_timer_sync(&iphy->sata_timer.timer); + } + + del_timer_sync(&ihost->port_agent.timer.timer); + + del_timer_sync(&ihost->power_control.timer.timer); + + del_timer_sync(&ihost->timer.timer); + + del_timer_sync(&ihost->phy_timer.timer); +} + +static void __iomem *scu_base(struct isci_host *isci_host) +{ + struct pci_dev *pdev = isci_host->pdev; + int id = isci_host->id; + + return pcim_iomap_table(pdev)[SCI_SCU_BAR * 2] + SCI_SCU_BAR_SIZE * id; +} + +static void __iomem *smu_base(struct isci_host *isci_host) +{ + struct pci_dev *pdev = isci_host->pdev; + int id = isci_host->id; + + return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id; +} + +static void isci_user_parameters_get(struct sci_user_parameters *u) +{ + int i; + + for (i = 0; i < SCI_MAX_PHYS; i++) { + struct sci_phy_user_params *u_phy = &u->phys[i]; + + u_phy->max_speed_generation = phy_gen; + + /* we are not exporting these for now */ + u_phy->align_insertion_frequency = 0x7f; + u_phy->in_connection_align_insertion_frequency = 0xff; + u_phy->notify_enable_spin_up_insertion_frequency = 0x33; + } + + u->stp_inactivity_timeout = stp_inactive_to; + u->ssp_inactivity_timeout = ssp_inactive_to; + u->stp_max_occupancy_timeout = stp_max_occ_to; + u->ssp_max_occupancy_timeout = ssp_max_occ_to; + u->no_outbound_task_timeout = no_outbound_task_to; + u->max_number_concurrent_device_spin_up = max_concurr_spinup; +} + +static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); + + sci_change_state(&ihost->sm, SCIC_RESET); +} + +static inline void sci_controller_starting_state_exit(struct sci_base_state_machine 
*sm) +{ + struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); + + sci_del_timer(&ihost->timer); +} + +#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853 +#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280 +#define INTERRUPT_COALESCE_TIMEOUT_MAX_US 2700000 +#define INTERRUPT_COALESCE_NUMBER_MAX 256 +#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN 7 +#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX 28 + +/** + * sci_controller_set_interrupt_coalescence() - This method allows the user to + * configure the interrupt coalescence. + * @controller: This parameter represents the handle to the controller object + * for which its interrupt coalesce register is overridden. + * @coalesce_number: Used to control the number of entries in the Completion + * Queue before an interrupt is generated. If the number of entries exceed + * this number, an interrupt will be generated. The valid range of the input + * is [0, 256]. A setting of 0 results in coalescing being disabled. + * @coalesce_timeout: Timeout value in microseconds. The valid range of the + * input is [0, 2700000] . A setting of 0 is allowed and results in no + * interrupt coalescing timeout. + * + * Indicate if the user successfully set the interrupt coalesce parameters. + * SCI_SUCCESS The user successfully updated the interrutp coalescence. + * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range. + */ +static enum sci_status +sci_controller_set_interrupt_coalescence(struct isci_host *ihost, + u32 coalesce_number, + u32 coalesce_timeout) +{ + u8 timeout_encode = 0; + u32 min = 0; + u32 max = 0; + + /* Check if the input parameters fall in the range. */ + if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX) + return SCI_FAILURE_INVALID_PARAMETER_VALUE; + + /* + * Defined encoding for interrupt coalescing timeout: + * Value Min Max Units + * ----- --- --- ----- + * 0 - - Disabled + * 1 13.3 20.0 ns + * 2 26.7 40.0 + * 3 53.3 80.0 + * 4 106.7 160.0 + * 5 213.3 320.0 + * 6 426.7 640.0 + * 7 853.3 1280.0 + * 8 1.7 2.6 us + * 9 3.4 5.1 + * 10 6.8 10.2 + * 11 13.7 20.5 + * 12 27.3 41.0 + * 13 54.6 81.9 + * 14 109.2 163.8 + * 15 218.5 327.7 + * 16 436.9 655.4 + * 17 873.8 1310.7 + * 18 1.7 2.6 ms + * 19 3.5 5.2 + * 20 7.0 10.5 + * 21 14.0 21.0 + * 22 28.0 41.9 + * 23 55.9 83.9 + * 24 111.8 167.8 + * 25 223.7 335.5 + * 26 447.4 671.1 + * 27 894.8 1342.2 + * 28 1.8 2.7 s + * Others Undefined */ + + /* + * Use the table above to decide the encode of interrupt coalescing timeout + * value for register writing. */ + if (coalesce_timeout == 0) + timeout_encode = 0; + else{ + /* make the timeout value in unit of (10 ns). */ + coalesce_timeout = coalesce_timeout * 100; + min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10; + max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10; + + /* get the encode of timeout for register writing. */ + for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN; + timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX; + timeout_encode++) { + if (min <= coalesce_timeout && max > coalesce_timeout) + break; + else if (coalesce_timeout >= max && coalesce_timeout < min * 2 + && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) { + if ((coalesce_timeout - max) < (2 * min - coalesce_timeout)) + break; + else{ + timeout_encode++; + break; + } + } else { + max = max * 2; + min = min * 2; + } + } + + if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1) + /* the value is out of range. 
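+			 * (the encode loop walked past
+			 * INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX without a match)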
*/ + return SCI_FAILURE_INVALID_PARAMETER_VALUE; + } + + writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) | + SMU_ICC_GEN_VAL(TIMER, timeout_encode), + &ihost->smu_registers->interrupt_coalesce_control); + + + ihost->interrupt_coalesce_number = (u16)coalesce_number; + ihost->interrupt_coalesce_timeout = coalesce_timeout / 100; + + return SCI_SUCCESS; +} + + +static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); + + /* set the default interrupt coalescence number and timeout value. */ + sci_controller_set_interrupt_coalescence(ihost, 0x10, 250); +} + +static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm) +{ + struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); + + /* disable interrupt coalescence. */ + sci_controller_set_interrupt_coalescence(ihost, 0, 0); +} + +static enum sci_status sci_controller_stop_phys(struct isci_host *ihost) +{ + u32 index; + enum sci_status status; + enum sci_status phy_status; + + status = SCI_SUCCESS; + + for (index = 0; index < SCI_MAX_PHYS; index++) { + phy_status = sci_phy_stop(&ihost->phys[index]); + + if (phy_status != SCI_SUCCESS && + phy_status != SCI_FAILURE_INVALID_STATE) { + status = SCI_FAILURE; + + dev_warn(&ihost->pdev->dev, + "%s: Controller stop operation failed to stop " + "phy %d because of status %d.\n", + __func__, + ihost->phys[index].phy_index, phy_status); + } + } + + return status; +} + +static enum sci_status sci_controller_stop_ports(struct isci_host *ihost) +{ + u32 index; + enum sci_status port_status; + enum sci_status status = SCI_SUCCESS; + + for (index = 0; index < ihost->logical_port_entries; index++) { + struct isci_port *iport = &ihost->ports[index]; + + port_status = sci_port_stop(iport); + + if ((port_status != SCI_SUCCESS) && + (port_status != SCI_FAILURE_INVALID_STATE)) { + status = SCI_FAILURE; + + dev_warn(&ihost->pdev->dev, + "%s: Controller stop operation failed to " + "stop port %d because of status %d.\n", + __func__, + iport->logical_port_index, + port_status); + } + } + + return status; +} + +static enum sci_status sci_controller_stop_devices(struct isci_host *ihost) +{ + u32 index; + enum sci_status status; + enum sci_status device_status; + + status = SCI_SUCCESS; + + for (index = 0; index < ihost->remote_node_entries; index++) { + if (ihost->device_table[index] != NULL) { + /* / @todo What timeout value do we want to provide to this request? 
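+			 * (a timeout value of 0 is passed below for now)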
*/ + device_status = sci_remote_device_stop(ihost->device_table[index], 0); + + if ((device_status != SCI_SUCCESS) && + (device_status != SCI_FAILURE_INVALID_STATE)) { + dev_warn(&ihost->pdev->dev, + "%s: Controller stop operation failed " + "to stop device 0x%p because of " + "status %d.\n", + __func__, + ihost->device_table[index], device_status); + } + } + } + + return status; +} + +static void sci_controller_stopping_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); + + /* Stop all of the components for this controller */ + sci_controller_stop_phys(ihost); + sci_controller_stop_ports(ihost); + sci_controller_stop_devices(ihost); +} + +static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm) +{ + struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); + + sci_del_timer(&ihost->timer); +} + +static void sci_controller_reset_hardware(struct isci_host *ihost) +{ + /* Disable interrupts so we dont take any spurious interrupts */ + sci_controller_disable_interrupts(ihost); + + /* Reset the SCU */ + writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control); + + /* Delay for 1ms to before clearing the CQP and UFQPR. */ + udelay(1000); + + /* The write to the CQGR clears the CQP */ + writel(0x00000000, &ihost->smu_registers->completion_queue_get); + + /* The write to the UFQGP clears the UFQPR */ + writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer); +} + +static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); + + sci_controller_reset_hardware(ihost); + sci_change_state(&ihost->sm, SCIC_RESET); +} + +static const struct sci_base_state sci_controller_state_table[] = { + [SCIC_INITIAL] = { + .enter_state = sci_controller_initial_state_enter, + }, + [SCIC_RESET] = {}, + [SCIC_INITIALIZING] = {}, + [SCIC_INITIALIZED] = {}, + [SCIC_STARTING] = { + .exit_state = sci_controller_starting_state_exit, + }, + [SCIC_READY] = { + .enter_state = sci_controller_ready_state_enter, + .exit_state = sci_controller_ready_state_exit, + }, + [SCIC_RESETTING] = { + .enter_state = sci_controller_resetting_state_enter, + }, + [SCIC_STOPPING] = { + .enter_state = sci_controller_stopping_state_enter, + .exit_state = sci_controller_stopping_state_exit, + }, + [SCIC_STOPPED] = {}, + [SCIC_FAILED] = {} +}; + +static void sci_controller_set_default_config_parameters(struct isci_host *ihost) +{ + /* these defaults are overridden by the platform / firmware */ + u16 index; + + /* Default to APC mode. */ + ihost->oem_parameters.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE; + + /* Default to APC mode. */ + ihost->oem_parameters.controller.max_concurrent_dev_spin_up = 1; + + /* Default to no SSC operation. */ + ihost->oem_parameters.controller.do_enable_ssc = false; + + /* Initialize all of the port parameter information to narrow ports. */ + for (index = 0; index < SCI_MAX_PORTS; index++) { + ihost->oem_parameters.ports[index].phy_mask = 0; + } + + /* Initialize all of the phy parameter information. */ + for (index = 0; index < SCI_MAX_PHYS; index++) { + /* Default to 6G (i.e. Gen 3) for now. 
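+		 * sci_user_parameters_set() later validates this value against
+		 * SCIC_SDS_PARM_MAX_SPEED and SCIC_SDS_PARM_NO_SPEED.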
*/ + ihost->user_parameters.phys[index].max_speed_generation = 3; + + /* the frequencies cannot be 0 */ + ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f; + ihost->user_parameters.phys[index].in_connection_align_insertion_frequency = 0xff; + ihost->user_parameters.phys[index].notify_enable_spin_up_insertion_frequency = 0x33; + + /* + * Previous Vitesse based expanders had a arbitration issue that + * is worked around by having the upper 32-bits of SAS address + * with a value greater then the Vitesse company identifier. + * Hence, usage of 0x5FCFFFFF. */ + ihost->oem_parameters.phys[index].sas_address.low = 0x1 + ihost->id; + ihost->oem_parameters.phys[index].sas_address.high = 0x5FCFFFFF; + } + + ihost->user_parameters.stp_inactivity_timeout = 5; + ihost->user_parameters.ssp_inactivity_timeout = 5; + ihost->user_parameters.stp_max_occupancy_timeout = 5; + ihost->user_parameters.ssp_max_occupancy_timeout = 20; + ihost->user_parameters.no_outbound_task_timeout = 20; +} + +static void controller_timeout(unsigned long data) +{ + struct sci_timer *tmr = (struct sci_timer *)data; + struct isci_host *ihost = container_of(tmr, typeof(*ihost), timer); + struct sci_base_state_machine *sm = &ihost->sm; + unsigned long flags; + + spin_lock_irqsave(&ihost->scic_lock, flags); + + if (tmr->cancel) + goto done; + + if (sm->current_state_id == SCIC_STARTING) + sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT); + else if (sm->current_state_id == SCIC_STOPPING) { + sci_change_state(sm, SCIC_FAILED); + isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT); + } else /* / @todo Now what do we want to do in this case? */ + dev_err(&ihost->pdev->dev, + "%s: Controller timer fired when controller was not " + "in a state being timed.\n", + __func__); + +done: + spin_unlock_irqrestore(&ihost->scic_lock, flags); +} + +static enum sci_status sci_controller_construct(struct isci_host *ihost, + void __iomem *scu_base, + void __iomem *smu_base) +{ + u8 i; + + sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL); + + ihost->scu_registers = scu_base; + ihost->smu_registers = smu_base; + + sci_port_configuration_agent_construct(&ihost->port_agent); + + /* Construct the ports for this controller */ + for (i = 0; i < SCI_MAX_PORTS; i++) + sci_port_construct(&ihost->ports[i], i, ihost); + sci_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost); + + /* Construct the phys for this controller */ + for (i = 0; i < SCI_MAX_PHYS; i++) { + /* Add all the PHYs to the dummy port */ + sci_phy_construct(&ihost->phys[i], + &ihost->ports[SCI_MAX_PORTS], i); + } + + ihost->invalid_phy_mask = 0; + + sci_init_timer(&ihost->timer, controller_timeout); + + /* Initialize the User and OEM parameters to default values. 
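+	 * They may be overridden later by sci_user_parameters_set() and
+	 * sci_oem_parameters_set() during isci_host_init().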
*/ + sci_controller_set_default_config_parameters(ihost); + + return sci_controller_reset(ihost); +} + +int sci_oem_parameters_validate(struct sci_oem_params *oem) +{ + int i; + + for (i = 0; i < SCI_MAX_PORTS; i++) + if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX) + return -EINVAL; + + for (i = 0; i < SCI_MAX_PHYS; i++) + if (oem->phys[i].sas_address.high == 0 && + oem->phys[i].sas_address.low == 0) + return -EINVAL; + + if (oem->controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) { + for (i = 0; i < SCI_MAX_PHYS; i++) + if (oem->ports[i].phy_mask != 0) + return -EINVAL; + } else if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) { + u8 phy_mask = 0; + + for (i = 0; i < SCI_MAX_PHYS; i++) + phy_mask |= oem->ports[i].phy_mask; + + if (phy_mask == 0) + return -EINVAL; + } else + return -EINVAL; + + if (oem->controller.max_concurrent_dev_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT) + return -EINVAL; + + return 0; +} + +static enum sci_status sci_oem_parameters_set(struct isci_host *ihost) +{ + u32 state = ihost->sm.current_state_id; + + if (state == SCIC_RESET || + state == SCIC_INITIALIZING || + state == SCIC_INITIALIZED) { + + if (sci_oem_parameters_validate(&ihost->oem_parameters)) + return SCI_FAILURE_INVALID_PARAMETER_VALUE; + + return SCI_SUCCESS; + } + + return SCI_FAILURE_INVALID_STATE; +} + +static void power_control_timeout(unsigned long data) +{ + struct sci_timer *tmr = (struct sci_timer *)data; + struct isci_host *ihost = container_of(tmr, typeof(*ihost), power_control.timer); + struct isci_phy *iphy; + unsigned long flags; + u8 i; + + spin_lock_irqsave(&ihost->scic_lock, flags); + + if (tmr->cancel) + goto done; + + ihost->power_control.phys_granted_power = 0; + + if (ihost->power_control.phys_waiting == 0) { + ihost->power_control.timer_started = false; + goto done; + } + + for (i = 0; i < SCI_MAX_PHYS; i++) { + + if (ihost->power_control.phys_waiting == 0) + break; + + iphy = ihost->power_control.requesters[i]; + if (iphy == NULL) + continue; + + if (ihost->power_control.phys_granted_power >= + ihost->oem_parameters.controller.max_concurrent_dev_spin_up) + break; + + ihost->power_control.requesters[i] = NULL; + ihost->power_control.phys_waiting--; + ihost->power_control.phys_granted_power++; + sci_phy_consume_power_handler(iphy); + } + + /* + * It doesn't matter if the power list is empty, we need to start the + * timer in case another phy becomes ready. + */ + sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL); + ihost->power_control.timer_started = true; + +done: + spin_unlock_irqrestore(&ihost->scic_lock, flags); +} + +void sci_controller_power_control_queue_insert(struct isci_host *ihost, + struct isci_phy *iphy) +{ + BUG_ON(iphy == NULL); + + if (ihost->power_control.phys_granted_power < + ihost->oem_parameters.controller.max_concurrent_dev_spin_up) { + ihost->power_control.phys_granted_power++; + sci_phy_consume_power_handler(iphy); + + /* + * stop and start the power_control timer. 
When the timer fires, the + * no_of_phys_granted_power will be set to 0 + */ + if (ihost->power_control.timer_started) + sci_del_timer(&ihost->power_control.timer); + + sci_mod_timer(&ihost->power_control.timer, + SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL); + ihost->power_control.timer_started = true; + + } else { + /* Add the phy in the waiting list */ + ihost->power_control.requesters[iphy->phy_index] = iphy; + ihost->power_control.phys_waiting++; + } +} + +void sci_controller_power_control_queue_remove(struct isci_host *ihost, + struct isci_phy *iphy) +{ + BUG_ON(iphy == NULL); + + if (ihost->power_control.requesters[iphy->phy_index]) + ihost->power_control.phys_waiting--; + + ihost->power_control.requesters[iphy->phy_index] = NULL; +} + +#define AFE_REGISTER_WRITE_DELAY 10 + +/* Initialize the AFE for this phy index. We need to read the AFE setup from + * the OEM parameters + */ +static void sci_controller_afe_initialization(struct isci_host *ihost) +{ + const struct sci_oem_params *oem = &ihost->oem_parameters; + struct pci_dev *pdev = ihost->pdev; + u32 afe_status; + u32 phy_id; + + /* Clear DFX Status registers */ + writel(0x0081000f, &ihost->scu_registers->afe.afe_dfx_master_control0); + udelay(AFE_REGISTER_WRITE_DELAY); + + if (is_b0(pdev)) { + /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement + * Timer, PM Stagger Timer */ + writel(0x0007BFFF, &ihost->scu_registers->afe.afe_pmsn_master_control2); + udelay(AFE_REGISTER_WRITE_DELAY); + } + + /* Configure bias currents to normal */ + if (is_a2(pdev)) + writel(0x00005A00, &ihost->scu_registers->afe.afe_bias_control); + else if (is_b0(pdev) || is_c0(pdev)) + writel(0x00005F00, &ihost->scu_registers->afe.afe_bias_control); + + udelay(AFE_REGISTER_WRITE_DELAY); + + /* Enable PLL */ + if (is_b0(pdev) || is_c0(pdev)) + writel(0x80040A08, &ihost->scu_registers->afe.afe_pll_control0); + else + writel(0x80040908, &ihost->scu_registers->afe.afe_pll_control0); + + udelay(AFE_REGISTER_WRITE_DELAY); + + /* Wait for the PLL to lock */ + do { + afe_status = readl(&ihost->scu_registers->afe.afe_common_block_status); + udelay(AFE_REGISTER_WRITE_DELAY); + } while ((afe_status & 0x00001000) == 0); + + if (is_a2(pdev)) { + /* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */ + writel(0x7bcc96ad, &ihost->scu_registers->afe.afe_pmsn_master_control0); + udelay(AFE_REGISTER_WRITE_DELAY); + } + + for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) { + const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id]; + + if (is_b0(pdev)) { + /* Configure transmitter SSC parameters */ + writel(0x00030000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control); + udelay(AFE_REGISTER_WRITE_DELAY); + } else if (is_c0(pdev)) { + /* Configure transmitter SSC parameters */ + writel(0x0003000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control); + udelay(AFE_REGISTER_WRITE_DELAY); + + /* + * All defaults, except the Receive Word Alignament/Comma Detect + * Enable....(0xe800) */ + writel(0x00004500, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0); + udelay(AFE_REGISTER_WRITE_DELAY); + } else { + /* + * All defaults, except the Receive Word Alignament/Comma Detect + * Enable....(0xe800) */ + writel(0x00004512, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0); + udelay(AFE_REGISTER_WRITE_DELAY); + + writel(0x0050100F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1); + udelay(AFE_REGISTER_WRITE_DELAY); + } + + /* + * Power up TX and RX out from power down (PWRDNTX 
and PWRDNRX) + * & increase TX int & ext bias 20%....(0xe85c) */ + if (is_a2(pdev)) + writel(0x000003F0, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); + else if (is_b0(pdev)) { + /* Power down TX and RX (PWRDNTX and PWRDNRX) */ + writel(0x000003D7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); + udelay(AFE_REGISTER_WRITE_DELAY); + + /* + * Power up TX and RX out from power down (PWRDNTX and PWRDNRX) + * & increase TX int & ext bias 20%....(0xe85c) */ + writel(0x000003D4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); + } else { + writel(0x000001E7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); + udelay(AFE_REGISTER_WRITE_DELAY); + + /* + * Power up TX and RX out from power down (PWRDNTX and PWRDNRX) + * & increase TX int & ext bias 20%....(0xe85c) */ + writel(0x000001E4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); + } + udelay(AFE_REGISTER_WRITE_DELAY); + + if (is_a2(pdev)) { + /* Enable TX equalization (0xe824) */ + writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control); + udelay(AFE_REGISTER_WRITE_DELAY); + } + + /* + * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On), + * RDD=0x0(RX Detect Enabled) ....(0xe800) */ + writel(0x00004100, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0); + udelay(AFE_REGISTER_WRITE_DELAY); + + /* Leave DFE/FFE on */ + if (is_a2(pdev)) + writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); + else if (is_b0(pdev)) { + writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); + udelay(AFE_REGISTER_WRITE_DELAY); + /* Enable TX equalization (0xe824) */ + writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control); + } else { + writel(0x0140DF0F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control1); + udelay(AFE_REGISTER_WRITE_DELAY); + + writel(0x3F6F103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); + udelay(AFE_REGISTER_WRITE_DELAY); + + /* Enable TX equalization (0xe824) */ + writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control); + } + + udelay(AFE_REGISTER_WRITE_DELAY); + + writel(oem_phy->afe_tx_amp_control0, + &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0); + udelay(AFE_REGISTER_WRITE_DELAY); + + writel(oem_phy->afe_tx_amp_control1, + &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1); + udelay(AFE_REGISTER_WRITE_DELAY); + + writel(oem_phy->afe_tx_amp_control2, + &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2); + udelay(AFE_REGISTER_WRITE_DELAY); + + writel(oem_phy->afe_tx_amp_control3, + &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3); + udelay(AFE_REGISTER_WRITE_DELAY); + } + + /* Transfer control to the PEs */ + writel(0x00010f00, &ihost->scu_registers->afe.afe_dfx_master_control0); + udelay(AFE_REGISTER_WRITE_DELAY); +} + +static void sci_controller_initialize_power_control(struct isci_host *ihost) +{ + sci_init_timer(&ihost->power_control.timer, power_control_timeout); + + memset(ihost->power_control.requesters, 0, + sizeof(ihost->power_control.requesters)); + + ihost->power_control.phys_waiting = 0; + ihost->power_control.phys_granted_power = 0; +} + +static enum sci_status sci_controller_initialize(struct isci_host *ihost) +{ + struct sci_base_state_machine *sm = &ihost->sm; + enum sci_status result = SCI_FAILURE; + unsigned long i, 
state, val;
+
+	if (ihost->sm.current_state_id != SCIC_RESET) {
+		dev_warn(&ihost->pdev->dev,
+			 "SCIC Controller initialize operation requested "
+			 "in invalid state\n");
+		return SCI_FAILURE_INVALID_STATE;
+	}
+
+	sci_change_state(sm, SCIC_INITIALIZING);
+
+	sci_init_timer(&ihost->phy_timer, phy_startup_timeout);
+
+	ihost->next_phy_to_start = 0;
+	ihost->phy_startup_timer_pending = false;
+
+	sci_controller_initialize_power_control(ihost);
+
+	/*
+	 * There is nothing to do here for B0 since we do not have to
+	 * program the AFE registers.
+	 * / @todo The AFE settings are supposed to be correct for the B0 but
+	 * /       presently they seem to be wrong. */
+	sci_controller_afe_initialization(ihost);
+
+
+	/* Take the hardware out of reset */
+	writel(0, &ihost->smu_registers->soft_reset_control);
+
+	/*
+	 * / @todo Provide meaningful error code for hardware failure
+	 * result = SCI_FAILURE_CONTROLLER_HARDWARE; */
+	for (i = 100; i >= 1; i--) {
+		u32 status;
+
+		/* Loop until the hardware reports success */
+		udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME);
+		status = readl(&ihost->smu_registers->control_status);
+
+		if ((status & SCU_RAM_INIT_COMPLETED) == SCU_RAM_INIT_COMPLETED)
+			break;
+	}
+	if (i == 0)
+		goto out;
+
+	/*
+	 * Determine the actual device capacities that the
+	 * hardware will support */
+	val = readl(&ihost->smu_registers->device_context_capacity);
+
+	/* Record the smaller of the two capacity values */
+	ihost->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS);
+	ihost->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS);
+	ihost->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES);
+
+	/*
+	 * Make all PEs that are unassigned match up with the
+	 * logical ports
+	 */
+	for (i = 0; i < ihost->logical_port_entries; i++) {
+		struct scu_port_task_scheduler_group_registers __iomem
+			*ptsg = &ihost->scu_registers->peg0.ptsg;
+
+		writel(i, &ptsg->protocol_engine[i]);
+	}
+
+	/* Initialize hardware PCI Relaxed ordering in DMA engines */
+	val = readl(&ihost->scu_registers->sdma.pdma_configuration);
+	val |= SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
+	writel(val, &ihost->scu_registers->sdma.pdma_configuration);
+
+	val = readl(&ihost->scu_registers->sdma.cdma_configuration);
+	val |= SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
+	writel(val, &ihost->scu_registers->sdma.cdma_configuration);
+
+	/*
+	 * Initialize the PHYs before the PORTs because the PHY registers
+	 * are accessed during the port initialization.
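+	 * Each phy is handed its transport layer (tl) and link layer (ll)
+	 * register banks from protocol engine group 0 below.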
+ */ + for (i = 0; i < SCI_MAX_PHYS; i++) { + result = sci_phy_initialize(&ihost->phys[i], + &ihost->scu_registers->peg0.pe[i].tl, + &ihost->scu_registers->peg0.pe[i].ll); + if (result != SCI_SUCCESS) + goto out; + } + + for (i = 0; i < ihost->logical_port_entries; i++) { + struct isci_port *iport = &ihost->ports[i]; + + iport->port_task_scheduler_registers = &ihost->scu_registers->peg0.ptsg.port[i]; + iport->port_pe_configuration_register = &ihost->scu_registers->peg0.ptsg.protocol_engine[0]; + iport->viit_registers = &ihost->scu_registers->peg0.viit[i]; + } + + result = sci_port_configuration_agent_initialize(ihost, &ihost->port_agent); + + out: + /* Advance the controller state machine */ + if (result == SCI_SUCCESS) + state = SCIC_INITIALIZED; + else + state = SCIC_FAILED; + sci_change_state(sm, state); + + return result; +} + +static enum sci_status sci_user_parameters_set(struct isci_host *ihost, + struct sci_user_parameters *sci_parms) +{ + u32 state = ihost->sm.current_state_id; + + if (state == SCIC_RESET || + state == SCIC_INITIALIZING || + state == SCIC_INITIALIZED) { + u16 index; + + /* + * Validate the user parameters. If they are not legal, then + * return a failure. + */ + for (index = 0; index < SCI_MAX_PHYS; index++) { + struct sci_phy_user_params *user_phy; + + user_phy = &sci_parms->phys[index]; + + if (!((user_phy->max_speed_generation <= + SCIC_SDS_PARM_MAX_SPEED) && + (user_phy->max_speed_generation > + SCIC_SDS_PARM_NO_SPEED))) + return SCI_FAILURE_INVALID_PARAMETER_VALUE; + + if (user_phy->in_connection_align_insertion_frequency < + 3) + return SCI_FAILURE_INVALID_PARAMETER_VALUE; + + if ((user_phy->in_connection_align_insertion_frequency < + 3) || + (user_phy->align_insertion_frequency == 0) || + (user_phy-> + notify_enable_spin_up_insertion_frequency == + 0)) + return SCI_FAILURE_INVALID_PARAMETER_VALUE; + } + + if ((sci_parms->stp_inactivity_timeout == 0) || + (sci_parms->ssp_inactivity_timeout == 0) || + (sci_parms->stp_max_occupancy_timeout == 0) || + (sci_parms->ssp_max_occupancy_timeout == 0) || + (sci_parms->no_outbound_task_timeout == 0)) + return SCI_FAILURE_INVALID_PARAMETER_VALUE; + + memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms)); + + return SCI_SUCCESS; + } + + return SCI_FAILURE_INVALID_STATE; +} + +static int sci_controller_mem_init(struct isci_host *ihost) +{ + struct device *dev = &ihost->pdev->dev; + dma_addr_t dma; + size_t size; + int err; + + size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32); + ihost->completion_queue = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL); + if (!ihost->completion_queue) + return -ENOMEM; + + writel(lower_32_bits(dma), &ihost->smu_registers->completion_queue_lower); + writel(upper_32_bits(dma), &ihost->smu_registers->completion_queue_upper); + + size = ihost->remote_node_entries * sizeof(union scu_remote_node_context); + ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma, + GFP_KERNEL); + if (!ihost->remote_node_context_table) + return -ENOMEM; + + writel(lower_32_bits(dma), &ihost->smu_registers->remote_node_context_lower); + writel(upper_32_bits(dma), &ihost->smu_registers->remote_node_context_upper); + + size = ihost->task_context_entries * sizeof(struct scu_task_context), + ihost->task_context_table = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL); + if (!ihost->task_context_table) + return -ENOMEM; + + ihost->task_context_dma = dma; + writel(lower_32_bits(dma), &ihost->smu_registers->host_task_table_lower); + writel(upper_32_bits(dma), 
&ihost->smu_registers->host_task_table_upper); + + err = sci_unsolicited_frame_control_construct(ihost); + if (err) + return err; + + /* + * Inform the silicon as to the location of the UF headers and + * address table. + */ + writel(lower_32_bits(ihost->uf_control.headers.physical_address), + &ihost->scu_registers->sdma.uf_header_base_address_lower); + writel(upper_32_bits(ihost->uf_control.headers.physical_address), + &ihost->scu_registers->sdma.uf_header_base_address_upper); + + writel(lower_32_bits(ihost->uf_control.address_table.physical_address), + &ihost->scu_registers->sdma.uf_address_table_lower); + writel(upper_32_bits(ihost->uf_control.address_table.physical_address), + &ihost->scu_registers->sdma.uf_address_table_upper); + + return 0; +} + +int isci_host_init(struct isci_host *ihost) +{ + int err = 0, i; + enum sci_status status; + struct sci_user_parameters sci_user_params; + struct isci_pci_info *pci_info = to_pci_info(ihost->pdev); + + spin_lock_init(&ihost->state_lock); + spin_lock_init(&ihost->scic_lock); + init_waitqueue_head(&ihost->eventq); + + isci_host_change_state(ihost, isci_starting); + + status = sci_controller_construct(ihost, scu_base(ihost), + smu_base(ihost)); + + if (status != SCI_SUCCESS) { + dev_err(&ihost->pdev->dev, + "%s: sci_controller_construct failed - status = %x\n", + __func__, + status); + return -ENODEV; + } + + ihost->sas_ha.dev = &ihost->pdev->dev; + ihost->sas_ha.lldd_ha = ihost; + + /* + * grab initial values stored in the controller object for OEM and USER + * parameters + */ + isci_user_parameters_get(&sci_user_params); + status = sci_user_parameters_set(ihost, &sci_user_params); + if (status != SCI_SUCCESS) { + dev_warn(&ihost->pdev->dev, + "%s: sci_user_parameters_set failed\n", + __func__); + return -ENODEV; + } + + /* grab any OEM parameters specified in orom */ + if (pci_info->orom) { + status = isci_parse_oem_parameters(&ihost->oem_parameters, + pci_info->orom, + ihost->id); + if (status != SCI_SUCCESS) { + dev_warn(&ihost->pdev->dev, + "parsing firmware oem parameters failed\n"); + return -EINVAL; + } + } + + status = sci_oem_parameters_set(ihost); + if (status != SCI_SUCCESS) { + dev_warn(&ihost->pdev->dev, + "%s: sci_oem_parameters_set failed\n", + __func__); + return -ENODEV; + } + + tasklet_init(&ihost->completion_tasklet, + isci_host_completion_routine, (unsigned long)ihost); + + INIT_LIST_HEAD(&ihost->requests_to_complete); + INIT_LIST_HEAD(&ihost->requests_to_errorback); + + spin_lock_irq(&ihost->scic_lock); + status = sci_controller_initialize(ihost); + spin_unlock_irq(&ihost->scic_lock); + if (status != SCI_SUCCESS) { + dev_warn(&ihost->pdev->dev, + "%s: sci_controller_initialize failed -" + " status = 0x%x\n", + __func__, status); + return -ENODEV; + } + + err = sci_controller_mem_init(ihost); + if (err) + return err; + + for (i = 0; i < SCI_MAX_PORTS; i++) + isci_port_init(&ihost->ports[i], ihost, i); + + for (i = 0; i < SCI_MAX_PHYS; i++) + isci_phy_init(&ihost->phys[i], ihost, i); + + for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) { + struct isci_remote_device *idev = &ihost->devices[i]; + + INIT_LIST_HEAD(&idev->reqs_in_process); + INIT_LIST_HEAD(&idev->node); + } + + for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) { + struct isci_request *ireq; + dma_addr_t dma; + + ireq = dmam_alloc_coherent(&ihost->pdev->dev, + sizeof(struct isci_request), &dma, + GFP_KERNEL); + if (!ireq) + return -ENOMEM; + + ireq->tc = &ihost->task_context_table[i]; + ireq->owning_controller = ihost; + spin_lock_init(&ireq->state_lock); + 
ireq->request_daddr = dma; + ireq->isci_host = ihost; + ihost->reqs[i] = ireq; + } + + return 0; +} + +void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport, + struct isci_phy *iphy) +{ + switch (ihost->sm.current_state_id) { + case SCIC_STARTING: + sci_del_timer(&ihost->phy_timer); + ihost->phy_startup_timer_pending = false; + ihost->port_agent.link_up_handler(ihost, &ihost->port_agent, + iport, iphy); + sci_controller_start_next_phy(ihost); + break; + case SCIC_READY: + ihost->port_agent.link_up_handler(ihost, &ihost->port_agent, + iport, iphy); + break; + default: + dev_dbg(&ihost->pdev->dev, + "%s: SCIC Controller linkup event from phy %d in " + "unexpected state %d\n", __func__, iphy->phy_index, + ihost->sm.current_state_id); + } +} + +void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport, + struct isci_phy *iphy) +{ + switch (ihost->sm.current_state_id) { + case SCIC_STARTING: + case SCIC_READY: + ihost->port_agent.link_down_handler(ihost, &ihost->port_agent, + iport, iphy); + break; + default: + dev_dbg(&ihost->pdev->dev, + "%s: SCIC Controller linkdown event from phy %d in " + "unexpected state %d\n", + __func__, + iphy->phy_index, + ihost->sm.current_state_id); + } +} + +static bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost) +{ + u32 index; + + for (index = 0; index < ihost->remote_node_entries; index++) { + if ((ihost->device_table[index] != NULL) && + (ihost->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING)) + return true; + } + + return false; +} + +void sci_controller_remote_device_stopped(struct isci_host *ihost, + struct isci_remote_device *idev) +{ + if (ihost->sm.current_state_id != SCIC_STOPPING) { + dev_dbg(&ihost->pdev->dev, + "SCIC Controller 0x%p remote device stopped event " + "from device 0x%p in unexpected state %d\n", + ihost, idev, + ihost->sm.current_state_id); + return; + } + + if (!sci_controller_has_remote_devices_stopping(ihost)) + sci_change_state(&ihost->sm, SCIC_STOPPED); +} + +void sci_controller_post_request(struct isci_host *ihost, u32 request) +{ + dev_dbg(&ihost->pdev->dev, "%s[%d]: %#x\n", + __func__, ihost->id, request); + + writel(request, &ihost->smu_registers->post_context_port); +} + +struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag) +{ + u16 task_index; + u16 task_sequence; + + task_index = ISCI_TAG_TCI(io_tag); + + if (task_index < ihost->task_context_entries) { + struct isci_request *ireq = ihost->reqs[task_index]; + + if (test_bit(IREQ_ACTIVE, &ireq->flags)) { + task_sequence = ISCI_TAG_SEQ(io_tag); + + if (task_sequence == ihost->io_request_sequence[task_index]) + return ireq; + } + } + + return NULL; +} + +/** + * This method allocates remote node index and the reserves the remote node + * context space for use. This method can fail if there are no more remote + * node index available. + * @scic: This is the controller object which contains the set of + * free remote node ids + * @sci_dev: This is the device object which is requesting the a remote node + * id + * @node_id: This is the remote node id that is assinged to the device if one + * is available + * + * enum sci_status SCI_FAILURE_OUT_OF_RESOURCES if there are no available remote + * node index available. 
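+ * On success the allocated remote node index is returned through @node_id.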
+ */ +enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost, + struct isci_remote_device *idev, + u16 *node_id) +{ + u16 node_index; + u32 remote_node_count = sci_remote_device_node_count(idev); + + node_index = sci_remote_node_table_allocate_remote_node( + &ihost->available_remote_nodes, remote_node_count + ); + + if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) { + ihost->device_table[node_index] = idev; + + *node_id = node_index; + + return SCI_SUCCESS; + } + + return SCI_FAILURE_INSUFFICIENT_RESOURCES; +} + +void sci_controller_free_remote_node_context(struct isci_host *ihost, + struct isci_remote_device *idev, + u16 node_id) +{ + u32 remote_node_count = sci_remote_device_node_count(idev); + + if (ihost->device_table[node_id] == idev) { + ihost->device_table[node_id] = NULL; + + sci_remote_node_table_release_remote_node_index( + &ihost->available_remote_nodes, remote_node_count, node_id + ); + } +} + +void sci_controller_copy_sata_response(void *response_buffer, + void *frame_header, + void *frame_buffer) +{ + /* XXX type safety? */ + memcpy(response_buffer, frame_header, sizeof(u32)); + + memcpy(response_buffer + sizeof(u32), + frame_buffer, + sizeof(struct dev_to_host_fis) - sizeof(u32)); +} + +void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index) +{ + if (sci_unsolicited_frame_control_release_frame(&ihost->uf_control, frame_index)) + writel(ihost->uf_control.get, + &ihost->scu_registers->sdma.unsolicited_frame_get_pointer); +} + +void isci_tci_free(struct isci_host *ihost, u16 tci) +{ + u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS-1); + + ihost->tci_pool[tail] = tci; + ihost->tci_tail = tail + 1; +} + +static u16 isci_tci_alloc(struct isci_host *ihost) +{ + u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1); + u16 tci = ihost->tci_pool[head]; + + ihost->tci_head = head + 1; + return tci; +} + +static u16 isci_tci_space(struct isci_host *ihost) +{ + return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS); +} + +u16 isci_alloc_tag(struct isci_host *ihost) +{ + if (isci_tci_space(ihost)) { + u16 tci = isci_tci_alloc(ihost); + u8 seq = ihost->io_request_sequence[tci]; + + return ISCI_TAG(seq, tci); + } + + return SCI_CONTROLLER_INVALID_IO_TAG; +} + +enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag) +{ + u16 tci = ISCI_TAG_TCI(io_tag); + u16 seq = ISCI_TAG_SEQ(io_tag); + + /* prevent tail from passing head */ + if (isci_tci_active(ihost) == 0) + return SCI_FAILURE_INVALID_IO_TAG; + + if (seq == ihost->io_request_sequence[tci]) { + ihost->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1); + + isci_tci_free(ihost, tci); + + return SCI_SUCCESS; + } + return SCI_FAILURE_INVALID_IO_TAG; +} + +enum sci_status sci_controller_start_io(struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq) +{ + enum sci_status status; + + if (ihost->sm.current_state_id != SCIC_READY) { + dev_warn(&ihost->pdev->dev, "invalid state to start I/O"); + return SCI_FAILURE_INVALID_STATE; + } + + status = sci_remote_device_start_io(ihost, idev, ireq); + if (status != SCI_SUCCESS) + return status; + + set_bit(IREQ_ACTIVE, &ireq->flags); + sci_controller_post_request(ihost, ireq->post_context); + return SCI_SUCCESS; +} + +enum sci_status sci_controller_terminate_request(struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq) +{ + /* terminate an ongoing (i.e. started) core IO request. 
This does not + * abort the IO request at the target, but rather removes the IO + * request from the host controller. + */ + enum sci_status status; + + if (ihost->sm.current_state_id != SCIC_READY) { + dev_warn(&ihost->pdev->dev, + "invalid state to terminate request\n"); + return SCI_FAILURE_INVALID_STATE; + } + + status = sci_io_request_terminate(ireq); + if (status != SCI_SUCCESS) + return status; + + /* + * Utilize the original post context command and or in the POST_TC_ABORT + * request sub-type. + */ + sci_controller_post_request(ihost, + ireq->post_context | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT); + return SCI_SUCCESS; +} + +/** + * sci_controller_complete_io() - This method will perform core specific + * completion operations for an IO request. After this method is invoked, + * the user should consider the IO request as invalid until it is properly + * reused (i.e. re-constructed). + * @ihost: The handle to the controller object for which to complete the + * IO request. + * @idev: The handle to the remote device object for which to complete + * the IO request. + * @ireq: the handle to the io request object to complete. + */ +enum sci_status sci_controller_complete_io(struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq) +{ + enum sci_status status; + u16 index; + + switch (ihost->sm.current_state_id) { + case SCIC_STOPPING: + /* XXX: Implement this function */ + return SCI_FAILURE; + case SCIC_READY: + status = sci_remote_device_complete_io(ihost, idev, ireq); + if (status != SCI_SUCCESS) + return status; + + index = ISCI_TAG_TCI(ireq->io_tag); + clear_bit(IREQ_ACTIVE, &ireq->flags); + return SCI_SUCCESS; + default: + dev_warn(&ihost->pdev->dev, "invalid state to complete I/O"); + return SCI_FAILURE_INVALID_STATE; + } + +} + +enum sci_status sci_controller_continue_io(struct isci_request *ireq) +{ + struct isci_host *ihost = ireq->owning_controller; + + if (ihost->sm.current_state_id != SCIC_READY) { + dev_warn(&ihost->pdev->dev, "invalid state to continue I/O"); + return SCI_FAILURE_INVALID_STATE; + } + + set_bit(IREQ_ACTIVE, &ireq->flags); + sci_controller_post_request(ihost, ireq->post_context); + return SCI_SUCCESS; +} + +/** + * sci_controller_start_task() - This method is called by the SCIC user to + * send/start a framework task management request. + * @controller: the handle to the controller object for which to start the task + * management request. + * @remote_device: the handle to the remote device object for which to start + * the task management request. + * @task_request: the handle to the task request object to start. + */ +enum sci_task_status sci_controller_start_task(struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq) +{ + enum sci_status status; + + if (ihost->sm.current_state_id != SCIC_READY) { + dev_warn(&ihost->pdev->dev, + "%s: SCIC Controller starting task from invalid " + "state\n", + __func__); + return SCI_TASK_FAILURE_INVALID_STATE; + } + + status = sci_remote_device_start_task(ihost, idev, ireq); + switch (status) { + case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS: + set_bit(IREQ_ACTIVE, &ireq->flags); + + /* + * We will let framework know this task request started successfully, + * although core is still woring on starting the request (to post tc when + * RNC is resumed.) 
+ */ + return SCI_SUCCESS; + case SCI_SUCCESS: + set_bit(IREQ_ACTIVE, &ireq->flags); + sci_controller_post_request(ihost, ireq->post_context); + break; + default: + break; + } + + return status; +} diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h new file mode 100644 index 00000000000..062101a39f7 --- /dev/null +++ b/drivers/scsi/isci/host.h @@ -0,0 +1,542 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef _SCI_HOST_H_ +#define _SCI_HOST_H_ + +#include "remote_device.h" +#include "phy.h" +#include "isci.h" +#include "remote_node_table.h" +#include "registers.h" +#include "unsolicited_frame_control.h" +#include "probe_roms.h" + +struct isci_request; +struct scu_task_context; + + +/** + * struct sci_power_control - + * + * This structure defines the fields for managing power control for direct + * attached disk devices. 
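+ * At most max_concurrent_dev_spin_up phys are granted power within any one
+ * power control interval (SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL).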
+ */ +struct sci_power_control { + /** + * This field is set when the power control timer is running and cleared when + * it is not. + */ + bool timer_started; + + /** + * Timer to control when the direct attached disks can consume power. + */ + struct sci_timer timer; + + /** + * This field is used to keep track of how many phys are put into the + * requesters field. + */ + u8 phys_waiting; + + /** + * This field is used to keep track of how many phys have been granted permission to consume power. + */ + u8 phys_granted_power; + + /** + * This field is an array of phys that we are waiting on. The phys are directly + * mapped into requesters via struct sci_phy.phy_index. + */ + struct isci_phy *requesters[SCI_MAX_PHYS]; + +}; + +struct sci_port_configuration_agent; +typedef void (*port_config_fn)(struct isci_host *, + struct sci_port_configuration_agent *, + struct isci_port *, struct isci_phy *); + +struct sci_port_configuration_agent { + u16 phy_configured_mask; + u16 phy_ready_mask; + struct { + u8 min_index; + u8 max_index; + } phy_valid_port_range[SCI_MAX_PHYS]; + bool timer_pending; + port_config_fn link_up_handler; + port_config_fn link_down_handler; + struct sci_timer timer; +}; + +/** + * isci_host - primary host/controller object + * @timer: timeout start/stop operations + * @device_table: rni (hw remote node index) to remote device lookup table + * @available_remote_nodes: rni allocator + * @power_control: manage device spin up + * @io_request_sequence: generation number for tci's (task contexts) + * @task_context_table: hw task context table + * @remote_node_context_table: hw remote node context table + * @completion_queue: hw-producer driver-consumer communication ring + * @completion_queue_get: tracks the driver 'head' of the ring to notify hw + * @logical_port_entries: min({driver|silicon}-supported-port-count) + * @remote_node_entries: min({driver|silicon}-supported-node-count) + * @task_context_entries: min({driver|silicon}-supported-task-count) + * @phy_timer: phy startup timer + * @invalid_phy_mask: if an invalid_link_up notification is reported a bit for + * the phy index is set so further notifications are not + * made. Once the phy reports link up and is made part of a + * port then this bit is cleared.
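 *
 * Note on tags: the io_tag handed to the hardware packs a 4-bit sequence
 * number (from io_request_sequence[]) above an 8-bit task context index
 * drawn from tci_pool, as encoded by the ISCI_TAG(), ISCI_TAG_SEQ() and
 * ISCI_TAG_TCI() macros later in this header. For example,
 * ISCI_TAG(3, 0x2a) yields 0x302a, and ISCI_TAG_SEQ()/ISCI_TAG_TCI()
 * recover 3 and 0x2a from it.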
+ + */ +struct isci_host { + struct sci_base_state_machine sm; + /* XXX can we time this externally */ + struct sci_timer timer; + /* XXX drop reference module params directly */ + struct sci_user_parameters user_parameters; + /* XXX no need to be a union */ + struct sci_oem_params oem_parameters; + struct sci_port_configuration_agent port_agent; + struct isci_remote_device *device_table[SCI_MAX_REMOTE_DEVICES]; + struct sci_remote_node_table available_remote_nodes; + struct sci_power_control power_control; + u8 io_request_sequence[SCI_MAX_IO_REQUESTS]; + struct scu_task_context *task_context_table; + dma_addr_t task_context_dma; + union scu_remote_node_context *remote_node_context_table; + u32 *completion_queue; + u32 completion_queue_get; + u32 logical_port_entries; + u32 remote_node_entries; + u32 task_context_entries; + struct sci_unsolicited_frame_control uf_control; + + /* phy startup */ + struct sci_timer phy_timer; + /* XXX kill */ + bool phy_startup_timer_pending; + u32 next_phy_to_start; + /* XXX convert to unsigned long and use bitops */ + u8 invalid_phy_mask; + + /* TODO attempt dynamic interrupt coalescing scheme */ + u16 interrupt_coalesce_number; + u32 interrupt_coalesce_timeout; + struct smu_registers __iomem *smu_registers; + struct scu_registers __iomem *scu_registers; + + u16 tci_head; + u16 tci_tail; + u16 tci_pool[SCI_MAX_IO_REQUESTS]; + + int id; /* unique within a given pci device */ + struct isci_phy phys[SCI_MAX_PHYS]; + struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */ + struct sas_ha_struct sas_ha; + + spinlock_t state_lock; + struct pci_dev *pdev; + enum isci_status status; + #define IHOST_START_PENDING 0 + #define IHOST_STOP_PENDING 1 + unsigned long flags; + wait_queue_head_t eventq; + struct Scsi_Host *shost; + struct tasklet_struct completion_tasklet; + struct list_head requests_to_complete; + struct list_head requests_to_errorback; + spinlock_t scic_lock; + struct isci_request *reqs[SCI_MAX_IO_REQUESTS]; + struct isci_remote_device devices[SCI_MAX_REMOTE_DEVICES]; +}; + +/** + * enum sci_controller_states - This enumeration depicts all the states + * for the common controller state machine. + */ +enum sci_controller_states { + /** + * Simply the initial state for the base controller state machine. + */ + SCIC_INITIAL = 0, + + /** + * This state indicates that the controller is reset. The memory for + * the controller is in it's initial state, but the controller requires + * initialization. + * This state is entered from the INITIAL state. + * This state is entered from the RESETTING state. + */ + SCIC_RESET, + + /** + * This state is typically an action state that indicates the controller + * is in the process of initialization. In this state no new IO operations + * are permitted. + * This state is entered from the RESET state. + */ + SCIC_INITIALIZING, + + /** + * This state indicates that the controller has been successfully + * initialized. In this state no new IO operations are permitted. + * This state is entered from the INITIALIZING state. + */ + SCIC_INITIALIZED, + + /** + * This state indicates the the controller is in the process of becoming + * ready (i.e. starting). In this state no new IO operations are permitted. + * This state is entered from the INITIALIZED state. + */ + SCIC_STARTING, + + /** + * This state indicates the controller is now ready. Thus, the user + * is able to perform IO operations on the controller. + * This state is entered from the STARTING state. 
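 *
 * Taken together with the surrounding states, the normal lifecycle runs
 * INITIAL -> RESET -> INITIALIZING -> INITIALIZED -> STARTING -> READY,
 * with STOPPING -> STOPPED on shutdown, and RESETTING/FAILED covering the
 * reset and error paths described below.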
+ */ + SCIC_READY, + + /** + * This state is typically an action state that indicates the controller + * is in the process of resetting. Thus, the user is unable to perform + * IO operations on the controller. A reset is considered destructive in + * most cases. + * This state is entered from the READY state. + * This state is entered from the FAILED state. + * This state is entered from the STOPPED state. + */ + SCIC_RESETTING, + + /** + * This state indicates that the controller is in the process of stopping. + * In this state no new IO operations are permitted, but existing IO + * operations are allowed to complete. + * This state is entered from the READY state. + */ + SCIC_STOPPING, + + /** + * This state indicates that the controller has successfully been stopped. + * In this state no new IO operations are permitted. + * This state is entered from the STOPPING state. + */ + SCIC_STOPPED, + + /** + * This state indicates that the controller could not successfully be + * initialized. In this state no new IO operations are permitted. + * This state is entered from the INITIALIZING state. + * This state is entered from the STARTING state. + * This state is entered from the STOPPING state. + * This state is entered from the RESETTING state. + */ + SCIC_FAILED, +}; + +/** + * struct isci_pci_info - This class represents the pci function containing the + * controllers. Depending on PCI SKU, there could be up to 2 controllers in + * the PCI function. + */ +#define SCI_MAX_MSIX_INT (SCI_NUM_MSI_X_INT*SCI_MAX_CONTROLLERS) + +struct isci_pci_info { + struct msix_entry msix_entries[SCI_MAX_MSIX_INT]; + struct isci_host *hosts[SCI_MAX_CONTROLLERS]; + struct isci_orom *orom; +}; + +static inline struct isci_pci_info *to_pci_info(struct pci_dev *pdev) +{ + return pci_get_drvdata(pdev); +} + +#define for_each_isci_host(id, ihost, pdev) \ + for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \ + id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \ + ihost = to_pci_info(pdev)->hosts[++id]) + +static inline enum isci_status isci_host_get_state(struct isci_host *isci_host) +{ + return isci_host->status; +} + +static inline void isci_host_change_state(struct isci_host *isci_host, + enum isci_status status) +{ + unsigned long flags; + + dev_dbg(&isci_host->pdev->dev, + "%s: isci_host = %p, state = 0x%x", + __func__, + isci_host, + status); + spin_lock_irqsave(&isci_host->state_lock, flags); + isci_host->status = status; + spin_unlock_irqrestore(&isci_host->state_lock, flags); + +} + +static inline void wait_for_start(struct isci_host *ihost) +{ + wait_event(ihost->eventq, !test_bit(IHOST_START_PENDING, &ihost->flags)); +} + +static inline void wait_for_stop(struct isci_host *ihost) +{ + wait_event(ihost->eventq, !test_bit(IHOST_STOP_PENDING, &ihost->flags)); +} + +static inline void wait_for_device_start(struct isci_host *ihost, struct isci_remote_device *idev) +{ + wait_event(ihost->eventq, !test_bit(IDEV_START_PENDING, &idev->flags)); +} + +static inline void wait_for_device_stop(struct isci_host *ihost, struct isci_remote_device *idev) +{ + wait_event(ihost->eventq, !test_bit(IDEV_STOP_PENDING, &idev->flags)); +} + +static inline struct isci_host *dev_to_ihost(struct domain_device *dev) +{ + return dev->port->ha->lldd_ha; +} + +/* we always use protocol engine group zero */ +#define ISCI_PEG 0 + +/* see sci_controller_io_tag_allocate|free for how seq and tci are built */ +#define ISCI_TAG(seq, tci) (((u16) (seq)) << 12 | tci) + +/* these are returned by the hardware, so sanitize them */ +#define 
ISCI_TAG_SEQ(tag) (((tag) >> 12) & (SCI_MAX_SEQ-1)) +#define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1)) + +/* expander attached sata devices require 3 rnc slots */ +static inline int sci_remote_device_node_count(struct isci_remote_device *idev) +{ + struct domain_device *dev = idev->domain_dev; + + if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) && + !idev->is_direct_attached) + return SCU_STP_REMOTE_NODE_COUNT; + return SCU_SSP_REMOTE_NODE_COUNT; +} + +/** + * sci_controller_clear_invalid_phy() - + * + * This macro will clear the bit in the invalid phy mask for this controller + * object. This is used to control messages reported for invalid link up + * notifications. + */ +#define sci_controller_clear_invalid_phy(controller, phy) \ + ((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index)) + +static inline struct device *sciphy_to_dev(struct isci_phy *iphy) +{ + + if (!iphy || !iphy->isci_port || !iphy->isci_port->isci_host) + return NULL; + + return &iphy->isci_port->isci_host->pdev->dev; +} + +static inline struct device *sciport_to_dev(struct isci_port *iport) +{ + + if (!iport || !iport->isci_host) + return NULL; + + return &iport->isci_host->pdev->dev; +} + +static inline struct device *scirdev_to_dev(struct isci_remote_device *idev) +{ + if (!idev || !idev->isci_port || !idev->isci_port->isci_host) + return NULL; + + return &idev->isci_port->isci_host->pdev->dev; +} + +static inline bool is_a2(struct pci_dev *pdev) +{ + if (pdev->revision < 4) + return true; + return false; +} + +static inline bool is_b0(struct pci_dev *pdev) +{ + if (pdev->revision == 4) + return true; + return false; +} + +static inline bool is_c0(struct pci_dev *pdev) +{ + if (pdev->revision >= 5) + return true; + return false; +} + +void sci_controller_post_request(struct isci_host *ihost, + u32 request); +void sci_controller_release_frame(struct isci_host *ihost, + u32 frame_index); +void sci_controller_copy_sata_response(void *response_buffer, + void *frame_header, + void *frame_buffer); +enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost, + struct isci_remote_device *idev, + u16 *node_id); +void sci_controller_free_remote_node_context( + struct isci_host *ihost, + struct isci_remote_device *idev, + u16 node_id); + +struct isci_request *sci_request_by_tag(struct isci_host *ihost, + u16 io_tag); + +void sci_controller_power_control_queue_insert( + struct isci_host *ihost, + struct isci_phy *iphy); + +void sci_controller_power_control_queue_remove( + struct isci_host *ihost, + struct isci_phy *iphy); + +void sci_controller_link_up( + struct isci_host *ihost, + struct isci_port *iport, + struct isci_phy *iphy); + +void sci_controller_link_down( + struct isci_host *ihost, + struct isci_port *iport, + struct isci_phy *iphy); + +void sci_controller_remote_device_stopped( + struct isci_host *ihost, + struct isci_remote_device *idev); + +void sci_controller_copy_task_context( + struct isci_host *ihost, + struct isci_request *ireq); + +void sci_controller_register_setup(struct isci_host *ihost); + +enum sci_status sci_controller_continue_io(struct isci_request *ireq); +int isci_host_scan_finished(struct Scsi_Host *, unsigned long); +void isci_host_scan_start(struct Scsi_Host *); +u16 isci_alloc_tag(struct isci_host *ihost); +enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag); +void isci_tci_free(struct isci_host *ihost, u16 tci); + +int isci_host_init(struct isci_host *); + +void isci_host_init_controller_names( + struct isci_host 
*isci_host, + unsigned int controller_idx); + +void isci_host_deinit( + struct isci_host *); + +void isci_host_port_link_up( + struct isci_host *, + struct isci_port *, + struct isci_phy *); +int isci_host_dev_found(struct domain_device *); + +void isci_host_remote_device_start_complete( + struct isci_host *, + struct isci_remote_device *, + enum sci_status); + +void sci_controller_disable_interrupts( + struct isci_host *ihost); + +enum sci_status sci_controller_start_io( + struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq); + +enum sci_task_status sci_controller_start_task( + struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq); + +enum sci_status sci_controller_terminate_request( + struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq); + +enum sci_status sci_controller_complete_io( + struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq); + +void sci_port_configuration_agent_construct( + struct sci_port_configuration_agent *port_agent); + +enum sci_status sci_port_configuration_agent_initialize( + struct isci_host *ihost, + struct sci_port_configuration_agent *port_agent); +#endif diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c new file mode 100644 index 00000000000..61e0d09e2b5 --- /dev/null +++ b/drivers/scsi/isci/init.c @@ -0,0 +1,565 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/firmware.h> +#include <linux/efi.h> +#include <asm/string.h> +#include "isci.h" +#include "task.h" +#include "probe_roms.h" + +static struct scsi_transport_template *isci_transport_template; + +static DEFINE_PCI_DEVICE_TABLE(isci_id_table) = { + { PCI_VDEVICE(INTEL, 0x1D61),}, + { PCI_VDEVICE(INTEL, 0x1D63),}, + { PCI_VDEVICE(INTEL, 0x1D65),}, + { PCI_VDEVICE(INTEL, 0x1D67),}, + { PCI_VDEVICE(INTEL, 0x1D69),}, + { PCI_VDEVICE(INTEL, 0x1D6B),}, + { PCI_VDEVICE(INTEL, 0x1D60),}, + { PCI_VDEVICE(INTEL, 0x1D62),}, + { PCI_VDEVICE(INTEL, 0x1D64),}, + { PCI_VDEVICE(INTEL, 0x1D66),}, + { PCI_VDEVICE(INTEL, 0x1D68),}, + { PCI_VDEVICE(INTEL, 0x1D6A),}, + {} +}; + +MODULE_DEVICE_TABLE(pci, isci_id_table); + +/* linux isci specific settings */ + +unsigned char no_outbound_task_to = 20; +module_param(no_outbound_task_to, byte, 0); +MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)"); + +u16 ssp_max_occ_to = 20; +module_param(ssp_max_occ_to, ushort, 0); +MODULE_PARM_DESC(ssp_max_occ_to, "SSP Max occupancy timeout (100us incr)"); + +u16 stp_max_occ_to = 5; +module_param(stp_max_occ_to, ushort, 0); +MODULE_PARM_DESC(stp_max_occ_to, "STP Max occupancy timeout (100us incr)"); + +u16 ssp_inactive_to = 5; +module_param(ssp_inactive_to, ushort, 0); +MODULE_PARM_DESC(ssp_inactive_to, "SSP inactivity timeout (100us incr)"); + +u16 stp_inactive_to = 5; +module_param(stp_inactive_to, ushort, 0); +MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)"); + +unsigned char phy_gen = 3; +module_param(phy_gen, byte, 0); +MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)"); + +unsigned char max_concurr_spinup = 1; +module_param(max_concurr_spinup, byte, 0); +MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup"); + +static struct scsi_host_template isci_sht = { + + .module = THIS_MODULE, + .name = DRV_NAME, + .proc_name = DRV_NAME, + .queuecommand = sas_queuecommand, + .target_alloc = sas_target_alloc, + .slave_configure = sas_slave_configure, + .slave_destroy = sas_slave_destroy, + .scan_finished = isci_host_scan_finished, + .scan_start = isci_host_scan_start, + .change_queue_depth = sas_change_queue_depth, + .change_queue_type = sas_change_queue_type, + .bios_param = sas_bios_param, + .can_queue = ISCI_CAN_QUEUE_VAL, + .cmd_per_lun = 1, + .this_id = -1, + .sg_tablesize = SG_ALL, + .max_sectors = SCSI_DEFAULT_MAX_SECTORS, + .use_clustering = ENABLE_CLUSTERING, + .eh_device_reset_handler = sas_eh_device_reset_handler, + .eh_bus_reset_handler = isci_bus_reset_handler, + .slave_alloc = sas_slave_alloc, + .target_destroy = sas_target_destroy, + .ioctl = sas_ioctl, +}; + +static struct sas_domain_function_template isci_transport_ops = { + + /* The class calls these to notify the LLDD of an event. 
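 * These entries are invoked by the libsas layer once this template is
 * attached via sas_domain_attach_transport() in isci_init() below.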
*/ + .lldd_port_formed = isci_port_formed, + .lldd_port_deformed = isci_port_deformed, + + /* The class calls these when a device is found or gone. */ + .lldd_dev_found = isci_remote_device_found, + .lldd_dev_gone = isci_remote_device_gone, + + .lldd_execute_task = isci_task_execute_task, + /* Task Management Functions. Must be called from process context. */ + .lldd_abort_task = isci_task_abort_task, + .lldd_abort_task_set = isci_task_abort_task_set, + .lldd_clear_aca = isci_task_clear_aca, + .lldd_clear_task_set = isci_task_clear_task_set, + .lldd_I_T_nexus_reset = isci_task_I_T_nexus_reset, + .lldd_lu_reset = isci_task_lu_reset, + .lldd_query_task = isci_task_query_task, + + /* Port and Adapter management */ + .lldd_clear_nexus_port = isci_task_clear_nexus_port, + .lldd_clear_nexus_ha = isci_task_clear_nexus_ha, + + /* Phy management */ + .lldd_control_phy = isci_phy_control, +}; + + +/****************************************************************************** +* P R O T E C T E D M E T H O D S +******************************************************************************/ + + + +/** + * isci_register_sas_ha() - This method initializes various lldd + * specific members of the sas_ha struct and calls the libsas + * sas_register_ha() function. + * @isci_host: This parameter specifies the lldd specific wrapper for the + * libsas sas_ha struct. + * + * This method returns an error code indicating success or failure. The user + * should check for a possible memory allocation error return; otherwise, a zero + * indicates success. + */ +static int isci_register_sas_ha(struct isci_host *isci_host) +{ + int i; + struct sas_ha_struct *sas_ha = &(isci_host->sas_ha); + struct asd_sas_phy **sas_phys; + struct asd_sas_port **sas_ports; + + sas_phys = devm_kzalloc(&isci_host->pdev->dev, + SCI_MAX_PHYS * sizeof(void *), + GFP_KERNEL); + if (!sas_phys) + return -ENOMEM; + + sas_ports = devm_kzalloc(&isci_host->pdev->dev, + SCI_MAX_PORTS * sizeof(void *), + GFP_KERNEL); + if (!sas_ports) + return -ENOMEM; + + /*----------------- Libsas Initialization Stuff---------------------- + * Set various fields in the sas_ha struct: + */ + + sas_ha->sas_ha_name = DRV_NAME; + sas_ha->lldd_module = THIS_MODULE; + sas_ha->sas_addr = &isci_host->phys[0].sas_addr[0]; + + /* set the array of phy and port structs.
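	 * The mapping is index-for-index: sas_phys[i] and sas_ports[i] refer
	 * to ihost->phys[i] and ihost->ports[i] respectively (SCI_MAX_PORTS
	 * equals SCI_MAX_PHYS), and num_phys is set to SCI_MAX_PHYS below.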
*/ + for (i = 0; i < SCI_MAX_PHYS; i++) { + sas_phys[i] = &isci_host->phys[i].sas_phy; + sas_ports[i] = &isci_host->ports[i].sas_port; + } + + sas_ha->sas_phy = sas_phys; + sas_ha->sas_port = sas_ports; + sas_ha->num_phys = SCI_MAX_PHYS; + + sas_ha->lldd_queue_size = ISCI_CAN_QUEUE_VAL; + sas_ha->lldd_max_execute_num = 1; + sas_ha->strict_wide_ports = 1; + + sas_register_ha(sas_ha); + + return 0; +} + +static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev); + struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); + struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha); + + return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id); +} + +static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL); + +static void isci_unregister(struct isci_host *isci_host) +{ + struct Scsi_Host *shost; + + if (!isci_host) + return; + + shost = isci_host->shost; + device_remove_file(&shost->shost_dev, &dev_attr_isci_id); + + sas_unregister_ha(&isci_host->sas_ha); + + sas_remove_host(isci_host->shost); + scsi_remove_host(isci_host->shost); + scsi_host_put(isci_host->shost); +} + +static int __devinit isci_pci_init(struct pci_dev *pdev) +{ + int err, bar_num, bar_mask = 0; + void __iomem * const *iomap; + + err = pcim_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, + "failed enable PCI device %s!\n", + pci_name(pdev)); + return err; + } + + for (bar_num = 0; bar_num < SCI_PCI_BAR_COUNT; bar_num++) + bar_mask |= 1 << (bar_num * 2); + + err = pcim_iomap_regions(pdev, bar_mask, DRV_NAME); + if (err) + return err; + + iomap = pcim_iomap_table(pdev); + if (!iomap) + return -ENOMEM; + + pci_set_master(pdev); + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) + return err; + } + + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) + return err; + } + + return 0; +} + +static int num_controllers(struct pci_dev *pdev) +{ + /* bar size alone can tell us if we are running with a dual controller + * part, no need to trust revision ids that might be under broken firmware + * control + */ + resource_size_t scu_bar_size = pci_resource_len(pdev, SCI_SCU_BAR*2); + resource_size_t smu_bar_size = pci_resource_len(pdev, SCI_SMU_BAR*2); + + if (scu_bar_size >= SCI_SCU_BAR_SIZE*SCI_MAX_CONTROLLERS && + smu_bar_size >= SCI_SMU_BAR_SIZE*SCI_MAX_CONTROLLERS) + return SCI_MAX_CONTROLLERS; + else + return 1; +} + +static int isci_setup_interrupts(struct pci_dev *pdev) +{ + int err, i, num_msix; + struct isci_host *ihost; + struct isci_pci_info *pci_info = to_pci_info(pdev); + + /* + * Determine the number of vectors associated with this + * PCI function. 
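	 * With SCI_NUM_MSI_X_INT == 2, each controller gets two vectors: the
	 * even vector handles normal completions (isci_msix_isr) and the odd
	 * vector handles error events (isci_error_isr), so on a dual-controller
	 * part vectors 0/1 belong to controller 0 and vectors 2/3 to
	 * controller 1 (id = vector / SCI_NUM_MSI_X_INT).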
+ */ + num_msix = num_controllers(pdev) * SCI_NUM_MSI_X_INT; + + for (i = 0; i < num_msix; i++) + pci_info->msix_entries[i].entry = i; + + err = pci_enable_msix(pdev, pci_info->msix_entries, num_msix); + if (err) + goto intx; + + for (i = 0; i < num_msix; i++) { + int id = i / SCI_NUM_MSI_X_INT; + struct msix_entry *msix = &pci_info->msix_entries[i]; + irq_handler_t isr; + + ihost = pci_info->hosts[id]; + /* odd numbered vectors are error interrupts */ + if (i & 1) + isr = isci_error_isr; + else + isr = isci_msix_isr; + + err = devm_request_irq(&pdev->dev, msix->vector, isr, 0, + DRV_NAME"-msix", ihost); + if (!err) + continue; + + dev_info(&pdev->dev, "msix setup failed falling back to intx\n"); + while (i--) { + id = i / SCI_NUM_MSI_X_INT; + ihost = pci_info->hosts[id]; + msix = &pci_info->msix_entries[i]; + devm_free_irq(&pdev->dev, msix->vector, ihost); + } + pci_disable_msix(pdev); + goto intx; + } + return 0; + + intx: + for_each_isci_host(i, ihost, pdev) { + err = devm_request_irq(&pdev->dev, pdev->irq, isci_intx_isr, + IRQF_SHARED, DRV_NAME"-intx", ihost); + if (err) + break; + } + return err; +} + +static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id) +{ + struct isci_host *isci_host; + struct Scsi_Host *shost; + int err; + + isci_host = devm_kzalloc(&pdev->dev, sizeof(*isci_host), GFP_KERNEL); + if (!isci_host) + return NULL; + + isci_host->pdev = pdev; + isci_host->id = id; + + shost = scsi_host_alloc(&isci_sht, sizeof(void *)); + if (!shost) + return NULL; + isci_host->shost = shost; + + err = isci_host_init(isci_host); + if (err) + goto err_shost; + + SHOST_TO_SAS_HA(shost) = &isci_host->sas_ha; + isci_host->sas_ha.core.shost = shost; + shost->transportt = isci_transport_template; + + shost->max_id = ~0; + shost->max_lun = ~0; + shost->max_cmd_len = MAX_COMMAND_SIZE; + + err = scsi_add_host(shost, &pdev->dev); + if (err) + goto err_shost; + + err = isci_register_sas_ha(isci_host); + if (err) + goto err_shost_remove; + + err = device_create_file(&shost->shost_dev, &dev_attr_isci_id); + if (err) + goto err_unregister_ha; + + return isci_host; + + err_unregister_ha: + sas_unregister_ha(&(isci_host->sas_ha)); + err_shost_remove: + scsi_remove_host(shost); + err_shost: + scsi_host_put(shost); + + return NULL; +} + +static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct isci_pci_info *pci_info; + int err, i; + struct isci_host *isci_host; + const struct firmware *fw = NULL; + struct isci_orom *orom = NULL; + char *source = "(platform)"; + + dev_info(&pdev->dev, "driver configured for rev: %d silicon\n", + pdev->revision); + + pci_info = devm_kzalloc(&pdev->dev, sizeof(*pci_info), GFP_KERNEL); + if (!pci_info) + return -ENOMEM; + pci_set_drvdata(pdev, pci_info); + + if (efi_enabled) + orom = isci_get_efi_var(pdev); + + if (!orom) + orom = isci_request_oprom(pdev); + + for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) { + if (sci_oem_parameters_validate(&orom->ctrl[i])) { + dev_warn(&pdev->dev, + "[%d]: invalid oem parameters detected, falling back to firmware\n", i); + devm_kfree(&pdev->dev, orom); + orom = NULL; + break; + } + } + + if (!orom) { + source = "(firmware)"; + orom = isci_request_firmware(pdev, fw); + if (!orom) { + /* TODO convert this to WARN_TAINT_ONCE once the + * orom/efi parameter support is widely available + */ + dev_warn(&pdev->dev, + "Loading user firmware failed, using default " + "values\n"); + dev_warn(&pdev->dev, + "Default OEM configuration being used: 4 " + "narrow ports, and default SAS 
Addresses\n"); + } + } + + if (orom) + dev_info(&pdev->dev, + "OEM SAS parameters (version: %u.%u) loaded %s\n", + (orom->hdr.version & 0xf0) >> 4, + (orom->hdr.version & 0xf), source); + + pci_info->orom = orom; + + err = isci_pci_init(pdev); + if (err) + return err; + + for (i = 0; i < num_controllers(pdev); i++) { + struct isci_host *h = isci_host_alloc(pdev, i); + + if (!h) { + err = -ENOMEM; + goto err_host_alloc; + } + pci_info->hosts[i] = h; + } + + err = isci_setup_interrupts(pdev); + if (err) + goto err_host_alloc; + + for_each_isci_host(i, isci_host, pdev) + scsi_scan_host(isci_host->shost); + + return 0; + + err_host_alloc: + for_each_isci_host(i, isci_host, pdev) + isci_unregister(isci_host); + return err; +} + +static void __devexit isci_pci_remove(struct pci_dev *pdev) +{ + struct isci_host *ihost; + int i; + + for_each_isci_host(i, ihost, pdev) { + isci_unregister(ihost); + isci_host_deinit(ihost); + sci_controller_disable_interrupts(ihost); + } +} + +static struct pci_driver isci_pci_driver = { + .name = DRV_NAME, + .id_table = isci_id_table, + .probe = isci_pci_probe, + .remove = __devexit_p(isci_pci_remove), +}; + +static __init int isci_init(void) +{ + int err; + + pr_info("%s: Intel(R) C600 SAS Controller Driver\n", DRV_NAME); + + isci_transport_template = sas_domain_attach_transport(&isci_transport_ops); + if (!isci_transport_template) + return -ENOMEM; + + err = pci_register_driver(&isci_pci_driver); + if (err) + sas_release_transport(isci_transport_template); + + return err; +} + +static __exit void isci_exit(void) +{ + pci_unregister_driver(&isci_pci_driver); + sas_release_transport(isci_transport_template); +} + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_FIRMWARE(ISCI_FW_NAME); +module_init(isci_init); +module_exit(isci_exit); diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h new file mode 100644 index 00000000000..d1de63312e7 --- /dev/null +++ b/drivers/scsi/isci/isci.h @@ -0,0 +1,538 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __ISCI_H__ +#define __ISCI_H__ + +#include <linux/interrupt.h> +#include <linux/types.h> + +#define DRV_NAME "isci" +#define SCI_PCI_BAR_COUNT 2 +#define SCI_NUM_MSI_X_INT 2 +#define SCI_SMU_BAR 0 +#define SCI_SMU_BAR_SIZE (16*1024) +#define SCI_SCU_BAR 1 +#define SCI_SCU_BAR_SIZE (4*1024*1024) +#define SCI_IO_SPACE_BAR0 2 +#define SCI_IO_SPACE_BAR1 3 +#define ISCI_CAN_QUEUE_VAL 250 /* < SCI_MAX_IO_REQUESTS ? */ +#define SCIC_CONTROLLER_STOP_TIMEOUT 5000 + +#define SCI_CONTROLLER_INVALID_IO_TAG 0xFFFF + +#define SCI_MAX_PHYS (4UL) +#define SCI_MAX_PORTS SCI_MAX_PHYS +#define SCI_MAX_SMP_PHYS (384) /* not silicon constrained */ +#define SCI_MAX_REMOTE_DEVICES (256UL) +#define SCI_MAX_IO_REQUESTS (256UL) +#define SCI_MAX_SEQ (16) +#define SCI_MAX_MSIX_MESSAGES (2) +#define SCI_MAX_SCATTER_GATHER_ELEMENTS 130 /* not silicon constrained */ +#define SCI_MAX_CONTROLLERS 2 +#define SCI_MAX_DOMAINS SCI_MAX_PORTS + +#define SCU_MAX_CRITICAL_NOTIFICATIONS (384) +#define SCU_MAX_EVENTS_SHIFT (7) +#define SCU_MAX_EVENTS (1 << SCU_MAX_EVENTS_SHIFT) +#define SCU_MAX_UNSOLICITED_FRAMES (128) +#define SCU_MAX_COMPLETION_QUEUE_SCRATCH (128) +#define SCU_MAX_COMPLETION_QUEUE_ENTRIES (SCU_MAX_CRITICAL_NOTIFICATIONS \ + + SCU_MAX_EVENTS \ + + SCU_MAX_UNSOLICITED_FRAMES \ + + SCI_MAX_IO_REQUESTS \ + + SCU_MAX_COMPLETION_QUEUE_SCRATCH) +#define SCU_MAX_COMPLETION_QUEUE_SHIFT (ilog2(SCU_MAX_COMPLETION_QUEUE_ENTRIES)) + +#define SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES (4096) +#define SCU_UNSOLICITED_FRAME_BUFFER_SIZE (1024) +#define SCU_INVALID_FRAME_INDEX (0xFFFF) + +#define SCU_IO_REQUEST_MAX_SGE_SIZE (0x00FFFFFF) +#define SCU_IO_REQUEST_MAX_TRANSFER_LENGTH (0x00FFFFFF) + +static inline void check_sizes(void) +{ + BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_EVENTS); + BUILD_BUG_ON(SCU_MAX_UNSOLICITED_FRAMES <= 8); + BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_UNSOLICITED_FRAMES); + BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_COMPLETION_QUEUE_ENTRIES); + BUILD_BUG_ON(SCU_MAX_UNSOLICITED_FRAMES > SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES); + BUILD_BUG_ON_NOT_POWER_OF_2(SCI_MAX_IO_REQUESTS); + BUILD_BUG_ON_NOT_POWER_OF_2(SCI_MAX_SEQ); +} + +/** + * enum sci_status - This is the general return status enumeration for non-IO, + * non-task management related SCI interface methods. 
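 *
 * Callers generally treat SCI_SUCCESS (value 0) as the only unqualified
 * success code and propagate anything else, e.g.:
 *
 *	status = sci_phy_start(iphy);
 *	if (status != SCI_SUCCESS)
 *		return status;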
+ * + * + */ +enum sci_status { + /** + * This member indicates successful completion. + */ + SCI_SUCCESS = 0, + + /** + * This value indicates that the calling method completed successfully, + * but that the IO may have completed before having it's start method + * invoked. This occurs during SAT translation for requests that do + * not require an IO to the target or for any other requests that may + * be completed without having to submit IO. + */ + SCI_SUCCESS_IO_COMPLETE_BEFORE_START, + + /** + * This Value indicates that the SCU hardware returned an early response + * because the io request specified more data than is returned by the + * target device (mode pages, inquiry data, etc.). The completion routine + * will handle this case to get the actual number of bytes transferred. + */ + SCI_SUCCESS_IO_DONE_EARLY, + + /** + * This member indicates that the object for which a state change is + * being requested is already in said state. + */ + SCI_WARNING_ALREADY_IN_STATE, + + /** + * This member indicates interrupt coalescence timer may cause SAS + * specification compliance issues (i.e. SMP target mode response + * frames must be returned within 1.9 milliseconds). + */ + SCI_WARNING_TIMER_CONFLICT, + + /** + * This field indicates a sequence of action is not completed yet. Mostly, + * this status is used when multiple ATA commands are needed in a SATI translation. + */ + SCI_WARNING_SEQUENCE_INCOMPLETE, + + /** + * This member indicates that there was a general failure. + */ + SCI_FAILURE, + + /** + * This member indicates that the SCI implementation is unable to complete + * an operation due to a critical flaw the prevents any further operation + * (i.e. an invalid pointer). + */ + SCI_FATAL_ERROR, + + /** + * This member indicates the calling function failed, because the state + * of the controller is in a state that prevents successful completion. + */ + SCI_FAILURE_INVALID_STATE, + + /** + * This member indicates the calling function failed, because there is + * insufficient resources/memory to complete the request. + */ + SCI_FAILURE_INSUFFICIENT_RESOURCES, + + /** + * This member indicates the calling function failed, because the + * controller object required for the operation can't be located. + */ + SCI_FAILURE_CONTROLLER_NOT_FOUND, + + /** + * This member indicates the calling function failed, because the + * discovered controller type is not supported by the library. + */ + SCI_FAILURE_UNSUPPORTED_CONTROLLER_TYPE, + + /** + * This member indicates the calling function failed, because the + * requested initialization data version isn't supported. + */ + SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION, + + /** + * This member indicates the calling function failed, because the + * requested configuration of SAS Phys into SAS Ports is not supported. + */ + SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION, + + /** + * This member indicates the calling function failed, because the + * requested protocol is not supported by the remote device, port, + * or controller. + */ + SCI_FAILURE_UNSUPPORTED_PROTOCOL, + + /** + * This member indicates the calling function failed, because the + * requested information type is not supported by the SCI implementation. + */ + SCI_FAILURE_UNSUPPORTED_INFORMATION_TYPE, + + /** + * This member indicates the calling function failed, because the + * device already exists. + */ + SCI_FAILURE_DEVICE_EXISTS, + + /** + * This member indicates the calling function failed, because adding + * a phy to the object is not possible. 
+ */ + SCI_FAILURE_ADDING_PHY_UNSUPPORTED, + + /** + * This member indicates the calling function failed, because the + * requested information type is not supported by the SCI implementation. + */ + SCI_FAILURE_UNSUPPORTED_INFORMATION_FIELD, + + /** + * This member indicates the calling function failed, because the SCI + * implementation does not support the supplied time limit. + */ + SCI_FAILURE_UNSUPPORTED_TIME_LIMIT, + + /** + * This member indicates the calling method failed, because the SCI + * implementation does not contain the specified Phy. + */ + SCI_FAILURE_INVALID_PHY, + + /** + * This member indicates the calling method failed, because the SCI + * implementation does not contain the specified Port. + */ + SCI_FAILURE_INVALID_PORT, + + /** + * This member indicates the calling method was partly successful + * The port was reset but not all phys in port are operational + */ + SCI_FAILURE_RESET_PORT_PARTIAL_SUCCESS, + + /** + * This member indicates that calling method failed + * The port reset did not complete because none of the phys are operational + */ + SCI_FAILURE_RESET_PORT_FAILURE, + + /** + * This member indicates the calling method failed, because the SCI + * implementation does not contain the specified remote device. + */ + SCI_FAILURE_INVALID_REMOTE_DEVICE, + + /** + * This member indicates the calling method failed, because the remote + * device is in a bad state and requires a reset. + */ + SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED, + + /** + * This member indicates the calling method failed, because the SCI + * implementation does not contain or support the specified IO tag. + */ + SCI_FAILURE_INVALID_IO_TAG, + + /** + * This member indicates that the operation failed and the user should + * check the response data associated with the IO. + */ + SCI_FAILURE_IO_RESPONSE_VALID, + + /** + * This member indicates that the operation failed, the failure is + * controller implementation specific, and the response data associated + * with the request is not valid. You can query for the controller + * specific error information via sci_controller_get_request_status() + */ + SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR, + + /** + * This member indicated that the operation failed because the + * user requested this IO to be terminated. + */ + SCI_FAILURE_IO_TERMINATED, + + /** + * This member indicates that the operation failed and the associated + * request requires a SCSI abort task to be sent to the target. + */ + SCI_FAILURE_IO_REQUIRES_SCSI_ABORT, + + /** + * This member indicates that the operation failed because the supplied + * device could not be located. + */ + SCI_FAILURE_DEVICE_NOT_FOUND, + + /** + * This member indicates that the operation failed because the + * objects association is required and is not correctly set. + */ + SCI_FAILURE_INVALID_ASSOCIATION, + + /** + * This member indicates that the operation failed, because a timeout + * occurred. + */ + SCI_FAILURE_TIMEOUT, + + /** + * This member indicates that the operation failed, because the user + * specified a value that is either invalid or not supported. + */ + SCI_FAILURE_INVALID_PARAMETER_VALUE, + + /** + * This value indicates that the operation failed, because the number + * of messages (MSI-X) is not supported. + */ + SCI_FAILURE_UNSUPPORTED_MESSAGE_COUNT, + + /** + * This value indicates that the method failed due to a lack of + * available NCQ tags. + */ + SCI_FAILURE_NO_NCQ_TAG_AVAILABLE, + + /** + * This value indicates that a protocol violation has occurred on the + * link. 
+ */ + SCI_FAILURE_PROTOCOL_VIOLATION, + + /** + * This value indicates a failure condition that retry may help to clear. + */ + SCI_FAILURE_RETRY_REQUIRED, + + /** + * This field indicates the retry limit was reached when a retry is attempted + */ + SCI_FAILURE_RETRY_LIMIT_REACHED, + + /** + * This member indicates the calling method was partly successful. + * Mostly, this status is used when a LUN_RESET issued to an expander attached + * STP device in READY NCQ substate needs to have RNC suspended/resumed + * before posting TC. + */ + SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS, + + /** + * This field indicates an illegal phy connection based on the routing attribute + * of both expander phy attached to each other. + */ + SCI_FAILURE_ILLEGAL_ROUTING_ATTRIBUTE_CONFIGURATION, + + /** + * This field indicates a CONFIG ROUTE INFO command has a response with function result + * INDEX DOES NOT EXIST, usually means exceeding max route index. + */ + SCI_FAILURE_EXCEED_MAX_ROUTE_INDEX, + + /** + * This value indicates that an unsupported PCI device ID has been + * specified. This indicates that attempts to invoke + * sci_library_allocate_controller() will fail. + */ + SCI_FAILURE_UNSUPPORTED_PCI_DEVICE_ID + +}; + +/** + * enum sci_io_status - This enumeration depicts all of the possible IO + * completion status values. Each value in this enumeration maps directly + * to a value in the enum sci_status enumeration. Please refer to that + * enumeration for detailed comments concerning what the status represents. + * + * Add the API to retrieve the SCU status from the core. Check to see that the + * following status are properly handled: - SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL + * - SCI_IO_FAILURE_INVALID_IO_TAG + */ +enum sci_io_status { + SCI_IO_SUCCESS = SCI_SUCCESS, + SCI_IO_FAILURE = SCI_FAILURE, + SCI_IO_SUCCESS_COMPLETE_BEFORE_START = SCI_SUCCESS_IO_COMPLETE_BEFORE_START, + SCI_IO_SUCCESS_IO_DONE_EARLY = SCI_SUCCESS_IO_DONE_EARLY, + SCI_IO_FAILURE_INVALID_STATE = SCI_FAILURE_INVALID_STATE, + SCI_IO_FAILURE_INSUFFICIENT_RESOURCES = SCI_FAILURE_INSUFFICIENT_RESOURCES, + SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL = SCI_FAILURE_UNSUPPORTED_PROTOCOL, + SCI_IO_FAILURE_RESPONSE_VALID = SCI_FAILURE_IO_RESPONSE_VALID, + SCI_IO_FAILURE_CONTROLLER_SPECIFIC_ERR = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR, + SCI_IO_FAILURE_TERMINATED = SCI_FAILURE_IO_TERMINATED, + SCI_IO_FAILURE_REQUIRES_SCSI_ABORT = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT, + SCI_IO_FAILURE_INVALID_PARAMETER_VALUE = SCI_FAILURE_INVALID_PARAMETER_VALUE, + SCI_IO_FAILURE_NO_NCQ_TAG_AVAILABLE = SCI_FAILURE_NO_NCQ_TAG_AVAILABLE, + SCI_IO_FAILURE_PROTOCOL_VIOLATION = SCI_FAILURE_PROTOCOL_VIOLATION, + + SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED, + + SCI_IO_FAILURE_RETRY_REQUIRED = SCI_FAILURE_RETRY_REQUIRED, + SCI_IO_FAILURE_RETRY_LIMIT_REACHED = SCI_FAILURE_RETRY_LIMIT_REACHED, + SCI_IO_FAILURE_INVALID_REMOTE_DEVICE = SCI_FAILURE_INVALID_REMOTE_DEVICE +}; + +/** + * enum sci_task_status - This enumeration depicts all of the possible task + * completion status values. Each value in this enumeration maps directly + * to a value in the enum sci_status enumeration. Please refer to that + * enumeration for detailed comments concerning what the status represents. 
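 *
 * Because each member is defined directly from its enum sci_status
 * counterpart, a value such as SCI_TASK_FAILURE_INVALID_STATE compares
 * equal to SCI_FAILURE_INVALID_STATE, so status checks can be shared
 * between the IO and task management paths.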
+ * + * Check to see that the following status are properly handled: + */ +enum sci_task_status { + SCI_TASK_SUCCESS = SCI_SUCCESS, + SCI_TASK_FAILURE = SCI_FAILURE, + SCI_TASK_FAILURE_INVALID_STATE = SCI_FAILURE_INVALID_STATE, + SCI_TASK_FAILURE_INSUFFICIENT_RESOURCES = SCI_FAILURE_INSUFFICIENT_RESOURCES, + SCI_TASK_FAILURE_UNSUPPORTED_PROTOCOL = SCI_FAILURE_UNSUPPORTED_PROTOCOL, + SCI_TASK_FAILURE_INVALID_TAG = SCI_FAILURE_INVALID_IO_TAG, + SCI_TASK_FAILURE_RESPONSE_VALID = SCI_FAILURE_IO_RESPONSE_VALID, + SCI_TASK_FAILURE_CONTROLLER_SPECIFIC_ERR = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR, + SCI_TASK_FAILURE_TERMINATED = SCI_FAILURE_IO_TERMINATED, + SCI_TASK_FAILURE_INVALID_PARAMETER_VALUE = SCI_FAILURE_INVALID_PARAMETER_VALUE, + + SCI_TASK_FAILURE_REMOTE_DEVICE_RESET_REQUIRED = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED, + SCI_TASK_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS = SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS + +}; + +/** + * sci_swab32_cpy - convert between scsi and scu-hardware byte format + * @dest: receive the 4-byte endian swapped version of src + * @src: word aligned source buffer + * + * scu hardware handles SSP/SMP control, response, and unidentified + * frames in "big endian dword" order. Regardless of host endian this + * is always a swab32()-per-dword conversion of the standard definition, + * i.e. single byte fields swapped and multi-byte fields in little- + * endian + */ +static inline void sci_swab32_cpy(void *_dest, void *_src, ssize_t word_cnt) +{ + u32 *dest = _dest, *src = _src; + + while (--word_cnt >= 0) + dest[word_cnt] = swab32(src[word_cnt]); +} + +extern unsigned char no_outbound_task_to; +extern u16 ssp_max_occ_to; +extern u16 stp_max_occ_to; +extern u16 ssp_inactive_to; +extern u16 stp_inactive_to; +extern unsigned char phy_gen; +extern unsigned char max_concurr_spinup; + +irqreturn_t isci_msix_isr(int vec, void *data); +irqreturn_t isci_intx_isr(int vec, void *data); +irqreturn_t isci_error_isr(int vec, void *data); + +/* + * Each timer is associated with a cancellation flag that is set when + * del_timer() is called and checked in the timer callback function. This + * is needed since del_timer_sync() cannot be called with sci_lock held. + * For deinit however, del_timer_sync() is used without holding the lock. 
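 *
 * A timer callback is therefore expected to follow this pattern (a sketch
 * only; 'my_timeout' and the way the lock is looked up are illustrative):
 *
 *	static void my_timeout(unsigned long data)
 *	{
 *		struct sci_timer *tmr = (struct sci_timer *)data;
 *		... derive the owning object, e.g. via container_of(tmr, ...) ...
 *		spin_lock_irqsave(&ihost->scic_lock, flags);
 *		if (tmr->cancel)
 *			goto done;
 *		... handle the timeout ...
 *	done:
 *		spin_unlock_irqrestore(&ihost->scic_lock, flags);
 *	}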
+ */ +struct sci_timer { + struct timer_list timer; + bool cancel; +}; + +static inline +void sci_init_timer(struct sci_timer *tmr, void (*fn)(unsigned long)) +{ + tmr->timer.function = fn; + tmr->timer.data = (unsigned long) tmr; + tmr->cancel = 0; + init_timer(&tmr->timer); +} + +static inline void sci_mod_timer(struct sci_timer *tmr, unsigned long msec) +{ + tmr->cancel = 0; + mod_timer(&tmr->timer, jiffies + msecs_to_jiffies(msec)); +} + +static inline void sci_del_timer(struct sci_timer *tmr) +{ + tmr->cancel = 1; + del_timer(&tmr->timer); +} + +struct sci_base_state_machine { + const struct sci_base_state *state_table; + u32 initial_state_id; + u32 current_state_id; + u32 previous_state_id; +}; + +typedef void (*sci_state_transition_t)(struct sci_base_state_machine *sm); + +struct sci_base_state { + sci_state_transition_t enter_state; /* Called on state entry */ + sci_state_transition_t exit_state; /* Called on state exit */ +}; + +extern void sci_init_sm(struct sci_base_state_machine *sm, + const struct sci_base_state *state_table, + u32 initial_state); +extern void sci_change_state(struct sci_base_state_machine *sm, u32 next_state); +#endif /* __ISCI_H__ */ diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c new file mode 100644 index 00000000000..79313a7a235 --- /dev/null +++ b/drivers/scsi/isci/phy.c @@ -0,0 +1,1312 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "isci.h" +#include "host.h" +#include "phy.h" +#include "scu_event_codes.h" +#include "probe_roms.h" + +/* Maximum arbitration wait time in micro-seconds */ +#define SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME (700) + +enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy) +{ + return iphy->max_negotiated_speed; +} + +static enum sci_status +sci_phy_transport_layer_initialization(struct isci_phy *iphy, + struct scu_transport_layer_registers __iomem *reg) +{ + u32 tl_control; + + iphy->transport_layer_registers = reg; + + writel(SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX, + &iphy->transport_layer_registers->stp_rni); + + /* + * Hardware team recommends that we enable the STP prefetch for all + * transports + */ + tl_control = readl(&iphy->transport_layer_registers->control); + tl_control |= SCU_TLCR_GEN_BIT(STP_WRITE_DATA_PREFETCH); + writel(tl_control, &iphy->transport_layer_registers->control); + + return SCI_SUCCESS; +} + +static enum sci_status +sci_phy_link_layer_initialization(struct isci_phy *iphy, + struct scu_link_layer_registers __iomem *reg) +{ + struct isci_host *ihost = iphy->owning_port->owning_controller; + int phy_idx = iphy->phy_index; + struct sci_phy_user_params *phy_user = &ihost->user_parameters.phys[phy_idx]; + struct sci_phy_oem_params *phy_oem = + &ihost->oem_parameters.phys[phy_idx]; + u32 phy_configuration; + struct sci_phy_cap phy_cap; + u32 parity_check = 0; + u32 parity_count = 0; + u32 llctl, link_rate; + u32 clksm_value = 0; + + iphy->link_layer_registers = reg; + + /* Set our IDENTIFY frame data */ + #define SCI_END_DEVICE 0x01 + + writel(SCU_SAS_TIID_GEN_BIT(SMP_INITIATOR) | + SCU_SAS_TIID_GEN_BIT(SSP_INITIATOR) | + SCU_SAS_TIID_GEN_BIT(STP_INITIATOR) | + SCU_SAS_TIID_GEN_BIT(DA_SATA_HOST) | + SCU_SAS_TIID_GEN_VAL(DEVICE_TYPE, SCI_END_DEVICE), + &iphy->link_layer_registers->transmit_identification); + + /* Write the device SAS Address */ + writel(0xFEDCBA98, + &iphy->link_layer_registers->sas_device_name_high); + writel(phy_idx, &iphy->link_layer_registers->sas_device_name_low); + + /* Write the source SAS Address */ + writel(phy_oem->sas_address.high, + &iphy->link_layer_registers->source_sas_address_high); + writel(phy_oem->sas_address.low, + &iphy->link_layer_registers->source_sas_address_low); + + /* Clear and Set the PHY Identifier */ + writel(0, &iphy->link_layer_registers->identify_frame_phy_id); + writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx), + &iphy->link_layer_registers->identify_frame_phy_id); + + /* Change the initial state of the phy configuration register */ + phy_configuration = + readl(&iphy->link_layer_registers->phy_configuration); + + /* Hold OOB state machine in reset */ + phy_configuration |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET); + writel(phy_configuration, + &iphy->link_layer_registers->phy_configuration); + + /* Configure the SNW capabilities */ + phy_cap.all = 0; + phy_cap.start = 1; + phy_cap.gen3_no_ssc = 1; + phy_cap.gen2_no_ssc = 1; + phy_cap.gen1_no_ssc = 1; + if 
(ihost->oem_parameters.controller.do_enable_ssc == true) { + phy_cap.gen3_ssc = 1; + phy_cap.gen2_ssc = 1; + phy_cap.gen1_ssc = 1; + } + + /* + * The SAS specification indicates that the phy_capabilities that + * are transmitted shall have an even parity. Calculate the parity. */ + parity_check = phy_cap.all; + while (parity_check != 0) { + if (parity_check & 0x1) + parity_count++; + parity_check >>= 1; + } + + /* + * If parity indicates there are an odd number of bits set, then + * set the parity bit to 1 in the phy capabilities. */ + if ((parity_count % 2) != 0) + phy_cap.parity = 1; + + writel(phy_cap.all, &iphy->link_layer_registers->phy_capabilities); + + /* Set the enable spinup period but disable the ability to send + * notify enable spinup + */ + writel(SCU_ENSPINUP_GEN_VAL(COUNT, + phy_user->notify_enable_spin_up_insertion_frequency), + &iphy->link_layer_registers->notify_enable_spinup_control); + + /* Write the ALIGN Insertion Ferequency for connected phy and + * inpendent of connected state + */ + clksm_value = SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(CONNECTED, + phy_user->in_connection_align_insertion_frequency); + + clksm_value |= SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(GENERAL, + phy_user->align_insertion_frequency); + + writel(clksm_value, &iphy->link_layer_registers->clock_skew_management); + + /* @todo Provide a way to write this register correctly */ + writel(0x02108421, + &iphy->link_layer_registers->afe_lookup_table_control); + + llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT, + (u8)ihost->user_parameters.no_outbound_task_timeout); + + switch (phy_user->max_speed_generation) { + case SCIC_SDS_PARM_GEN3_SPEED: + link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN3; + break; + case SCIC_SDS_PARM_GEN2_SPEED: + link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN2; + break; + default: + link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1; + break; + } + llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate); + writel(llctl, &iphy->link_layer_registers->link_layer_control); + + if (is_a2(ihost->pdev)) { + /* Program the max ARB time for the PHY to 700us so we inter-operate with + * the PMC expander which shuts down PHYs if the expander PHY generates too + * many breaks. This time value will guarantee that the initiator PHY will + * generate the break. + */ + writel(SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME, + &iphy->link_layer_registers->maximum_arbitration_wait_timer_timeout); + } + + /* Disable link layer hang detection, rely on the OS timeout for I/O timeouts. */ + writel(0, &iphy->link_layer_registers->link_layer_hang_detection_timeout); + + /* We can exit the initial state to the stopped state */ + sci_change_state(&iphy->sm, SCI_PHY_STOPPED); + + return SCI_SUCCESS; +} + +static void phy_sata_timeout(unsigned long data) +{ + struct sci_timer *tmr = (struct sci_timer *)data; + struct isci_phy *iphy = container_of(tmr, typeof(*iphy), sata_timer); + struct isci_host *ihost = iphy->owning_port->owning_controller; + unsigned long flags; + + spin_lock_irqsave(&ihost->scic_lock, flags); + + if (tmr->cancel) + goto done; + + dev_dbg(sciphy_to_dev(iphy), + "%s: SCIC SDS Phy 0x%p did not receive signature fis before " + "timeout.\n", + __func__, + iphy); + + sci_change_state(&iphy->sm, SCI_PHY_STARTING); +done: + spin_unlock_irqrestore(&ihost->scic_lock, flags); +} + +/** + * This method returns the port currently containing this phy. If the phy is + * currently contained by the dummy port, then the phy is considered to not + * be part of a port. 
+ * @sci_phy: This parameter specifies the phy for which to retrieve the + * containing port. + * + * This method returns a handle to a port that contains the supplied phy. + * NULL This value is returned if the phy is not part of a real + * port (i.e. it's contained in the dummy port). !NULL All other + * values indicate a handle/pointer to the port containing the phy. + */ +struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy) +{ + struct isci_port *iport = iphy->owning_port; + + if (iport->physical_port_index == SCIC_SDS_DUMMY_PORT) + return NULL; + + return iphy->owning_port; +} + +/** + * This method will assign a port to the phy object. + * @out]: iphy This parameter specifies the phy for which to assign a port + * object. + * + * + */ +void sci_phy_set_port( + struct isci_phy *iphy, + struct isci_port *iport) +{ + iphy->owning_port = iport; + + if (iphy->bcn_received_while_port_unassigned) { + iphy->bcn_received_while_port_unassigned = false; + sci_port_broadcast_change_received(iphy->owning_port, iphy); + } +} + +enum sci_status sci_phy_initialize(struct isci_phy *iphy, + struct scu_transport_layer_registers __iomem *tl, + struct scu_link_layer_registers __iomem *ll) +{ + /* Perfrom the initialization of the TL hardware */ + sci_phy_transport_layer_initialization(iphy, tl); + + /* Perofrm the initialization of the PE hardware */ + sci_phy_link_layer_initialization(iphy, ll); + + /* There is nothing that needs to be done in this state just + * transition to the stopped state + */ + sci_change_state(&iphy->sm, SCI_PHY_STOPPED); + + return SCI_SUCCESS; +} + +/** + * This method assigns the direct attached device ID for this phy. + * + * @iphy The phy for which the direct attached device id is to + * be assigned. + * @device_id The direct attached device ID to assign to the phy. + * This will either be the RNi for the device or an invalid RNi if there + * is no current device assigned to the phy. 
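sci_phy_set_port() above replays a broadcast-change notification that arrived while the phy had no real port. The same defer-and-replay pattern in a self-contained sketch (all names below are hypothetical stand-ins, not driver symbols):

#include <stdbool.h>
#include <stdio.h>

struct port { const char *name; };
struct phy {
	struct port *owning_port;
	bool bcn_pending;	/* mirrors bcn_received_while_port_unassigned */
};

static void broadcast_change(struct port *p)
{
	printf("notify %s of broadcast change\n", p->name);
}

/* Deliver now if a port is assigned, otherwise remember it for later. */
static void phy_bcn_received(struct phy *phy)
{
	if (phy->owning_port)
		broadcast_change(phy->owning_port);
	else
		phy->bcn_pending = true;
}

/* Assigning a port replays any notification that arrived too early. */
static void phy_set_port(struct phy *phy, struct port *port)
{
	phy->owning_port = port;
	if (phy->bcn_pending) {
		phy->bcn_pending = false;
		broadcast_change(port);
	}
}

int main(void)
{
	struct port p = { "port0" };
	struct phy y = { 0 };

	phy_bcn_received(&y);	/* arrives before port assignment: deferred */
	phy_set_port(&y, &p);	/* replayed here */
	return 0;
}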
+ */ +void sci_phy_setup_transport(struct isci_phy *iphy, u32 device_id) +{ + u32 tl_control; + + writel(device_id, &iphy->transport_layer_registers->stp_rni); + + /* + * The read should guarantee that the first write gets posted + * before the next write + */ + tl_control = readl(&iphy->transport_layer_registers->control); + tl_control |= SCU_TLCR_GEN_BIT(CLEAR_TCI_NCQ_MAPPING_TABLE); + writel(tl_control, &iphy->transport_layer_registers->control); +} + +static void sci_phy_suspend(struct isci_phy *iphy) +{ + u32 scu_sas_pcfg_value; + + scu_sas_pcfg_value = + readl(&iphy->link_layer_registers->phy_configuration); + scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE); + writel(scu_sas_pcfg_value, + &iphy->link_layer_registers->phy_configuration); + + sci_phy_setup_transport(iphy, SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX); +} + +void sci_phy_resume(struct isci_phy *iphy) +{ + u32 scu_sas_pcfg_value; + + scu_sas_pcfg_value = + readl(&iphy->link_layer_registers->phy_configuration); + scu_sas_pcfg_value &= ~SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE); + writel(scu_sas_pcfg_value, + &iphy->link_layer_registers->phy_configuration); +} + +void sci_phy_get_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas) +{ + sas->high = readl(&iphy->link_layer_registers->source_sas_address_high); + sas->low = readl(&iphy->link_layer_registers->source_sas_address_low); +} + +void sci_phy_get_attached_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas) +{ + struct sas_identify_frame *iaf; + + iaf = &iphy->frame_rcvd.iaf; + memcpy(sas, iaf->sas_addr, SAS_ADDR_SIZE); +} + +void sci_phy_get_protocols(struct isci_phy *iphy, struct sci_phy_proto *proto) +{ + proto->all = readl(&iphy->link_layer_registers->transmit_identification); +} + +enum sci_status sci_phy_start(struct isci_phy *iphy) +{ + enum sci_phy_states state = iphy->sm.current_state_id; + + if (state != SCI_PHY_STOPPED) { + dev_dbg(sciphy_to_dev(iphy), + "%s: in wrong state: %d\n", __func__, state); + return SCI_FAILURE_INVALID_STATE; + } + + sci_change_state(&iphy->sm, SCI_PHY_STARTING); + return SCI_SUCCESS; +} + +enum sci_status sci_phy_stop(struct isci_phy *iphy) +{ + enum sci_phy_states state = iphy->sm.current_state_id; + + switch (state) { + case SCI_PHY_SUB_INITIAL: + case SCI_PHY_SUB_AWAIT_OSSP_EN: + case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN: + case SCI_PHY_SUB_AWAIT_SAS_POWER: + case SCI_PHY_SUB_AWAIT_SATA_POWER: + case SCI_PHY_SUB_AWAIT_SATA_PHY_EN: + case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN: + case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: + case SCI_PHY_SUB_FINAL: + case SCI_PHY_READY: + break; + default: + dev_dbg(sciphy_to_dev(iphy), + "%s: in wrong state: %d\n", __func__, state); + return SCI_FAILURE_INVALID_STATE; + } + + sci_change_state(&iphy->sm, SCI_PHY_STOPPED); + return SCI_SUCCESS; +} + +enum sci_status sci_phy_reset(struct isci_phy *iphy) +{ + enum sci_phy_states state = iphy->sm.current_state_id; + + if (state != SCI_PHY_READY) { + dev_dbg(sciphy_to_dev(iphy), + "%s: in wrong state: %d\n", __func__, state); + return SCI_FAILURE_INVALID_STATE; + } + + sci_change_state(&iphy->sm, SCI_PHY_RESETTING); + return SCI_SUCCESS; +} + +enum sci_status sci_phy_consume_power_handler(struct isci_phy *iphy) +{ + enum sci_phy_states state = iphy->sm.current_state_id; + + switch (state) { + case SCI_PHY_SUB_AWAIT_SAS_POWER: { + u32 enable_spinup; + + enable_spinup = readl(&iphy->link_layer_registers->notify_enable_spinup_control); + enable_spinup |= SCU_ENSPINUP_GEN_BIT(ENABLE); + writel(enable_spinup, 
&iphy->link_layer_registers->notify_enable_spinup_control); + + /* Change state to the final state this substate machine has run to completion */ + sci_change_state(&iphy->sm, SCI_PHY_SUB_FINAL); + + return SCI_SUCCESS; + } + case SCI_PHY_SUB_AWAIT_SATA_POWER: { + u32 scu_sas_pcfg_value; + + /* Release the spinup hold state and reset the OOB state machine */ + scu_sas_pcfg_value = + readl(&iphy->link_layer_registers->phy_configuration); + scu_sas_pcfg_value &= + ~(SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD) | SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE)); + scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET); + writel(scu_sas_pcfg_value, + &iphy->link_layer_registers->phy_configuration); + + /* Now restart the OOB operation */ + scu_sas_pcfg_value &= ~SCU_SAS_PCFG_GEN_BIT(OOB_RESET); + scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE); + writel(scu_sas_pcfg_value, + &iphy->link_layer_registers->phy_configuration); + + /* Change state to the final state this substate machine has run to completion */ + sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_PHY_EN); + + return SCI_SUCCESS; + } + default: + dev_dbg(sciphy_to_dev(iphy), + "%s: in wrong state: %d\n", __func__, state); + return SCI_FAILURE_INVALID_STATE; + } +} + +static void sci_phy_start_sas_link_training(struct isci_phy *iphy) +{ + /* continue the link training for the phy as if it were a SAS PHY + * instead of a SATA PHY. This is done because the completion queue had a SAS + * PHY DETECTED event when the state machine was expecting a SATA PHY event. + */ + u32 phy_control; + + phy_control = readl(&iphy->link_layer_registers->phy_configuration); + phy_control |= SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD); + writel(phy_control, + &iphy->link_layer_registers->phy_configuration); + + sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SAS_SPEED_EN); + + iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SAS; +} + +static void sci_phy_start_sata_link_training(struct isci_phy *iphy) +{ + /* This method continues the link training for the phy as if it were a SATA PHY + * instead of a SAS PHY. This is done because the completion queue had a SATA + * SPINUP HOLD event when the state machine was expecting a SAS PHY event. none + */ + sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_POWER); + + iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA; +} + +/** + * sci_phy_complete_link_training - perform processing common to + * all protocols upon completion of link training. + * @sci_phy: This parameter specifies the phy object for which link training + * has completed. + * @max_link_rate: This parameter specifies the maximum link rate to be + * associated with this phy. + * @next_state: This parameter specifies the next state for the phy's starting + * sub-state machine. 
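The SATA power case above releases the spinup hold with two ordered register updates: first park the OOB state machine in reset, then re-enable it. A minimal sketch of that read-modify-write sequence (the bit positions below are invented for illustration; the real ones come from SCU_SAS_PCFG_GEN_BIT()):

#include <stdint.h>
#include <stdio.h>

#define OOB_RESET	(1u << 0)	/* hypothetical bit positions */
#define OOB_ENABLE	(1u << 1)
#define SPINUP_HOLD	(1u << 2)

/* Stand-in for the memory-mapped phy configuration register. */
static uint32_t phy_cfg = SPINUP_HOLD | OOB_ENABLE;

/* Mirror the two writes above: release spinup hold and park OOB in
 * reset, then take OOB out of reset and run it again. */
static void restart_oob(void)
{
	uint32_t v = phy_cfg;

	v &= ~(SPINUP_HOLD | OOB_ENABLE);
	v |= OOB_RESET;
	phy_cfg = v;		/* first write: hold OOB in reset */

	v &= ~OOB_RESET;
	v |= OOB_ENABLE;
	phy_cfg = v;		/* second write: restart OOB */
}

int main(void)
{
	restart_oob();
	printf("phy_cfg = 0x%x\n", phy_cfg);	/* OOB_ENABLE only */
	return 0;
}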
+ * + */ +static void sci_phy_complete_link_training(struct isci_phy *iphy, + enum sas_linkrate max_link_rate, + u32 next_state) +{ + iphy->max_negotiated_speed = max_link_rate; + + sci_change_state(&iphy->sm, next_state); +} + +enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) +{ + enum sci_phy_states state = iphy->sm.current_state_id; + + switch (state) { + case SCI_PHY_SUB_AWAIT_OSSP_EN: + switch (scu_get_event_code(event_code)) { + case SCU_EVENT_SAS_PHY_DETECTED: + sci_phy_start_sas_link_training(iphy); + iphy->is_in_link_training = true; + break; + case SCU_EVENT_SATA_SPINUP_HOLD: + sci_phy_start_sata_link_training(iphy); + iphy->is_in_link_training = true; + break; + default: + dev_dbg(sciphy_to_dev(iphy), + "%s: PHY starting substate machine received " + "unexpected event_code %x\n", + __func__, + event_code); + return SCI_FAILURE; + } + return SCI_SUCCESS; + case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN: + switch (scu_get_event_code(event_code)) { + case SCU_EVENT_SAS_PHY_DETECTED: + /* + * Why is this being reported again by the controller? + * We would re-enter this state so just stay here */ + break; + case SCU_EVENT_SAS_15: + case SCU_EVENT_SAS_15_SSC: + sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS, + SCI_PHY_SUB_AWAIT_IAF_UF); + break; + case SCU_EVENT_SAS_30: + case SCU_EVENT_SAS_30_SSC: + sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS, + SCI_PHY_SUB_AWAIT_IAF_UF); + break; + case SCU_EVENT_SAS_60: + case SCU_EVENT_SAS_60_SSC: + sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS, + SCI_PHY_SUB_AWAIT_IAF_UF); + break; + case SCU_EVENT_SATA_SPINUP_HOLD: + /* + * We were doing SAS PHY link training and received a SATA PHY event + * continue OOB/SN as if this were a SATA PHY */ + sci_phy_start_sata_link_training(iphy); + break; + case SCU_EVENT_LINK_FAILURE: + /* Link failure change state back to the starting state */ + sci_change_state(&iphy->sm, SCI_PHY_STARTING); + break; + default: + dev_warn(sciphy_to_dev(iphy), + "%s: PHY starting substate machine received " + "unexpected event_code %x\n", + __func__, event_code); + + return SCI_FAILURE; + break; + } + return SCI_SUCCESS; + case SCI_PHY_SUB_AWAIT_IAF_UF: + switch (scu_get_event_code(event_code)) { + case SCU_EVENT_SAS_PHY_DETECTED: + /* Backup the state machine */ + sci_phy_start_sas_link_training(iphy); + break; + case SCU_EVENT_SATA_SPINUP_HOLD: + /* We were doing SAS PHY link training and received a + * SATA PHY event continue OOB/SN as if this were a + * SATA PHY + */ + sci_phy_start_sata_link_training(iphy); + break; + case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT: + case SCU_EVENT_LINK_FAILURE: + case SCU_EVENT_HARD_RESET_RECEIVED: + /* Start the oob/sn state machine over again */ + sci_change_state(&iphy->sm, SCI_PHY_STARTING); + break; + default: + dev_warn(sciphy_to_dev(iphy), + "%s: PHY starting substate machine received " + "unexpected event_code %x\n", + __func__, event_code); + return SCI_FAILURE; + } + return SCI_SUCCESS; + case SCI_PHY_SUB_AWAIT_SAS_POWER: + switch (scu_get_event_code(event_code)) { + case SCU_EVENT_LINK_FAILURE: + /* Link failure change state back to the starting state */ + sci_change_state(&iphy->sm, SCI_PHY_STARTING); + break; + default: + dev_warn(sciphy_to_dev(iphy), + "%s: PHY starting substate machine received unexpected " + "event_code %x\n", + __func__, + event_code); + return SCI_FAILURE; + } + return SCI_SUCCESS; + case SCI_PHY_SUB_AWAIT_SATA_POWER: + switch (scu_get_event_code(event_code)) { + case SCU_EVENT_LINK_FAILURE: + /* Link 
failure change state back to the starting state */ + sci_change_state(&iphy->sm, SCI_PHY_STARTING); + break; + case SCU_EVENT_SATA_SPINUP_HOLD: + /* These events are received every 10ms and are + * expected while in this state + */ + break; + + case SCU_EVENT_SAS_PHY_DETECTED: + /* There has been a change in the phy type before OOB/SN for the + * SATA finished start down the SAS link traning path. + */ + sci_phy_start_sas_link_training(iphy); + break; + + default: + dev_warn(sciphy_to_dev(iphy), + "%s: PHY starting substate machine received " + "unexpected event_code %x\n", + __func__, event_code); + + return SCI_FAILURE; + } + return SCI_SUCCESS; + case SCI_PHY_SUB_AWAIT_SATA_PHY_EN: + switch (scu_get_event_code(event_code)) { + case SCU_EVENT_LINK_FAILURE: + /* Link failure change state back to the starting state */ + sci_change_state(&iphy->sm, SCI_PHY_STARTING); + break; + case SCU_EVENT_SATA_SPINUP_HOLD: + /* These events might be received since we dont know how many may be in + * the completion queue while waiting for power + */ + break; + case SCU_EVENT_SATA_PHY_DETECTED: + iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA; + + /* We have received the SATA PHY notification change state */ + sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN); + break; + case SCU_EVENT_SAS_PHY_DETECTED: + /* There has been a change in the phy type before OOB/SN for the + * SATA finished start down the SAS link traning path. + */ + sci_phy_start_sas_link_training(iphy); + break; + default: + dev_warn(sciphy_to_dev(iphy), + "%s: PHY starting substate machine received " + "unexpected event_code %x\n", + __func__, + event_code); + + return SCI_FAILURE;; + } + return SCI_SUCCESS; + case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN: + switch (scu_get_event_code(event_code)) { + case SCU_EVENT_SATA_PHY_DETECTED: + /* + * The hardware reports multiple SATA PHY detected events + * ignore the extras */ + break; + case SCU_EVENT_SATA_15: + case SCU_EVENT_SATA_15_SSC: + sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS, + SCI_PHY_SUB_AWAIT_SIG_FIS_UF); + break; + case SCU_EVENT_SATA_30: + case SCU_EVENT_SATA_30_SSC: + sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS, + SCI_PHY_SUB_AWAIT_SIG_FIS_UF); + break; + case SCU_EVENT_SATA_60: + case SCU_EVENT_SATA_60_SSC: + sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS, + SCI_PHY_SUB_AWAIT_SIG_FIS_UF); + break; + case SCU_EVENT_LINK_FAILURE: + /* Link failure change state back to the starting state */ + sci_change_state(&iphy->sm, SCI_PHY_STARTING); + break; + case SCU_EVENT_SAS_PHY_DETECTED: + /* + * There has been a change in the phy type before OOB/SN for the + * SATA finished start down the SAS link traning path. 
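In the speed-negotiation substates above, each pair of SSC/non-SSC speed events collapses to one negotiated link rate before the state machine advances. The mapping, reduced to a standalone helper (the event and rate names here are illustrative, not the SCU_EVENT_* or SAS_LINK_RATE_* constants):

#include <stdio.h>

enum speed_event { EV_GEN1, EV_GEN1_SSC, EV_GEN2, EV_GEN2_SSC, EV_GEN3, EV_GEN3_SSC };
enum linkrate { RATE_1_5_GBPS, RATE_3_0_GBPS, RATE_6_0_GBPS };

/* Both the SSC and non-SSC flavour of a speed event yield the same rate. */
static enum linkrate event_to_rate(enum speed_event ev)
{
	switch (ev) {
	case EV_GEN1:
	case EV_GEN1_SSC:
		return RATE_1_5_GBPS;
	case EV_GEN2:
	case EV_GEN2_SSC:
		return RATE_3_0_GBPS;
	default:		/* gen3, with or without SSC */
		return RATE_6_0_GBPS;
	}
}

int main(void)
{
	printf("%d\n", event_to_rate(EV_GEN2_SSC));	/* prints 1 (3.0 Gbps) */
	return 0;
}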
*/ + sci_phy_start_sas_link_training(iphy); + break; + default: + dev_warn(sciphy_to_dev(iphy), + "%s: PHY starting substate machine received " + "unexpected event_code %x\n", + __func__, event_code); + + return SCI_FAILURE; + } + + return SCI_SUCCESS; + case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: + switch (scu_get_event_code(event_code)) { + case SCU_EVENT_SATA_PHY_DETECTED: + /* Backup the state machine */ + sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN); + break; + + case SCU_EVENT_LINK_FAILURE: + /* Link failure change state back to the starting state */ + sci_change_state(&iphy->sm, SCI_PHY_STARTING); + break; + + default: + dev_warn(sciphy_to_dev(iphy), + "%s: PHY starting substate machine received " + "unexpected event_code %x\n", + __func__, + event_code); + + return SCI_FAILURE; + } + return SCI_SUCCESS; + case SCI_PHY_READY: + switch (scu_get_event_code(event_code)) { + case SCU_EVENT_LINK_FAILURE: + /* Link failure change state back to the starting state */ + sci_change_state(&iphy->sm, SCI_PHY_STARTING); + break; + case SCU_EVENT_BROADCAST_CHANGE: + /* Broadcast change received. Notify the port. */ + if (phy_get_non_dummy_port(iphy) != NULL) + sci_port_broadcast_change_received(iphy->owning_port, iphy); + else + iphy->bcn_received_while_port_unassigned = true; + break; + default: + dev_warn(sciphy_to_dev(iphy), + "%sP SCIC PHY 0x%p ready state machine received " + "unexpected event_code %x\n", + __func__, iphy, event_code); + return SCI_FAILURE_INVALID_STATE; + } + return SCI_SUCCESS; + case SCI_PHY_RESETTING: + switch (scu_get_event_code(event_code)) { + case SCU_EVENT_HARD_RESET_TRANSMITTED: + /* Link failure change state back to the starting state */ + sci_change_state(&iphy->sm, SCI_PHY_STARTING); + break; + default: + dev_warn(sciphy_to_dev(iphy), + "%s: SCIC PHY 0x%p resetting state machine received " + "unexpected event_code %x\n", + __func__, iphy, event_code); + + return SCI_FAILURE_INVALID_STATE; + break; + } + return SCI_SUCCESS; + default: + dev_dbg(sciphy_to_dev(iphy), + "%s: in wrong state: %d\n", __func__, state); + return SCI_FAILURE_INVALID_STATE; + } +} + +enum sci_status sci_phy_frame_handler(struct isci_phy *iphy, u32 frame_index) +{ + enum sci_phy_states state = iphy->sm.current_state_id; + struct isci_host *ihost = iphy->owning_port->owning_controller; + enum sci_status result; + unsigned long flags; + + switch (state) { + case SCI_PHY_SUB_AWAIT_IAF_UF: { + u32 *frame_words; + struct sas_identify_frame iaf; + + result = sci_unsolicited_frame_control_get_header(&ihost->uf_control, + frame_index, + (void **)&frame_words); + + if (result != SCI_SUCCESS) + return result; + + sci_swab32_cpy(&iaf, frame_words, sizeof(iaf) / sizeof(u32)); + if (iaf.frame_type == 0) { + u32 state; + + spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags); + memcpy(&iphy->frame_rcvd.iaf, &iaf, sizeof(iaf)); + spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags); + if (iaf.smp_tport) { + /* We got the IAF for an expander PHY go to the final + * state since there are no power requirements for + * expander phys. 
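The IAF case above copies the raw unsolicited frame with sci_swab32_cpy() before inspecting its fields. A standalone sketch of a per-dword byte-swapping copy (the driver helper's exact semantics are assumed, not quoted):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Byte-swap each 32-bit word while copying, similar in spirit to the
 * sci_swab32_cpy() call used on the identify address frame. */
static void swab32_cpy(uint32_t *dest, const uint32_t *src, size_t words)
{
	size_t i;

	for (i = 0; i < words; i++) {
		uint32_t v = src[i];

		dest[i] = (v >> 24) | ((v >> 8) & 0x0000ff00u) |
			  ((v << 8) & 0x00ff0000u) | (v << 24);
	}
}

int main(void)
{
	uint32_t in[1] = { 0x11223344u };
	uint32_t out[1];

	swab32_cpy(out, in, 1);
	printf("0x%08x\n", out[0]);	/* 0x44332211 */
	return 0;
}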
+ */ + state = SCI_PHY_SUB_FINAL; + } else { + /* We got the IAF we can now go to the await spinup + * semaphore state + */ + state = SCI_PHY_SUB_AWAIT_SAS_POWER; + } + sci_change_state(&iphy->sm, state); + result = SCI_SUCCESS; + } else + dev_warn(sciphy_to_dev(iphy), + "%s: PHY starting substate machine received " + "unexpected frame id %x\n", + __func__, frame_index); + + sci_controller_release_frame(ihost, frame_index); + return result; + } + case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: { + struct dev_to_host_fis *frame_header; + u32 *fis_frame_data; + + result = sci_unsolicited_frame_control_get_header(&ihost->uf_control, + frame_index, + (void **)&frame_header); + + if (result != SCI_SUCCESS) + return result; + + if ((frame_header->fis_type == FIS_REGD2H) && + !(frame_header->status & ATA_BUSY)) { + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, + frame_index, + (void **)&fis_frame_data); + + spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags); + sci_controller_copy_sata_response(&iphy->frame_rcvd.fis, + frame_header, + fis_frame_data); + spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags); + + /* got IAF we can now go to the await spinup semaphore state */ + sci_change_state(&iphy->sm, SCI_PHY_SUB_FINAL); + + result = SCI_SUCCESS; + } else + dev_warn(sciphy_to_dev(iphy), + "%s: PHY starting substate machine received " + "unexpected frame id %x\n", + __func__, frame_index); + + /* Regardless of the result we are done with this frame with it */ + sci_controller_release_frame(ihost, frame_index); + + return result; + } + default: + dev_dbg(sciphy_to_dev(iphy), + "%s: in wrong state: %d\n", __func__, state); + return SCI_FAILURE_INVALID_STATE; + } + +} + +static void sci_phy_starting_initial_substate_enter(struct sci_base_state_machine *sm) +{ + struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); + + /* This is just an temporary state go off to the starting state */ + sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_OSSP_EN); +} + +static void sci_phy_starting_await_sas_power_substate_enter(struct sci_base_state_machine *sm) +{ + struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); + struct isci_host *ihost = iphy->owning_port->owning_controller; + + sci_controller_power_control_queue_insert(ihost, iphy); +} + +static void sci_phy_starting_await_sas_power_substate_exit(struct sci_base_state_machine *sm) +{ + struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); + struct isci_host *ihost = iphy->owning_port->owning_controller; + + sci_controller_power_control_queue_remove(ihost, iphy); +} + +static void sci_phy_starting_await_sata_power_substate_enter(struct sci_base_state_machine *sm) +{ + struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); + struct isci_host *ihost = iphy->owning_port->owning_controller; + + sci_controller_power_control_queue_insert(ihost, iphy); +} + +static void sci_phy_starting_await_sata_power_substate_exit(struct sci_base_state_machine *sm) +{ + struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); + struct isci_host *ihost = iphy->owning_port->owning_controller; + + sci_controller_power_control_queue_remove(ihost, iphy); +} + +static void sci_phy_starting_await_sata_phy_substate_enter(struct sci_base_state_machine *sm) +{ + struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); + + sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT); +} + +static void sci_phy_starting_await_sata_phy_substate_exit(struct sci_base_state_machine *sm) +{ + struct isci_phy *iphy = container_of(sm, 
typeof(*iphy), sm); + + sci_del_timer(&iphy->sata_timer); +} + +static void sci_phy_starting_await_sata_speed_substate_enter(struct sci_base_state_machine *sm) +{ + struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); + + sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT); +} + +static void sci_phy_starting_await_sata_speed_substate_exit(struct sci_base_state_machine *sm) +{ + struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); + + sci_del_timer(&iphy->sata_timer); +} + +static void sci_phy_starting_await_sig_fis_uf_substate_enter(struct sci_base_state_machine *sm) +{ + struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); + + if (sci_port_link_detected(iphy->owning_port, iphy)) { + + /* + * Clear the PE suspend condition so we can actually + * receive SIG FIS + * The hardware will not respond to the XRDY until the PE + * suspend condition is cleared. + */ + sci_phy_resume(iphy); + + sci_mod_timer(&iphy->sata_timer, + SCIC_SDS_SIGNATURE_FIS_TIMEOUT); + } else + iphy->is_in_link_training = false; +} + +static void sci_phy_starting_await_sig_fis_uf_substate_exit(struct sci_base_state_machine *sm) +{ + struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); + + sci_del_timer(&iphy->sata_timer); +} + +static void sci_phy_starting_final_substate_enter(struct sci_base_state_machine *sm) +{ + struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); + + /* State machine has run to completion so exit out and change + * the base state machine to the ready state + */ + sci_change_state(&iphy->sm, SCI_PHY_READY); +} + +/** + * + * @sci_phy: This is the struct isci_phy object to stop. + * + * This method will stop the struct isci_phy object. This does not reset the + * protocol engine it just suspends it and places it in a state where it will + * not cause the end device to power up. none + */ +static void scu_link_layer_stop_protocol_engine( + struct isci_phy *iphy) +{ + u32 scu_sas_pcfg_value; + u32 enable_spinup_value; + + /* Suspend the protocol engine and place it in a sata spinup hold state */ + scu_sas_pcfg_value = + readl(&iphy->link_layer_registers->phy_configuration); + scu_sas_pcfg_value |= + (SCU_SAS_PCFG_GEN_BIT(OOB_RESET) | + SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE) | + SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD)); + writel(scu_sas_pcfg_value, + &iphy->link_layer_registers->phy_configuration); + + /* Disable the notify enable spinup primitives */ + enable_spinup_value = readl(&iphy->link_layer_registers->notify_enable_spinup_control); + enable_spinup_value &= ~SCU_ENSPINUP_GEN_BIT(ENABLE); + writel(enable_spinup_value, &iphy->link_layer_registers->notify_enable_spinup_control); +} + +/** + * + * + * This method will start the OOB/SN state machine for this struct isci_phy object. + */ +static void scu_link_layer_start_oob( + struct isci_phy *iphy) +{ + u32 scu_sas_pcfg_value; + + scu_sas_pcfg_value = + readl(&iphy->link_layer_registers->phy_configuration); + scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE); + scu_sas_pcfg_value &= + ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) | + SCU_SAS_PCFG_GEN_BIT(HARD_RESET)); + writel(scu_sas_pcfg_value, + &iphy->link_layer_registers->phy_configuration); +} + +/** + * + * + * This method will transmit a hard reset request on the specified phy. The SCU + * hardware requires that we reset the OOB state machine and set the hard reset + * bit in the phy configuration register. We then must start OOB over with the + * hard reset bit set. 
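Each SATA wait substate above arms its timeout on entry and cancels it on exit, so every path out of the state tears the timer down. The pairing, sketched with a simulated timer (250 echoes SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT; everything else is hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* Simulated one-shot timer; in the driver this is a struct sci_timer. */
struct timer { bool armed; unsigned int ms; };

static void timer_arm(struct timer *t, unsigned int ms)
{
	t->armed = true;
	t->ms = ms;
	printf("timer armed for %u\n", ms);
}

static void timer_cancel(struct timer *t)
{
	t->armed = false;
	printf("timer cancelled\n");
}

/* Enter/exit pair analogous to the SATA speed substate handlers. */
static void await_speed_enter(struct timer *t) { timer_arm(t, 250); }
static void await_speed_exit(struct timer *t)  { timer_cancel(t); }

int main(void)
{
	struct timer sata_timer = { 0 };

	await_speed_enter(&sata_timer);
	/* ... speed event or link failure arrives ... */
	await_speed_exit(&sata_timer);
	return 0;
}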
+ */ +static void scu_link_layer_tx_hard_reset( + struct isci_phy *iphy) +{ + u32 phy_configuration_value; + + /* + * SAS Phys must wait for the HARD_RESET_TX event notification to transition + * to the starting state. */ + phy_configuration_value = + readl(&iphy->link_layer_registers->phy_configuration); + phy_configuration_value |= + (SCU_SAS_PCFG_GEN_BIT(HARD_RESET) | + SCU_SAS_PCFG_GEN_BIT(OOB_RESET)); + writel(phy_configuration_value, + &iphy->link_layer_registers->phy_configuration); + + /* Now take the OOB state machine out of reset */ + phy_configuration_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE); + phy_configuration_value &= ~SCU_SAS_PCFG_GEN_BIT(OOB_RESET); + writel(phy_configuration_value, + &iphy->link_layer_registers->phy_configuration); +} + +static void sci_phy_stopped_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); + struct isci_port *iport = iphy->owning_port; + struct isci_host *ihost = iport->owning_controller; + + /* + * @todo We need to get to the controller to place this PE in a + * reset state + */ + sci_del_timer(&iphy->sata_timer); + + scu_link_layer_stop_protocol_engine(iphy); + + if (iphy->sm.previous_state_id != SCI_PHY_INITIAL) + sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy); +} + +static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); + struct isci_port *iport = iphy->owning_port; + struct isci_host *ihost = iport->owning_controller; + + scu_link_layer_stop_protocol_engine(iphy); + scu_link_layer_start_oob(iphy); + + /* We don't know what kind of phy we are going to be just yet */ + iphy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN; + iphy->bcn_received_while_port_unassigned = false; + + if (iphy->sm.previous_state_id == SCI_PHY_READY) + sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy); + + sci_change_state(&iphy->sm, SCI_PHY_SUB_INITIAL); +} + +static void sci_phy_ready_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); + struct isci_port *iport = iphy->owning_port; + struct isci_host *ihost = iport->owning_controller; + + sci_controller_link_up(ihost, phy_get_non_dummy_port(iphy), iphy); +} + +static void sci_phy_ready_state_exit(struct sci_base_state_machine *sm) +{ + struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); + + sci_phy_suspend(iphy); +} + +static void sci_phy_resetting_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); + + /* The phy is being reset, therefore deactivate it from the port. In + * the resetting state we don't notify the user regarding link up and + * link down notifications + */ + sci_port_deactivate_phy(iphy->owning_port, iphy, false); + + if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) { + scu_link_layer_tx_hard_reset(iphy); + } else { + /* The SCU does not need to have a discrete reset state so + * just go back to the starting state. 
+ */ + sci_change_state(&iphy->sm, SCI_PHY_STARTING); + } +} + +static const struct sci_base_state sci_phy_state_table[] = { + [SCI_PHY_INITIAL] = { }, + [SCI_PHY_STOPPED] = { + .enter_state = sci_phy_stopped_state_enter, + }, + [SCI_PHY_STARTING] = { + .enter_state = sci_phy_starting_state_enter, + }, + [SCI_PHY_SUB_INITIAL] = { + .enter_state = sci_phy_starting_initial_substate_enter, + }, + [SCI_PHY_SUB_AWAIT_OSSP_EN] = { }, + [SCI_PHY_SUB_AWAIT_SAS_SPEED_EN] = { }, + [SCI_PHY_SUB_AWAIT_IAF_UF] = { }, + [SCI_PHY_SUB_AWAIT_SAS_POWER] = { + .enter_state = sci_phy_starting_await_sas_power_substate_enter, + .exit_state = sci_phy_starting_await_sas_power_substate_exit, + }, + [SCI_PHY_SUB_AWAIT_SATA_POWER] = { + .enter_state = sci_phy_starting_await_sata_power_substate_enter, + .exit_state = sci_phy_starting_await_sata_power_substate_exit + }, + [SCI_PHY_SUB_AWAIT_SATA_PHY_EN] = { + .enter_state = sci_phy_starting_await_sata_phy_substate_enter, + .exit_state = sci_phy_starting_await_sata_phy_substate_exit + }, + [SCI_PHY_SUB_AWAIT_SATA_SPEED_EN] = { + .enter_state = sci_phy_starting_await_sata_speed_substate_enter, + .exit_state = sci_phy_starting_await_sata_speed_substate_exit + }, + [SCI_PHY_SUB_AWAIT_SIG_FIS_UF] = { + .enter_state = sci_phy_starting_await_sig_fis_uf_substate_enter, + .exit_state = sci_phy_starting_await_sig_fis_uf_substate_exit + }, + [SCI_PHY_SUB_FINAL] = { + .enter_state = sci_phy_starting_final_substate_enter, + }, + [SCI_PHY_READY] = { + .enter_state = sci_phy_ready_state_enter, + .exit_state = sci_phy_ready_state_exit, + }, + [SCI_PHY_RESETTING] = { + .enter_state = sci_phy_resetting_state_enter, + }, + [SCI_PHY_FINAL] = { }, +}; + +void sci_phy_construct(struct isci_phy *iphy, + struct isci_port *iport, u8 phy_index) +{ + sci_init_sm(&iphy->sm, sci_phy_state_table, SCI_PHY_INITIAL); + + /* Copy the rest of the input data to our locals */ + iphy->owning_port = iport; + iphy->phy_index = phy_index; + iphy->bcn_received_while_port_unassigned = false; + iphy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN; + iphy->link_layer_registers = NULL; + iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN; + + /* Create the SIGNATURE FIS Timeout timer for this phy */ + sci_init_timer(&iphy->sata_timer, phy_sata_timeout); +} + +void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index) +{ + struct sci_oem_params *oem = &ihost->oem_parameters; + u64 sci_sas_addr; + __be64 sas_addr; + + sci_sas_addr = oem->phys[index].sas_address.high; + sci_sas_addr <<= 32; + sci_sas_addr |= oem->phys[index].sas_address.low; + sas_addr = cpu_to_be64(sci_sas_addr); + memcpy(iphy->sas_addr, &sas_addr, sizeof(sas_addr)); + + iphy->isci_port = NULL; + iphy->sas_phy.enabled = 0; + iphy->sas_phy.id = index; + iphy->sas_phy.sas_addr = &iphy->sas_addr[0]; + iphy->sas_phy.frame_rcvd = (u8 *)&iphy->frame_rcvd; + iphy->sas_phy.ha = &ihost->sas_ha; + iphy->sas_phy.lldd_phy = iphy; + iphy->sas_phy.enabled = 1; + iphy->sas_phy.class = SAS; + iphy->sas_phy.iproto = SAS_PROTOCOL_ALL; + iphy->sas_phy.tproto = 0; + iphy->sas_phy.type = PHY_TYPE_PHYSICAL; + iphy->sas_phy.role = PHY_ROLE_INITIATOR; + iphy->sas_phy.oob_mode = OOB_NOT_CONNECTED; + iphy->sas_phy.linkrate = SAS_LINK_RATE_UNKNOWN; + memset(&iphy->frame_rcvd, 0, sizeof(iphy->frame_rcvd)); +} + + +/** + * isci_phy_control() - This function is one of the SAS Domain Template + * functions. This is a phy management function. + * @phy: This parameter specifies the sphy being controlled. 
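isci_phy_init() above assembles the 64-bit SAS address from the OEM high/low words and stores it in wire (big-endian) byte order. The same packing with explicit shifts, as a standalone sketch (hypothetical function name; the driver uses cpu_to_be64() plus memcpy()):

#include <stdint.h>
#include <stdio.h>

/* Pack a 32-bit high/low SAS address pair into an 8-byte buffer in
 * big-endian (SAS wire) order. */
static void pack_sas_addr(uint32_t high, uint32_t low, uint8_t out[8])
{
	uint64_t addr = ((uint64_t)high << 32) | low;
	int i;

	for (i = 0; i < 8; i++)
		out[i] = (uint8_t)(addr >> (56 - 8 * i));
}

int main(void)
{
	uint8_t buf[8];
	int i;

	pack_sas_addr(0x5000ABCDu, 0x12345678u, buf);
	for (i = 0; i < 8; i++)
		printf("%02x", buf[i]);
	printf("\n");	/* 5000abcd12345678 */
	return 0;
}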
+ * @func: This parameter specifies the phy control function being invoked. + * @buf: This parameter is specific to the phy function being invoked. + * + * status, zero indicates success. + */ +int isci_phy_control(struct asd_sas_phy *sas_phy, + enum phy_func func, + void *buf) +{ + int ret = 0; + struct isci_phy *iphy = sas_phy->lldd_phy; + struct isci_port *iport = iphy->isci_port; + struct isci_host *ihost = sas_phy->ha->lldd_ha; + unsigned long flags; + + dev_dbg(&ihost->pdev->dev, + "%s: phy %p; func %d; buf %p; isci phy %p, port %p\n", + __func__, sas_phy, func, buf, iphy, iport); + + switch (func) { + case PHY_FUNC_DISABLE: + spin_lock_irqsave(&ihost->scic_lock, flags); + sci_phy_stop(iphy); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + break; + + case PHY_FUNC_LINK_RESET: + spin_lock_irqsave(&ihost->scic_lock, flags); + sci_phy_stop(iphy); + sci_phy_start(iphy); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + break; + + case PHY_FUNC_HARD_RESET: + if (!iport) + return -ENODEV; + + /* Perform the port reset. */ + ret = isci_port_perform_hard_reset(ihost, iport, iphy); + + break; + + default: + dev_dbg(&ihost->pdev->dev, + "%s: phy %p; func %d NOT IMPLEMENTED!\n", + __func__, sas_phy, func); + ret = -ENOSYS; + break; + } + return ret; +} diff --git a/drivers/scsi/isci/phy.h b/drivers/scsi/isci/phy.h new file mode 100644 index 00000000000..67699c8e321 --- /dev/null +++ b/drivers/scsi/isci/phy.h @@ -0,0 +1,504 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef _ISCI_PHY_H_ +#define _ISCI_PHY_H_ + +#include <scsi/sas.h> +#include <scsi/libsas.h> +#include "isci.h" +#include "sas.h" + +/* This is the timeout value for the SATA phy to wait for a SIGNATURE FIS + * before restarting the starting state machine. Technically, the old parallel + * ATA specification required up to 30 seconds for a device to issue its + * signature FIS as a result of a soft reset. Now we see that devices respond + * generally within 15 seconds, but we'll use 25 for now. + */ +#define SCIC_SDS_SIGNATURE_FIS_TIMEOUT 25000 + +/* This is the timeout for the SATA OOB/SN because the hardware does not + * recognize a hot plug after OOB signal but before the SN signals. We need to + * make sure after a hotplug timeout if we have not received the speed event + * notification from the hardware that we restart the hardware OOB state + * machine. + */ +#define SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT 250 + +enum sci_phy_protocol { + SCIC_SDS_PHY_PROTOCOL_UNKNOWN, + SCIC_SDS_PHY_PROTOCOL_SAS, + SCIC_SDS_PHY_PROTOCOL_SATA, + SCIC_SDS_MAX_PHY_PROTOCOLS +}; + +/** + * isci_phy - hba local phy infrastructure + * @sm: + * @protocol: attached device protocol + * @phy_index: physical index relative to the controller (0-3) + * @bcn_received_while_port_unassigned: bcn to report after port association + * @sata_timer: timeout SATA signature FIS arrival + */ +struct isci_phy { + struct sci_base_state_machine sm; + struct isci_port *owning_port; + enum sas_linkrate max_negotiated_speed; + enum sci_phy_protocol protocol; + u8 phy_index; + bool bcn_received_while_port_unassigned; + bool is_in_link_training; + struct sci_timer sata_timer; + struct scu_transport_layer_registers __iomem *transport_layer_registers; + struct scu_link_layer_registers __iomem *link_layer_registers; + struct asd_sas_phy sas_phy; + struct isci_port *isci_port; + u8 sas_addr[SAS_ADDR_SIZE]; + union { + struct sas_identify_frame iaf; + struct dev_to_host_fis fis; + } frame_rcvd; +}; + +static inline struct isci_phy *to_iphy(struct asd_sas_phy *sas_phy) +{ + struct isci_phy *iphy = container_of(sas_phy, typeof(*iphy), sas_phy); + + return iphy; +} + +struct sci_phy_cap { + union { + struct { + /* + * The SAS specification indicates the start bit shall + * always be set to + * 1. This implementation will have the start bit set + * to 0 if the PHY CAPABILITIES were either not + * received or speed negotiation failed. 
+ */ + u8 start:1; + u8 tx_ssc_type:1; + u8 res1:2; + u8 req_logical_linkrate:4; + + u32 gen1_no_ssc:1; + u32 gen1_ssc:1; + u32 gen2_no_ssc:1; + u32 gen2_ssc:1; + u32 gen3_no_ssc:1; + u32 gen3_ssc:1; + u32 res2:17; + u32 parity:1; + }; + u32 all; + }; +} __packed; + +/* this data structure reflects the link layer transmit identification reg */ +struct sci_phy_proto { + union { + struct { + u16 _r_a:1; + u16 smp_iport:1; + u16 stp_iport:1; + u16 ssp_iport:1; + u16 _r_b:4; + u16 _r_c:1; + u16 smp_tport:1; + u16 stp_tport:1; + u16 ssp_tport:1; + u16 _r_d:4; + }; + u16 all; + }; +} __packed; + + +/** + * struct sci_phy_properties - This structure defines the properties common to + * all phys that can be retrieved. + * + * + */ +struct sci_phy_properties { + /** + * This field specifies the port that currently contains the + * supplied phy. This field may be set to NULL + * if the phy is not currently contained in a port. + */ + struct isci_port *iport; + + /** + * This field specifies the link rate at which the phy is + * currently operating. + */ + enum sas_linkrate negotiated_link_rate; + + /** + * This field specifies the index of the phy in relation to other + * phys within the controller. This index is zero relative. + */ + u8 index; +}; + +/** + * struct sci_sas_phy_properties - This structure defines the properties, + * specific to a SAS phy, that can be retrieved. + * + * + */ +struct sci_sas_phy_properties { + /** + * This field delineates the Identify Address Frame received + * from the remote end point. + */ + struct sas_identify_frame rcvd_iaf; + + /** + * This field delineates the Phy capabilities structure received + * from the remote end point. + */ + struct sci_phy_cap rcvd_cap; + +}; + +/** + * struct sci_sata_phy_properties - This structure defines the properties, + * specific to a SATA phy, that can be retrieved. + * + * + */ +struct sci_sata_phy_properties { + /** + * This field delineates the signature FIS received from the + * attached target. + */ + struct dev_to_host_fis signature_fis; + + /** + * This field specifies to the user if a port selector is connected + * on the specified phy. + */ + bool is_port_selector_present; + +}; + +/** + * enum sci_phy_counter_id - This enumeration depicts the various pieces of + * optional information that can be retrieved for a specific phy. + * + * + */ +enum sci_phy_counter_id { + /** + * This PHY information field tracks the number of frames received. + */ + SCIC_PHY_COUNTER_RECEIVED_FRAME, + + /** + * This PHY information field tracks the number of frames transmitted. + */ + SCIC_PHY_COUNTER_TRANSMITTED_FRAME, + + /** + * This PHY information field tracks the number of DWORDs received. + */ + SCIC_PHY_COUNTER_RECEIVED_FRAME_WORD, + + /** + * This PHY information field tracks the number of DWORDs transmitted. + */ + SCIC_PHY_COUNTER_TRANSMITTED_FRAME_DWORD, + + /** + * This PHY information field tracks the number of times DWORD + * synchronization was lost. + */ + SCIC_PHY_COUNTER_LOSS_OF_SYNC_ERROR, + + /** + * This PHY information field tracks the number of received DWORDs with + * running disparity errors. + */ + SCIC_PHY_COUNTER_RECEIVED_DISPARITY_ERROR, + + /** + * This PHY information field tracks the number of received frames with a + * CRC error (not including short or truncated frames). + */ + SCIC_PHY_COUNTER_RECEIVED_FRAME_CRC_ERROR, + + /** + * This PHY information field tracks the number of DONE (ACK/NAK TIMEOUT) + * primitives received. 
+ */ + SCIC_PHY_COUNTER_RECEIVED_DONE_ACK_NAK_TIMEOUT, + + /** + * This PHY information field tracks the number of DONE (ACK/NAK TIMEOUT) + * primitives transmitted. + */ + SCIC_PHY_COUNTER_TRANSMITTED_DONE_ACK_NAK_TIMEOUT, + + /** + * This PHY information field tracks the number of times the inactivity + * timer for connections on the phy has been utilized. + */ + SCIC_PHY_COUNTER_INACTIVITY_TIMER_EXPIRED, + + /** + * This PHY information field tracks the number of DONE (CREDIT TIMEOUT) + * primitives received. + */ + SCIC_PHY_COUNTER_RECEIVED_DONE_CREDIT_TIMEOUT, + + /** + * This PHY information field tracks the number of DONE (CREDIT TIMEOUT) + * primitives transmitted. + */ + SCIC_PHY_COUNTER_TRANSMITTED_DONE_CREDIT_TIMEOUT, + + /** + * This PHY information field tracks the number of CREDIT BLOCKED + * primitives received. + * @note Depending on remote device implementation, credit blocks + * may occur regularly. + */ + SCIC_PHY_COUNTER_RECEIVED_CREDIT_BLOCKED, + + /** + * This PHY information field contains the number of short frames + * received. A short frame is simply a frame smaller then what is + * allowed by either the SAS or SATA specification. + */ + SCIC_PHY_COUNTER_RECEIVED_SHORT_FRAME, + + /** + * This PHY information field contains the number of frames received after + * credit has been exhausted. + */ + SCIC_PHY_COUNTER_RECEIVED_FRAME_WITHOUT_CREDIT, + + /** + * This PHY information field contains the number of frames received after + * a DONE has been received. + */ + SCIC_PHY_COUNTER_RECEIVED_FRAME_AFTER_DONE, + + /** + * This PHY information field contains the number of times the phy + * failed to achieve DWORD synchronization during speed negotiation. + */ + SCIC_PHY_COUNTER_SN_DWORD_SYNC_ERROR +}; + +enum sci_phy_states { + /** + * Simply the initial state for the base domain state machine. + */ + SCI_PHY_INITIAL, + + /** + * This state indicates that the phy has successfully been stopped. + * In this state no new IO operations are permitted on this phy. + * This state is entered from the INITIAL state. + * This state is entered from the STARTING state. + * This state is entered from the READY state. + * This state is entered from the RESETTING state. + */ + SCI_PHY_STOPPED, + + /** + * This state indicates that the phy is in the process of becomming + * ready. In this state no new IO operations are permitted on this phy. + * This state is entered from the STOPPED state. + * This state is entered from the READY state. + * This state is entered from the RESETTING state. + */ + SCI_PHY_STARTING, + + /** + * Initial state + */ + SCI_PHY_SUB_INITIAL, + + /** + * Wait state for the hardware OSSP event type notification + */ + SCI_PHY_SUB_AWAIT_OSSP_EN, + + /** + * Wait state for the PHY speed notification + */ + SCI_PHY_SUB_AWAIT_SAS_SPEED_EN, + + /** + * Wait state for the IAF Unsolicited frame notification + */ + SCI_PHY_SUB_AWAIT_IAF_UF, + + /** + * Wait state for the request to consume power + */ + SCI_PHY_SUB_AWAIT_SAS_POWER, + + /** + * Wait state for request to consume power + */ + SCI_PHY_SUB_AWAIT_SATA_POWER, + + /** + * Wait state for the SATA PHY notification + */ + SCI_PHY_SUB_AWAIT_SATA_PHY_EN, + + /** + * Wait for the SATA PHY speed notification + */ + SCI_PHY_SUB_AWAIT_SATA_SPEED_EN, + + /** + * Wait state for the SIGNATURE FIS unsolicited frame notification + */ + SCI_PHY_SUB_AWAIT_SIG_FIS_UF, + + /** + * Exit state for this state machine + */ + SCI_PHY_SUB_FINAL, + + /** + * This state indicates the the phy is now ready. 
Thus, the user + * is able to perform IO operations utilizing this phy as long as it + * is currently part of a valid port. + * This state is entered from the STARTING state. + */ + SCI_PHY_READY, + + /** + * This state indicates that the phy is in the process of being reset. + * In this state no new IO operations are permitted on this phy. + * This state is entered from the READY state. + */ + SCI_PHY_RESETTING, + + /** + * Simply the final state for the base phy state machine. + */ + SCI_PHY_FINAL, +}; + +void sci_phy_construct( + struct isci_phy *iphy, + struct isci_port *iport, + u8 phy_index); + +struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy); + +void sci_phy_set_port( + struct isci_phy *iphy, + struct isci_port *iport); + +enum sci_status sci_phy_initialize( + struct isci_phy *iphy, + struct scu_transport_layer_registers __iomem *transport_layer_registers, + struct scu_link_layer_registers __iomem *link_layer_registers); + +enum sci_status sci_phy_start( + struct isci_phy *iphy); + +enum sci_status sci_phy_stop( + struct isci_phy *iphy); + +enum sci_status sci_phy_reset( + struct isci_phy *iphy); + +void sci_phy_resume( + struct isci_phy *iphy); + +void sci_phy_setup_transport( + struct isci_phy *iphy, + u32 device_id); + +enum sci_status sci_phy_event_handler( + struct isci_phy *iphy, + u32 event_code); + +enum sci_status sci_phy_frame_handler( + struct isci_phy *iphy, + u32 frame_index); + +enum sci_status sci_phy_consume_power_handler( + struct isci_phy *iphy); + +void sci_phy_get_sas_address( + struct isci_phy *iphy, + struct sci_sas_address *sas_address); + +void sci_phy_get_attached_sas_address( + struct isci_phy *iphy, + struct sci_sas_address *sas_address); + +struct sci_phy_proto; +void sci_phy_get_protocols( + struct isci_phy *iphy, + struct sci_phy_proto *protocols); +enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy); + +struct isci_host; +void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index); +int isci_phy_control(struct asd_sas_phy *phy, enum phy_func func, void *buf); + +#endif /* !defined(_ISCI_PHY_H_) */ diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c new file mode 100644 index 00000000000..8f6f9b77e41 --- /dev/null +++ b/drivers/scsi/isci/port.c @@ -0,0 +1,1757 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "isci.h" +#include "port.h" +#include "request.h" + +#define SCIC_SDS_PORT_HARD_RESET_TIMEOUT (1000) +#define SCU_DUMMY_INDEX (0xFFFF) + +static void isci_port_change_state(struct isci_port *iport, enum isci_status status) +{ + unsigned long flags; + + dev_dbg(&iport->isci_host->pdev->dev, + "%s: iport = %p, state = 0x%x\n", + __func__, iport, status); + + /* XXX pointless lock */ + spin_lock_irqsave(&iport->state_lock, flags); + iport->status = status; + spin_unlock_irqrestore(&iport->state_lock, flags); +} + +static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto) +{ + u8 index; + + proto->all = 0; + for (index = 0; index < SCI_MAX_PHYS; index++) { + struct isci_phy *iphy = iport->phy_table[index]; + + if (!iphy) + continue; + sci_phy_get_protocols(iphy, proto); + } +} + +static u32 sci_port_get_phys(struct isci_port *iport) +{ + u32 index; + u32 mask; + + mask = 0; + for (index = 0; index < SCI_MAX_PHYS; index++) + if (iport->phy_table[index]) + mask |= (1 << index); + + return mask; +} + +/** + * sci_port_get_properties() - This method simply returns the properties + * regarding the port, such as: physical index, protocols, sas address, etc. + * @port: this parameter specifies the port for which to retrieve the physical + * index. + * @properties: This parameter specifies the properties structure into which to + * copy the requested information. + * + * Indicate if the user specified a valid port. SCI_SUCCESS This value is + * returned if the specified port was valid. SCI_FAILURE_INVALID_PORT This + * value is returned if the specified port is not valid. When this value is + * returned, no data is copied to the properties output parameter. 
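sci_port_get_phys() below reduces the port's phy_table to a bitmask, one bit per populated slot. The same idea in isolation (generic pointers stand in for the struct isci_phy entries):

#include <stdint.h>
#include <stdio.h>

#define MAX_PHYS 4

/* Build a bitmask with one bit set per non-NULL table slot. */
static uint32_t phy_mask(void *table[MAX_PHYS])
{
	uint32_t mask = 0;
	int i;

	for (i = 0; i < MAX_PHYS; i++)
		if (table[i])
			mask |= 1u << i;
	return mask;
}

int main(void)
{
	int a, b;
	void *table[MAX_PHYS] = { &a, NULL, &b, NULL };

	printf("0x%x\n", phy_mask(table));	/* 0x5: phys 0 and 2 present */
	return 0;
}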
+ */ +static enum sci_status sci_port_get_properties(struct isci_port *iport, + struct sci_port_properties *prop) +{ + if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT) + return SCI_FAILURE_INVALID_PORT; + + prop->index = iport->logical_port_index; + prop->phy_mask = sci_port_get_phys(iport); + sci_port_get_sas_address(iport, &prop->local.sas_address); + sci_port_get_protocols(iport, &prop->local.protocols); + sci_port_get_attached_sas_address(iport, &prop->remote.sas_address); + + return SCI_SUCCESS; +} + +static void sci_port_bcn_enable(struct isci_port *iport) +{ + struct isci_phy *iphy; + u32 val; + int i; + + for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) { + iphy = iport->phy_table[i]; + if (!iphy) + continue; + val = readl(&iphy->link_layer_registers->link_layer_control); + /* clear the bit by writing 1. */ + writel(val, &iphy->link_layer_registers->link_layer_control); + } +} + +/* called under sci_lock to stabilize phy:port associations */ +void isci_port_bcn_enable(struct isci_host *ihost, struct isci_port *iport) +{ + int i; + + clear_bit(IPORT_BCN_BLOCKED, &iport->flags); + wake_up(&ihost->eventq); + + if (!test_and_clear_bit(IPORT_BCN_PENDING, &iport->flags)) + return; + + for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) { + struct isci_phy *iphy = iport->phy_table[i]; + + if (!iphy) + continue; + + ihost->sas_ha.notify_port_event(&iphy->sas_phy, + PORTE_BROADCAST_RCVD); + break; + } +} + +static void isci_port_bc_change_received(struct isci_host *ihost, + struct isci_port *iport, + struct isci_phy *iphy) +{ + if (iport && test_bit(IPORT_BCN_BLOCKED, &iport->flags)) { + dev_dbg(&ihost->pdev->dev, + "%s: disabled BCN; isci_phy = %p, sas_phy = %p\n", + __func__, iphy, &iphy->sas_phy); + set_bit(IPORT_BCN_PENDING, &iport->flags); + atomic_inc(&iport->event); + wake_up(&ihost->eventq); + } else { + dev_dbg(&ihost->pdev->dev, + "%s: isci_phy = %p, sas_phy = %p\n", + __func__, iphy, &iphy->sas_phy); + + ihost->sas_ha.notify_port_event(&iphy->sas_phy, + PORTE_BROADCAST_RCVD); + } + sci_port_bcn_enable(iport); +} + +static void isci_port_link_up(struct isci_host *isci_host, + struct isci_port *iport, + struct isci_phy *iphy) +{ + unsigned long flags; + struct sci_port_properties properties; + unsigned long success = true; + + BUG_ON(iphy->isci_port != NULL); + + iphy->isci_port = iport; + + dev_dbg(&isci_host->pdev->dev, + "%s: isci_port = %p\n", + __func__, iport); + + spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags); + + isci_port_change_state(iphy->isci_port, isci_starting); + + sci_port_get_properties(iport, &properties); + + if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) { + u64 attached_sas_address; + + iphy->sas_phy.oob_mode = SATA_OOB_MODE; + iphy->sas_phy.frame_rcvd_size = sizeof(struct dev_to_host_fis); + + /* + * For direct-attached SATA devices, the SCI core will + * automagically assign a SAS address to the end device + * for the purpose of creating a port. This SAS address + * will not be the same as assigned to the PHY and needs + * to be obtained from struct sci_port_properties properties. 
+ */ + attached_sas_address = properties.remote.sas_address.high; + attached_sas_address <<= 32; + attached_sas_address |= properties.remote.sas_address.low; + swab64s(&attached_sas_address); + + memcpy(&iphy->sas_phy.attached_sas_addr, + &attached_sas_address, sizeof(attached_sas_address)); + } else if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) { + iphy->sas_phy.oob_mode = SAS_OOB_MODE; + iphy->sas_phy.frame_rcvd_size = sizeof(struct sas_identify_frame); + + /* Copy the attached SAS address from the IAF */ + memcpy(iphy->sas_phy.attached_sas_addr, + iphy->frame_rcvd.iaf.sas_addr, SAS_ADDR_SIZE); + } else { + dev_err(&isci_host->pdev->dev, "%s: unkown target\n", __func__); + success = false; + } + + iphy->sas_phy.phy->negotiated_linkrate = sci_phy_linkrate(iphy); + + spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags); + + /* Notify libsas that we have an address frame, if indeed + * we've found an SSP, SMP, or STP target */ + if (success) + isci_host->sas_ha.notify_port_event(&iphy->sas_phy, + PORTE_BYTES_DMAED); +} + + +/** + * isci_port_link_down() - This function is called by the sci core when a link + * becomes inactive. + * @isci_host: This parameter specifies the isci host object. + * @phy: This parameter specifies the isci phy with the active link. + * @port: This parameter specifies the isci port with the active link. + * + */ +static void isci_port_link_down(struct isci_host *isci_host, + struct isci_phy *isci_phy, + struct isci_port *isci_port) +{ + struct isci_remote_device *isci_device; + + dev_dbg(&isci_host->pdev->dev, + "%s: isci_port = %p\n", __func__, isci_port); + + if (isci_port) { + + /* check to see if this is the last phy on this port. */ + if (isci_phy->sas_phy.port && + isci_phy->sas_phy.port->num_phys == 1) { + atomic_inc(&isci_port->event); + isci_port_bcn_enable(isci_host, isci_port); + + /* change the state for all devices on this port. The + * next task sent to this device will be returned as + * SAS_TASK_UNDELIVERED, and the scsi mid layer will + * remove the target + */ + list_for_each_entry(isci_device, + &isci_port->remote_dev_list, + node) { + dev_dbg(&isci_host->pdev->dev, + "%s: isci_device = %p\n", + __func__, isci_device); + set_bit(IDEV_GONE, &isci_device->flags); + } + } + isci_port_change_state(isci_port, isci_stopping); + } + + /* Notify libsas of the borken link, this will trigger calls to our + * isci_port_deformed and isci_dev_gone functions. + */ + sas_phy_disconnected(&isci_phy->sas_phy); + isci_host->sas_ha.notify_phy_event(&isci_phy->sas_phy, + PHYE_LOSS_OF_SIGNAL); + + isci_phy->isci_port = NULL; + + dev_dbg(&isci_host->pdev->dev, + "%s: isci_port = %p - Done\n", __func__, isci_port); +} + + +/** + * isci_port_ready() - This function is called by the sci core when a link + * becomes ready. + * @isci_host: This parameter specifies the isci host object. + * @port: This parameter specifies the sci port with the active link. + * + */ +static void isci_port_ready(struct isci_host *isci_host, struct isci_port *isci_port) +{ + dev_dbg(&isci_host->pdev->dev, + "%s: isci_port = %p\n", __func__, isci_port); + + complete_all(&isci_port->start_complete); + isci_port_change_state(isci_port, isci_ready); + return; +} + +/** + * isci_port_not_ready() - This function is called by the sci core when a link + * is not ready. All remote devices on this link will be removed if they are + * in the stopping state. + * @isci_host: This parameter specifies the isci host object. + * @port: This parameter specifies the sci port with the active link. 
+ * + */ +static void isci_port_not_ready(struct isci_host *isci_host, struct isci_port *isci_port) +{ + dev_dbg(&isci_host->pdev->dev, + "%s: isci_port = %p\n", __func__, isci_port); +} + +static void isci_port_stop_complete(struct isci_host *ihost, + struct isci_port *iport, + enum sci_status completion_status) +{ + dev_dbg(&ihost->pdev->dev, "Port stop complete\n"); +} + +/** + * isci_port_hard_reset_complete() - This function is called by the sci core + * when the hard reset complete notification has been received. + * @port: This parameter specifies the sci port with the active link. + * @completion_status: This parameter specifies the core status for the reset + * process. + * + */ +static void isci_port_hard_reset_complete(struct isci_port *isci_port, + enum sci_status completion_status) +{ + dev_dbg(&isci_port->isci_host->pdev->dev, + "%s: isci_port = %p, completion_status=%x\n", + __func__, isci_port, completion_status); + + /* Save the status of the hard reset from the port. */ + isci_port->hard_reset_status = completion_status; + + complete_all(&isci_port->hard_reset_complete); +} + +/* This method will return a true value if the specified phy can be assigned to + * this port The following is a list of phys for each port that are allowed: - + * Port 0 - 3 2 1 0 - Port 1 - 1 - Port 2 - 3 2 - Port 3 - 3 This method + * doesn't preclude all configurations. It merely ensures that a phy is part + * of the allowable set of phy identifiers for that port. For example, one + * could assign phy 3 to port 0 and no other phys. Please refer to + * sci_port_is_phy_mask_valid() for information regarding whether the + * phy_mask for a port can be supported. bool true if this is a valid phy + * assignment for the port false if this is not a valid phy assignment for the + * port + */ +bool sci_port_is_valid_phy_assignment(struct isci_port *iport, u32 phy_index) +{ + struct isci_host *ihost = iport->owning_controller; + struct sci_user_parameters *user = &ihost->user_parameters; + + /* Initialize to invalid value. */ + u32 existing_phy_index = SCI_MAX_PHYS; + u32 index; + + if ((iport->physical_port_index == 1) && (phy_index != 1)) + return false; + + if (iport->physical_port_index == 3 && phy_index != 3) + return false; + + if (iport->physical_port_index == 2 && + (phy_index == 0 || phy_index == 1)) + return false; + + for (index = 0; index < SCI_MAX_PHYS; index++) + if (iport->phy_table[index] && index != phy_index) + existing_phy_index = index; + + /* Ensure that all of the phys in the port are capable of + * operating at the same maximum link rate. + */ + if (existing_phy_index < SCI_MAX_PHYS && + user->phys[phy_index].max_speed_generation != + user->phys[existing_phy_index].max_speed_generation) + return false; + + return true; +} + +/** + * + * @sci_port: This is the port object for which to determine if the phy mask + * can be supported. + * + * This method will return a true value if the port's phy mask can be supported + * by the SCU. The following is a list of valid PHY mask configurations for + * each port: - Port 0 - [[3 2] 1] 0 - Port 1 - [1] - Port 2 - [[3] 2] + * - Port 3 - [3] This method returns a boolean indication specifying if the + * phy mask can be supported. 
true if this is a valid phy assignment for the + * port false if this is not a valid phy assignment for the port + */ +static bool sci_port_is_phy_mask_valid( + struct isci_port *iport, + u32 phy_mask) +{ + if (iport->physical_port_index == 0) { + if (((phy_mask & 0x0F) == 0x0F) + || ((phy_mask & 0x03) == 0x03) + || ((phy_mask & 0x01) == 0x01) + || (phy_mask == 0)) + return true; + } else if (iport->physical_port_index == 1) { + if (((phy_mask & 0x02) == 0x02) + || (phy_mask == 0)) + return true; + } else if (iport->physical_port_index == 2) { + if (((phy_mask & 0x0C) == 0x0C) + || ((phy_mask & 0x04) == 0x04) + || (phy_mask == 0)) + return true; + } else if (iport->physical_port_index == 3) { + if (((phy_mask & 0x08) == 0x08) + || (phy_mask == 0)) + return true; + } + + return false; +} + +/* + * This method retrieves a currently active (i.e. connected) phy contained in + * the port. Currently, the lowest order phy that is connected is returned. + * This method returns a pointer to a SCIS_SDS_PHY object. NULL This value is + * returned if there are no currently active (i.e. connected to a remote end + * point) phys contained in the port. All other values specify a struct sci_phy + * object that is active in the port. + */ +static struct isci_phy *sci_port_get_a_connected_phy(struct isci_port *iport) +{ + u32 index; + struct isci_phy *iphy; + + for (index = 0; index < SCI_MAX_PHYS; index++) { + /* Ensure that the phy is both part of the port and currently + * connected to the remote end-point. + */ + iphy = iport->phy_table[index]; + if (iphy && sci_port_active_phy(iport, iphy)) + return iphy; + } + + return NULL; +} + +static enum sci_status sci_port_set_phy(struct isci_port *iport, struct isci_phy *iphy) +{ + /* Check to see if we can add this phy to a port + * that means that the phy is not part of a port and that the port does + * not already have a phy assinged to the phy index. + */ + if (!iport->phy_table[iphy->phy_index] && + !phy_get_non_dummy_port(iphy) && + sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) { + /* Phy is being added in the stopped state so we are in MPC mode + * make logical port index = physical port index + */ + iport->logical_port_index = iport->physical_port_index; + iport->phy_table[iphy->phy_index] = iphy; + sci_phy_set_port(iphy, iport); + + return SCI_SUCCESS; + } + + return SCI_FAILURE; +} + +static enum sci_status sci_port_clear_phy(struct isci_port *iport, struct isci_phy *iphy) +{ + /* Make sure that this phy is part of this port */ + if (iport->phy_table[iphy->phy_index] == iphy && + phy_get_non_dummy_port(iphy) == iport) { + struct isci_host *ihost = iport->owning_controller; + + /* Yep it is assigned to this port so remove it */ + sci_phy_set_port(iphy, &ihost->ports[SCI_MAX_PORTS]); + iport->phy_table[iphy->phy_index] = NULL; + return SCI_SUCCESS; + } + + return SCI_FAILURE; +} + +void sci_port_get_sas_address(struct isci_port *iport, struct sci_sas_address *sas) +{ + u32 index; + + sas->high = 0; + sas->low = 0; + for (index = 0; index < SCI_MAX_PHYS; index++) + if (iport->phy_table[index]) + sci_phy_get_sas_address(iport->phy_table[index], sas); +} + +void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_address *sas) +{ + struct isci_phy *iphy; + + /* + * Ensure that the phy is both part of the port and currently + * connected to the remote end-point. 
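The mask tests in sci_port_is_phy_mask_valid() above encode the engagement rules from its comment: port 0 may own phys 0, 0-1, or 0-3; port 1 only phy 1; port 2 phys 2 or 2-3; port 3 only phy 3. A standalone sketch that mirrors those same bit tests, with a small exhaustive check in main(), could read (ordinary userspace C, not the driver code):

#include <stdbool.h>
#include <stdio.h>

/* Mirror of the per-port phy mask rules used by sci_port_is_phy_mask_valid(). */
static bool phy_mask_valid(unsigned int port_index, unsigned int phy_mask)
{
	switch (port_index) {
	case 0:
		return (phy_mask & 0x0f) == 0x0f || (phy_mask & 0x03) == 0x03 ||
		       (phy_mask & 0x01) == 0x01 || phy_mask == 0;
	case 1:
		return (phy_mask & 0x02) == 0x02 || phy_mask == 0;
	case 2:
		return (phy_mask & 0x0c) == 0x0c || (phy_mask & 0x04) == 0x04 ||
		       phy_mask == 0;
	case 3:
		return (phy_mask & 0x08) == 0x08 || phy_mask == 0;
	default:
		return false;
	}
}

int main(void)
{
	unsigned int port, mask;

	for (port = 0; port < 4; port++)
		for (mask = 0; mask < 16; mask++)
			if (phy_mask_valid(port, mask))
				printf("port %u: mask 0x%x ok\n", port, mask);
	return 0;
}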
+ */ + iphy = sci_port_get_a_connected_phy(iport); + if (iphy) { + if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) { + sci_phy_get_attached_sas_address(iphy, sas); + } else { + sci_phy_get_sas_address(iphy, sas); + sas->low += iphy->phy_index; + } + } else { + sas->high = 0; + sas->low = 0; + } +} + +/** + * sci_port_construct_dummy_rnc() - create dummy rnc for si workaround + * + * @sci_port: logical port on which we need to create the remote node context + * @rni: remote node index for this remote node context. + * + * This routine will construct a dummy remote node context data structure + * This structure will be posted to the hardware to work around a scheduler + * error in the hardware. + */ +static void sci_port_construct_dummy_rnc(struct isci_port *iport, u16 rni) +{ + union scu_remote_node_context *rnc; + + rnc = &iport->owning_controller->remote_node_context_table[rni]; + + memset(rnc, 0, sizeof(union scu_remote_node_context)); + + rnc->ssp.remote_sas_address_hi = 0; + rnc->ssp.remote_sas_address_lo = 0; + + rnc->ssp.remote_node_index = rni; + rnc->ssp.remote_node_port_width = 1; + rnc->ssp.logical_port_index = iport->physical_port_index; + + rnc->ssp.nexus_loss_timer_enable = false; + rnc->ssp.check_bit = false; + rnc->ssp.is_valid = true; + rnc->ssp.is_remote_node_context = true; + rnc->ssp.function_number = 0; + rnc->ssp.arbitration_wait_time = 0; +} + +/* + * construct a dummy task context data structure. This + * structure will be posted to the hardwre to work around a scheduler error + * in the hardware. + */ +static void sci_port_construct_dummy_task(struct isci_port *iport, u16 tag) +{ + struct isci_host *ihost = iport->owning_controller; + struct scu_task_context *task_context; + + task_context = &ihost->task_context_table[ISCI_TAG_TCI(tag)]; + memset(task_context, 0, sizeof(struct scu_task_context)); + + task_context->initiator_request = 1; + task_context->connection_rate = 1; + task_context->logical_port_index = iport->physical_port_index; + task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP; + task_context->task_index = ISCI_TAG_TCI(tag); + task_context->valid = SCU_TASK_CONTEXT_VALID; + task_context->context_type = SCU_TASK_CONTEXT_TYPE; + task_context->remote_node_index = iport->reserved_rni; + task_context->do_not_dma_ssp_good_response = 1; + task_context->task_phase = 0x01; +} + +static void sci_port_destroy_dummy_resources(struct isci_port *iport) +{ + struct isci_host *ihost = iport->owning_controller; + + if (iport->reserved_tag != SCI_CONTROLLER_INVALID_IO_TAG) + isci_free_tag(ihost, iport->reserved_tag); + + if (iport->reserved_rni != SCU_DUMMY_INDEX) + sci_remote_node_table_release_remote_node_index(&ihost->available_remote_nodes, + 1, iport->reserved_rni); + + iport->reserved_rni = SCU_DUMMY_INDEX; + iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG; +} + +void sci_port_setup_transports(struct isci_port *iport, u32 device_id) +{ + u8 index; + + for (index = 0; index < SCI_MAX_PHYS; index++) { + if (iport->active_phy_mask & (1 << index)) + sci_phy_setup_transport(iport->phy_table[index], device_id); + } +} + +static void sci_port_activate_phy(struct isci_port *iport, struct isci_phy *iphy, + bool do_notify_user) +{ + struct isci_host *ihost = iport->owning_controller; + + if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) + sci_phy_resume(iphy); + + iport->active_phy_mask |= 1 << iphy->phy_index; + + sci_controller_clear_invalid_phy(ihost, iphy); + + if (do_notify_user == true) + isci_port_link_up(ihost, iport, iphy); +} + +void 
sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy, + bool do_notify_user) +{ + struct isci_host *ihost = iport->owning_controller; + + iport->active_phy_mask &= ~(1 << iphy->phy_index); + + iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN; + + /* Re-assign the phy back to the LP as if it were a narrow port */ + writel(iphy->phy_index, + &iport->port_pe_configuration_register[iphy->phy_index]); + + if (do_notify_user == true) + isci_port_link_down(ihost, iphy, iport); +} + +static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *iphy) +{ + struct isci_host *ihost = iport->owning_controller; + + /* + * Check to see if we have alreay reported this link as bad and if + * not go ahead and tell the SCI_USER that we have discovered an + * invalid link. + */ + if ((ihost->invalid_phy_mask & (1 << iphy->phy_index)) == 0) { + ihost->invalid_phy_mask |= 1 << iphy->phy_index; + dev_warn(&ihost->pdev->dev, "Invalid link up!\n"); + } +} + +static bool is_port_ready_state(enum sci_port_states state) +{ + switch (state) { + case SCI_PORT_READY: + case SCI_PORT_SUB_WAITING: + case SCI_PORT_SUB_OPERATIONAL: + case SCI_PORT_SUB_CONFIGURING: + return true; + default: + return false; + } +} + +/* flag dummy rnc hanling when exiting a ready state */ +static void port_state_machine_change(struct isci_port *iport, + enum sci_port_states state) +{ + struct sci_base_state_machine *sm = &iport->sm; + enum sci_port_states old_state = sm->current_state_id; + + if (is_port_ready_state(old_state) && !is_port_ready_state(state)) + iport->ready_exit = true; + + sci_change_state(sm, state); + iport->ready_exit = false; +} + +/** + * sci_port_general_link_up_handler - phy can be assigned to port? + * @sci_port: sci_port object for which has a phy that has gone link up. + * @sci_phy: This is the struct isci_phy object that has gone link up. + * @do_notify_user: This parameter specifies whether to inform the user (via + * sci_port_link_up()) as to the fact that a new phy as become ready. + * + * Determine if this phy can be assigned to this + * port . If the phy is not a valid PHY for + * this port then the function will notify the user. A PHY can only be + * part of a port if it's attached SAS ADDRESS is the same as all other PHYs in + * the same port. none + */ +static void sci_port_general_link_up_handler(struct isci_port *iport, + struct isci_phy *iphy, + bool do_notify_user) +{ + struct sci_sas_address port_sas_address; + struct sci_sas_address phy_sas_address; + + sci_port_get_attached_sas_address(iport, &port_sas_address); + sci_phy_get_attached_sas_address(iphy, &phy_sas_address); + + /* If the SAS address of the new phy matches the SAS address of + * other phys in the port OR this is the first phy in the port, + * then activate the phy and allow it to be used for operations + * in this port. + */ + if ((phy_sas_address.high == port_sas_address.high && + phy_sas_address.low == port_sas_address.low) || + iport->active_phy_mask == 0) { + struct sci_base_state_machine *sm = &iport->sm; + + sci_port_activate_phy(iport, iphy, do_notify_user); + if (sm->current_state_id == SCI_PORT_RESETTING) + port_state_machine_change(iport, SCI_PORT_READY); + } else + sci_port_invalid_link_up(iport, iphy); +} + + + +/** + * This method returns false if the port only has a single phy object assigned. + * If there are no phys or more than one phy then the method will return + * true. + * @sci_port: The port for which the wide port condition is to be checked. 
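sci_port_general_link_up_handler() above admits a newly linked-up phy into the port only when its attached SAS address matches the port's attached address, or when the port has no active phys yet; otherwise the link-up is reported as invalid. A standalone sketch of just that decision, using a simple {high, low} address pair (illustrative only, not the driver's types):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct sas_addr {
	uint32_t high;
	uint32_t low;
};

/*
 * True when the phy may join the port: either the attached SAS addresses
 * match, or the port currently has no active phys at all.
 */
static bool phy_can_join_port(struct sas_addr port_attached,
			      struct sas_addr phy_attached,
			      uint32_t active_phy_mask)
{
	if (active_phy_mask == 0)
		return true;
	return port_attached.high == phy_attached.high &&
	       port_attached.low == phy_attached.low;
}

int main(void)
{
	struct sas_addr port = { 0x5000c500, 0x00000001 };	/* example values */
	struct sas_addr same = port;
	struct sas_addr other = { 0x5000c500, 0x00000099 };

	printf("same addr:  %d\n", phy_can_join_port(port, same, 0x1));
	printf("other addr: %d\n", phy_can_join_port(port, other, 0x1));
	printf("empty port: %d\n", phy_can_join_port(port, other, 0x0));
	return 0;
}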
+ * + * bool true Is returned if this is a wide ported port. false Is returned if + * this is a narrow port. + */ +static bool sci_port_is_wide(struct isci_port *iport) +{ + u32 index; + u32 phy_count = 0; + + for (index = 0; index < SCI_MAX_PHYS; index++) { + if (iport->phy_table[index] != NULL) { + phy_count++; + } + } + + return phy_count != 1; +} + +/** + * This method is called by the PHY object when the link is detected. if the + * port wants the PHY to continue on to the link up state then the port + * layer must return true. If the port object returns false the phy object + * must halt its attempt to go link up. + * @sci_port: The port associated with the phy object. + * @sci_phy: The phy object that is trying to go link up. + * + * true if the phy object can continue to the link up condition. true Is + * returned if this phy can continue to the ready state. false Is returned if + * can not continue on to the ready state. This notification is in place for + * wide ports and direct attached phys. Since there are no wide ported SATA + * devices this could become an invalid port configuration. + */ +bool sci_port_link_detected( + struct isci_port *iport, + struct isci_phy *iphy) +{ + if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) && + (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) && + sci_port_is_wide(iport)) { + sci_port_invalid_link_up(iport, iphy); + + return false; + } + + return true; +} + +static void port_timeout(unsigned long data) +{ + struct sci_timer *tmr = (struct sci_timer *)data; + struct isci_port *iport = container_of(tmr, typeof(*iport), timer); + struct isci_host *ihost = iport->owning_controller; + unsigned long flags; + u32 current_state; + + spin_lock_irqsave(&ihost->scic_lock, flags); + + if (tmr->cancel) + goto done; + + current_state = iport->sm.current_state_id; + + if (current_state == SCI_PORT_RESETTING) { + /* if the port is still in the resetting state then the timeout + * fired before the reset completed. + */ + port_state_machine_change(iport, SCI_PORT_FAILED); + } else if (current_state == SCI_PORT_STOPPED) { + /* if the port is stopped then the start request failed In this + * case stay in the stopped state. + */ + dev_err(sciport_to_dev(iport), + "%s: SCIC Port 0x%p failed to stop before tiemout.\n", + __func__, + iport); + } else if (current_state == SCI_PORT_STOPPING) { + /* if the port is still stopping then the stop has not completed */ + isci_port_stop_complete(iport->owning_controller, + iport, + SCI_FAILURE_TIMEOUT); + } else { + /* The port is in the ready state and we have a timer + * reporting a timeout this should not happen. + */ + dev_err(sciport_to_dev(iport), + "%s: SCIC Port 0x%p is processing a timeout operation " + "in state %d.\n", __func__, iport, current_state); + } + +done: + spin_unlock_irqrestore(&ihost->scic_lock, flags); +} + +/* --------------------------------------------------------------------------- */ + +/** + * This function updates the hardwares VIIT entry for this port. 
+ * + * + */ +static void sci_port_update_viit_entry(struct isci_port *iport) +{ + struct sci_sas_address sas_address; + + sci_port_get_sas_address(iport, &sas_address); + + writel(sas_address.high, + &iport->viit_registers->initiator_sas_address_hi); + writel(sas_address.low, + &iport->viit_registers->initiator_sas_address_lo); + + /* This value get cleared just in case its not already cleared */ + writel(0, &iport->viit_registers->reserved); + + /* We are required to update the status register last */ + writel(SCU_VIIT_ENTRY_ID_VIIT | + SCU_VIIT_IPPT_INITIATOR | + ((1 << iport->physical_port_index) << SCU_VIIT_ENTRY_LPVIE_SHIFT) | + SCU_VIIT_STATUS_ALL_VALID, + &iport->viit_registers->status); +} + +enum sas_linkrate sci_port_get_max_allowed_speed(struct isci_port *iport) +{ + u16 index; + struct isci_phy *iphy; + enum sas_linkrate max_allowed_speed = SAS_LINK_RATE_6_0_GBPS; + + /* + * Loop through all of the phys in this port and find the phy with the + * lowest maximum link rate. */ + for (index = 0; index < SCI_MAX_PHYS; index++) { + iphy = iport->phy_table[index]; + if (iphy && sci_port_active_phy(iport, iphy) && + iphy->max_negotiated_speed < max_allowed_speed) + max_allowed_speed = iphy->max_negotiated_speed; + } + + return max_allowed_speed; +} + +static void sci_port_suspend_port_task_scheduler(struct isci_port *iport) +{ + u32 pts_control_value; + + pts_control_value = readl(&iport->port_task_scheduler_registers->control); + pts_control_value |= SCU_PTSxCR_GEN_BIT(SUSPEND); + writel(pts_control_value, &iport->port_task_scheduler_registers->control); +} + +/** + * sci_port_post_dummy_request() - post dummy/workaround request + * @sci_port: port to post task + * + * Prevent the hardware scheduler from posting new requests to the front + * of the scheduler queue causing a starvation problem for currently + * ongoing requests. + * + */ +static void sci_port_post_dummy_request(struct isci_port *iport) +{ + struct isci_host *ihost = iport->owning_controller; + u16 tag = iport->reserved_tag; + struct scu_task_context *tc; + u32 command; + + tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)]; + tc->abort = 0; + + command = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | + iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | + ISCI_TAG_TCI(tag); + + sci_controller_post_request(ihost, command); +} + +/** + * This routine will abort the dummy request. This will alow the hardware to + * power down parts of the silicon to save power. + * + * @sci_port: The port on which the task must be aborted. + * + */ +static void sci_port_abort_dummy_request(struct isci_port *iport) +{ + struct isci_host *ihost = iport->owning_controller; + u16 tag = iport->reserved_tag; + struct scu_task_context *tc; + u32 command; + + tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)]; + tc->abort = 1; + + command = SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT | + iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | + ISCI_TAG_TCI(tag); + + sci_controller_post_request(ihost, command); +} + +/** + * + * @sci_port: This is the struct isci_port object to resume. + * + * This method will resume the port task scheduler for this port object. 
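sci_port_get_max_allowed_speed() above caps the port at the lowest maximum link rate negotiated by any active member phy, starting from 6.0 Gb/s. A minimal standalone version of that minimum-over-active-phys scan follows; the rate constants and phy array are stand-ins for the driver's enum sas_linkrate and phy table.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the libsas link rate values used by the driver. */
enum link_rate { RATE_1_5G = 8, RATE_3_0G = 9, RATE_6_0G = 10 };

#define MAX_PHYS 4

/*
 * Lowest negotiated rate among phys that are marked active in the mask,
 * defaulting to 6.0 Gb/s when no active phy reports a lower rate.
 */
static enum link_rate port_max_allowed_speed(const enum link_rate *rates,
					     uint32_t active_phy_mask)
{
	enum link_rate max_allowed = RATE_6_0G;
	int i;

	for (i = 0; i < MAX_PHYS; i++) {
		if (!(active_phy_mask & (1u << i)))
			continue;
		if (rates[i] < max_allowed)
			max_allowed = rates[i];
	}
	return max_allowed;
}

int main(void)
{
	enum link_rate rates[MAX_PHYS] = { RATE_6_0G, RATE_3_0G, RATE_6_0G, RATE_1_5G };

	/* Phys 0 and 1 active: the 3.0 Gb/s phy limits the whole port. */
	printf("%d\n", port_max_allowed_speed(rates, 0x3));
	return 0;
}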
none + */ +static void +sci_port_resume_port_task_scheduler(struct isci_port *iport) +{ + u32 pts_control_value; + + pts_control_value = readl(&iport->port_task_scheduler_registers->control); + pts_control_value &= ~SCU_PTSxCR_GEN_BIT(SUSPEND); + writel(pts_control_value, &iport->port_task_scheduler_registers->control); +} + +static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine *sm) +{ + struct isci_port *iport = container_of(sm, typeof(*iport), sm); + + sci_port_suspend_port_task_scheduler(iport); + + iport->not_ready_reason = SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS; + + if (iport->active_phy_mask != 0) { + /* At least one of the phys on the port is ready */ + port_state_machine_change(iport, + SCI_PORT_SUB_OPERATIONAL); + } +} + +static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm) +{ + u32 index; + struct isci_port *iport = container_of(sm, typeof(*iport), sm); + struct isci_host *ihost = iport->owning_controller; + + isci_port_ready(ihost, iport); + + for (index = 0; index < SCI_MAX_PHYS; index++) { + if (iport->phy_table[index]) { + writel(iport->physical_port_index, + &iport->port_pe_configuration_register[ + iport->phy_table[index]->phy_index]); + } + } + + sci_port_update_viit_entry(iport); + + sci_port_resume_port_task_scheduler(iport); + + /* + * Post the dummy task for the port so the hardware can schedule + * io correctly + */ + sci_port_post_dummy_request(iport); +} + +static void sci_port_invalidate_dummy_remote_node(struct isci_port *iport) +{ + struct isci_host *ihost = iport->owning_controller; + u8 phys_index = iport->physical_port_index; + union scu_remote_node_context *rnc; + u16 rni = iport->reserved_rni; + u32 command; + + rnc = &ihost->remote_node_context_table[rni]; + + rnc->ssp.is_valid = false; + + /* ensure the preceding tc abort request has reached the + * controller and give it ample time to act before posting the rnc + * invalidate + */ + readl(&ihost->smu_registers->interrupt_status); /* flush */ + udelay(10); + + command = SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE | + phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni; + + sci_controller_post_request(ihost, command); +} + +/** + * + * @object: This is the object which is cast to a struct isci_port object. + * + * This method will perform the actions required by the struct isci_port on + * exiting the SCI_PORT_SUB_OPERATIONAL. This function reports + * the port not ready and suspends the port task scheduler. none + */ +static void sci_port_ready_substate_operational_exit(struct sci_base_state_machine *sm) +{ + struct isci_port *iport = container_of(sm, typeof(*iport), sm); + struct isci_host *ihost = iport->owning_controller; + + /* + * Kill the dummy task for this port if it has not yet posted + * the hardware will treat this as a NOP and just return abort + * complete. 
+ */ + sci_port_abort_dummy_request(iport); + + isci_port_not_ready(ihost, iport); + + if (iport->ready_exit) + sci_port_invalidate_dummy_remote_node(iport); +} + +static void sci_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm) +{ + struct isci_port *iport = container_of(sm, typeof(*iport), sm); + struct isci_host *ihost = iport->owning_controller; + + if (iport->active_phy_mask == 0) { + isci_port_not_ready(ihost, iport); + + port_state_machine_change(iport, + SCI_PORT_SUB_WAITING); + } else if (iport->started_request_count == 0) + port_state_machine_change(iport, + SCI_PORT_SUB_OPERATIONAL); +} + +static void sci_port_ready_substate_configuring_exit(struct sci_base_state_machine *sm) +{ + struct isci_port *iport = container_of(sm, typeof(*iport), sm); + + sci_port_suspend_port_task_scheduler(iport); + if (iport->ready_exit) + sci_port_invalidate_dummy_remote_node(iport); +} + +enum sci_status sci_port_start(struct isci_port *iport) +{ + struct isci_host *ihost = iport->owning_controller; + enum sci_status status = SCI_SUCCESS; + enum sci_port_states state; + u32 phy_mask; + + state = iport->sm.current_state_id; + if (state != SCI_PORT_STOPPED) { + dev_warn(sciport_to_dev(iport), + "%s: in wrong state: %d\n", __func__, state); + return SCI_FAILURE_INVALID_STATE; + } + + if (iport->assigned_device_count > 0) { + /* TODO This is a start failure operation because + * there are still devices assigned to this port. + * There must be no devices assigned to a port on a + * start operation. + */ + return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; + } + + if (iport->reserved_rni == SCU_DUMMY_INDEX) { + u16 rni = sci_remote_node_table_allocate_remote_node( + &ihost->available_remote_nodes, 1); + + if (rni != SCU_DUMMY_INDEX) + sci_port_construct_dummy_rnc(iport, rni); + else + status = SCI_FAILURE_INSUFFICIENT_RESOURCES; + iport->reserved_rni = rni; + } + + if (iport->reserved_tag == SCI_CONTROLLER_INVALID_IO_TAG) { + u16 tag; + + tag = isci_alloc_tag(ihost); + if (tag == SCI_CONTROLLER_INVALID_IO_TAG) + status = SCI_FAILURE_INSUFFICIENT_RESOURCES; + else + sci_port_construct_dummy_task(iport, tag); + iport->reserved_tag = tag; + } + + if (status == SCI_SUCCESS) { + phy_mask = sci_port_get_phys(iport); + + /* + * There are one or more phys assigned to this port. Make sure + * the port's phy mask is in fact legal and supported by the + * silicon. 
+ */ + if (sci_port_is_phy_mask_valid(iport, phy_mask) == true) { + port_state_machine_change(iport, + SCI_PORT_READY); + + return SCI_SUCCESS; + } + status = SCI_FAILURE; + } + + if (status != SCI_SUCCESS) + sci_port_destroy_dummy_resources(iport); + + return status; +} + +enum sci_status sci_port_stop(struct isci_port *iport) +{ + enum sci_port_states state; + + state = iport->sm.current_state_id; + switch (state) { + case SCI_PORT_STOPPED: + return SCI_SUCCESS; + case SCI_PORT_SUB_WAITING: + case SCI_PORT_SUB_OPERATIONAL: + case SCI_PORT_SUB_CONFIGURING: + case SCI_PORT_RESETTING: + port_state_machine_change(iport, + SCI_PORT_STOPPING); + return SCI_SUCCESS; + default: + dev_warn(sciport_to_dev(iport), + "%s: in wrong state: %d\n", __func__, state); + return SCI_FAILURE_INVALID_STATE; + } +} + +static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout) +{ + enum sci_status status = SCI_FAILURE_INVALID_PHY; + struct isci_phy *iphy = NULL; + enum sci_port_states state; + u32 phy_index; + + state = iport->sm.current_state_id; + if (state != SCI_PORT_SUB_OPERATIONAL) { + dev_warn(sciport_to_dev(iport), + "%s: in wrong state: %d\n", __func__, state); + return SCI_FAILURE_INVALID_STATE; + } + + /* Select a phy on which we can send the hard reset request. */ + for (phy_index = 0; phy_index < SCI_MAX_PHYS && !iphy; phy_index++) { + iphy = iport->phy_table[phy_index]; + if (iphy && !sci_port_active_phy(iport, iphy)) { + /* + * We found a phy but it is not ready select + * different phy + */ + iphy = NULL; + } + } + + /* If we have a phy then go ahead and start the reset procedure */ + if (!iphy) + return status; + status = sci_phy_reset(iphy); + + if (status != SCI_SUCCESS) + return status; + + sci_mod_timer(&iport->timer, timeout); + iport->not_ready_reason = SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED; + + port_state_machine_change(iport, SCI_PORT_RESETTING); + return SCI_SUCCESS; +} + +/** + * sci_port_add_phy() - + * @sci_port: This parameter specifies the port in which the phy will be added. + * @sci_phy: This parameter is the phy which is to be added to the port. + * + * This method will add a PHY to the selected port. This method returns an + * enum sci_status. SCI_SUCCESS the phy has been added to the port. Any other + * status is a failure to add the phy to the port. 
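When sci_port_hard_reset() above selects a phy to carry the reset request, it walks the phy table and discards entries that are present but not currently active, so the reset goes out on the first active phy found. A standalone sketch of that selection loop over a simple index table and an active mask (illustrative only; the real code walks struct isci_phy pointers):

#include <stdint.h>
#include <stdio.h>

#define MAX_PHYS 4

/*
 * Return the index of the first phy that is both populated in the table
 * and marked active in the mask, or -1 when no such phy exists.
 */
static int first_active_phy(const int *phy_table, uint32_t active_phy_mask)
{
	int i;

	for (i = 0; i < MAX_PHYS; i++) {
		if (phy_table[i] < 0)
			continue;		/* slot not populated */
		if (active_phy_mask & (1u << i))
			return i;		/* populated and active */
	}
	return -1;
}

int main(void)
{
	int phy_table[MAX_PHYS] = { 0, 1, -1, 3 };	/* -1 marks an empty slot */

	printf("%d\n", first_active_phy(phy_table, 0x8));	/* -> 3 */
	printf("%d\n", first_active_phy(phy_table, 0x0));	/* -> -1 */
	return 0;
}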
+ */ +enum sci_status sci_port_add_phy(struct isci_port *iport, + struct isci_phy *iphy) +{ + enum sci_status status; + enum sci_port_states state; + + state = iport->sm.current_state_id; + switch (state) { + case SCI_PORT_STOPPED: { + struct sci_sas_address port_sas_address; + + /* Read the port assigned SAS Address if there is one */ + sci_port_get_sas_address(iport, &port_sas_address); + + if (port_sas_address.high != 0 && port_sas_address.low != 0) { + struct sci_sas_address phy_sas_address; + + /* Make sure that the PHY SAS Address matches the SAS Address + * for this port + */ + sci_phy_get_sas_address(iphy, &phy_sas_address); + + if (port_sas_address.high != phy_sas_address.high || + port_sas_address.low != phy_sas_address.low) + return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; + } + return sci_port_set_phy(iport, iphy); + } + case SCI_PORT_SUB_WAITING: + case SCI_PORT_SUB_OPERATIONAL: + status = sci_port_set_phy(iport, iphy); + + if (status != SCI_SUCCESS) + return status; + + sci_port_general_link_up_handler(iport, iphy, true); + iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING; + port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING); + + return status; + case SCI_PORT_SUB_CONFIGURING: + status = sci_port_set_phy(iport, iphy); + + if (status != SCI_SUCCESS) + return status; + sci_port_general_link_up_handler(iport, iphy, true); + + /* Re-enter the configuring state since this may be the last phy in + * the port. + */ + port_state_machine_change(iport, + SCI_PORT_SUB_CONFIGURING); + return SCI_SUCCESS; + default: + dev_warn(sciport_to_dev(iport), + "%s: in wrong state: %d\n", __func__, state); + return SCI_FAILURE_INVALID_STATE; + } +} + +/** + * sci_port_remove_phy() - + * @sci_port: This parameter specifies the port in which the phy will be added. + * @sci_phy: This parameter is the phy which is to be added to the port. + * + * This method will remove the PHY from the selected PORT. This method returns + * an enum sci_status. SCI_SUCCESS the phy has been removed from the port. Any + * other status is a failure to add the phy to the port. 
+ */ +enum sci_status sci_port_remove_phy(struct isci_port *iport, + struct isci_phy *iphy) +{ + enum sci_status status; + enum sci_port_states state; + + state = iport->sm.current_state_id; + + switch (state) { + case SCI_PORT_STOPPED: + return sci_port_clear_phy(iport, iphy); + case SCI_PORT_SUB_OPERATIONAL: + status = sci_port_clear_phy(iport, iphy); + if (status != SCI_SUCCESS) + return status; + + sci_port_deactivate_phy(iport, iphy, true); + iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING; + port_state_machine_change(iport, + SCI_PORT_SUB_CONFIGURING); + return SCI_SUCCESS; + case SCI_PORT_SUB_CONFIGURING: + status = sci_port_clear_phy(iport, iphy); + + if (status != SCI_SUCCESS) + return status; + sci_port_deactivate_phy(iport, iphy, true); + + /* Re-enter the configuring state since this may be the last phy in + * the port + */ + port_state_machine_change(iport, + SCI_PORT_SUB_CONFIGURING); + return SCI_SUCCESS; + default: + dev_warn(sciport_to_dev(iport), + "%s: in wrong state: %d\n", __func__, state); + return SCI_FAILURE_INVALID_STATE; + } +} + +enum sci_status sci_port_link_up(struct isci_port *iport, + struct isci_phy *iphy) +{ + enum sci_port_states state; + + state = iport->sm.current_state_id; + switch (state) { + case SCI_PORT_SUB_WAITING: + /* Since this is the first phy going link up for the port we + * can just enable it and continue + */ + sci_port_activate_phy(iport, iphy, true); + + port_state_machine_change(iport, + SCI_PORT_SUB_OPERATIONAL); + return SCI_SUCCESS; + case SCI_PORT_SUB_OPERATIONAL: + sci_port_general_link_up_handler(iport, iphy, true); + return SCI_SUCCESS; + case SCI_PORT_RESETTING: + /* TODO We should make sure that the phy that has gone + * link up is the same one on which we sent the reset. It is + * possible that the phy on which we sent the reset is not the + * one that has gone link up and we want to make sure that + * phy being reset comes back. Consider the case where a + * reset is sent but before the hardware processes the reset it + * get a link up on the port because of a hot plug event. + * because of the reset request this phy will go link down + * almost immediately. + */ + + /* In the resetting state we don't notify the user regarding + * link up and link down notifications. + */ + sci_port_general_link_up_handler(iport, iphy, false); + return SCI_SUCCESS; + default: + dev_warn(sciport_to_dev(iport), + "%s: in wrong state: %d\n", __func__, state); + return SCI_FAILURE_INVALID_STATE; + } +} + +enum sci_status sci_port_link_down(struct isci_port *iport, + struct isci_phy *iphy) +{ + enum sci_port_states state; + + state = iport->sm.current_state_id; + switch (state) { + case SCI_PORT_SUB_OPERATIONAL: + sci_port_deactivate_phy(iport, iphy, true); + + /* If there are no active phys left in the port, then + * transition the port to the WAITING state until such time + * as a phy goes link up + */ + if (iport->active_phy_mask == 0) + port_state_machine_change(iport, + SCI_PORT_SUB_WAITING); + return SCI_SUCCESS; + case SCI_PORT_RESETTING: + /* In the resetting state we don't notify the user regarding + * link up and link down notifications. 
*/ + sci_port_deactivate_phy(iport, iphy, false); + return SCI_SUCCESS; + default: + dev_warn(sciport_to_dev(iport), + "%s: in wrong state: %d\n", __func__, state); + return SCI_FAILURE_INVALID_STATE; + } +} + +enum sci_status sci_port_start_io(struct isci_port *iport, + struct isci_remote_device *idev, + struct isci_request *ireq) +{ + enum sci_port_states state; + + state = iport->sm.current_state_id; + switch (state) { + case SCI_PORT_SUB_WAITING: + return SCI_FAILURE_INVALID_STATE; + case SCI_PORT_SUB_OPERATIONAL: + iport->started_request_count++; + return SCI_SUCCESS; + default: + dev_warn(sciport_to_dev(iport), + "%s: in wrong state: %d\n", __func__, state); + return SCI_FAILURE_INVALID_STATE; + } +} + +enum sci_status sci_port_complete_io(struct isci_port *iport, + struct isci_remote_device *idev, + struct isci_request *ireq) +{ + enum sci_port_states state; + + state = iport->sm.current_state_id; + switch (state) { + case SCI_PORT_STOPPED: + dev_warn(sciport_to_dev(iport), + "%s: in wrong state: %d\n", __func__, state); + return SCI_FAILURE_INVALID_STATE; + case SCI_PORT_STOPPING: + sci_port_decrement_request_count(iport); + + if (iport->started_request_count == 0) + port_state_machine_change(iport, + SCI_PORT_STOPPED); + break; + case SCI_PORT_READY: + case SCI_PORT_RESETTING: + case SCI_PORT_FAILED: + case SCI_PORT_SUB_WAITING: + case SCI_PORT_SUB_OPERATIONAL: + sci_port_decrement_request_count(iport); + break; + case SCI_PORT_SUB_CONFIGURING: + sci_port_decrement_request_count(iport); + if (iport->started_request_count == 0) { + port_state_machine_change(iport, + SCI_PORT_SUB_OPERATIONAL); + } + break; + } + return SCI_SUCCESS; +} + +static void sci_port_enable_port_task_scheduler(struct isci_port *iport) +{ + u32 pts_control_value; + + /* enable the port task scheduler in a suspended state */ + pts_control_value = readl(&iport->port_task_scheduler_registers->control); + pts_control_value |= SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND); + writel(pts_control_value, &iport->port_task_scheduler_registers->control); +} + +static void sci_port_disable_port_task_scheduler(struct isci_port *iport) +{ + u32 pts_control_value; + + pts_control_value = readl(&iport->port_task_scheduler_registers->control); + pts_control_value &= + ~(SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND)); + writel(pts_control_value, &iport->port_task_scheduler_registers->control); +} + +static void sci_port_post_dummy_remote_node(struct isci_port *iport) +{ + struct isci_host *ihost = iport->owning_controller; + u8 phys_index = iport->physical_port_index; + union scu_remote_node_context *rnc; + u16 rni = iport->reserved_rni; + u32 command; + + rnc = &ihost->remote_node_context_table[rni]; + rnc->ssp.is_valid = true; + + command = SCU_CONTEXT_COMMAND_POST_RNC_32 | + phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni; + + sci_controller_post_request(ihost, command); + + /* ensure hardware has seen the post rnc command and give it + * ample time to act before sending the suspend + */ + readl(&ihost->smu_registers->interrupt_status); /* flush */ + udelay(10); + + command = SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX | + phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni; + + sci_controller_post_request(ihost, command); +} + +static void sci_port_stopped_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_port *iport = container_of(sm, typeof(*iport), sm); + + if (iport->sm.previous_state_id == SCI_PORT_STOPPING) { + /* + * If we enter this state becasuse of a request to 
stop + * the port then we want to disable the hardwares port + * task scheduler. */ + sci_port_disable_port_task_scheduler(iport); + } +} + +static void sci_port_stopped_state_exit(struct sci_base_state_machine *sm) +{ + struct isci_port *iport = container_of(sm, typeof(*iport), sm); + + /* Enable and suspend the port task scheduler */ + sci_port_enable_port_task_scheduler(iport); +} + +static void sci_port_ready_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_port *iport = container_of(sm, typeof(*iport), sm); + struct isci_host *ihost = iport->owning_controller; + u32 prev_state; + + prev_state = iport->sm.previous_state_id; + if (prev_state == SCI_PORT_RESETTING) + isci_port_hard_reset_complete(iport, SCI_SUCCESS); + else + isci_port_not_ready(ihost, iport); + + /* Post and suspend the dummy remote node context for this port. */ + sci_port_post_dummy_remote_node(iport); + + /* Start the ready substate machine */ + port_state_machine_change(iport, + SCI_PORT_SUB_WAITING); +} + +static void sci_port_resetting_state_exit(struct sci_base_state_machine *sm) +{ + struct isci_port *iport = container_of(sm, typeof(*iport), sm); + + sci_del_timer(&iport->timer); +} + +static void sci_port_stopping_state_exit(struct sci_base_state_machine *sm) +{ + struct isci_port *iport = container_of(sm, typeof(*iport), sm); + + sci_del_timer(&iport->timer); + + sci_port_destroy_dummy_resources(iport); +} + +static void sci_port_failed_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_port *iport = container_of(sm, typeof(*iport), sm); + + isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT); +} + +/* --------------------------------------------------------------------------- */ + +static const struct sci_base_state sci_port_state_table[] = { + [SCI_PORT_STOPPED] = { + .enter_state = sci_port_stopped_state_enter, + .exit_state = sci_port_stopped_state_exit + }, + [SCI_PORT_STOPPING] = { + .exit_state = sci_port_stopping_state_exit + }, + [SCI_PORT_READY] = { + .enter_state = sci_port_ready_state_enter, + }, + [SCI_PORT_SUB_WAITING] = { + .enter_state = sci_port_ready_substate_waiting_enter, + }, + [SCI_PORT_SUB_OPERATIONAL] = { + .enter_state = sci_port_ready_substate_operational_enter, + .exit_state = sci_port_ready_substate_operational_exit + }, + [SCI_PORT_SUB_CONFIGURING] = { + .enter_state = sci_port_ready_substate_configuring_enter, + .exit_state = sci_port_ready_substate_configuring_exit + }, + [SCI_PORT_RESETTING] = { + .exit_state = sci_port_resetting_state_exit + }, + [SCI_PORT_FAILED] = { + .enter_state = sci_port_failed_state_enter, + } +}; + +void sci_port_construct(struct isci_port *iport, u8 index, + struct isci_host *ihost) +{ + sci_init_sm(&iport->sm, sci_port_state_table, SCI_PORT_STOPPED); + + iport->logical_port_index = SCIC_SDS_DUMMY_PORT; + iport->physical_port_index = index; + iport->active_phy_mask = 0; + iport->ready_exit = false; + + iport->owning_controller = ihost; + + iport->started_request_count = 0; + iport->assigned_device_count = 0; + + iport->reserved_rni = SCU_DUMMY_INDEX; + iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG; + + sci_init_timer(&iport->timer, port_timeout); + + iport->port_task_scheduler_registers = NULL; + + for (index = 0; index < SCI_MAX_PHYS; index++) + iport->phy_table[index] = NULL; +} + +void isci_port_init(struct isci_port *iport, struct isci_host *ihost, int index) +{ + INIT_LIST_HEAD(&iport->remote_dev_list); + INIT_LIST_HEAD(&iport->domain_dev_list); + spin_lock_init(&iport->state_lock); + 
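The sci_port_state_table[] above attaches optional enter/exit handlers to each port state, and port_state_machine_change()/sci_change_state() run them as the state id moves. The following is a compact standalone sketch of that table-driven pattern with only two states and trivial handlers; the real sci_base_state machinery lives elsewhere in the driver, so names here are illustrative.

#include <stdio.h>

enum port_state { STATE_STOPPED, STATE_READY, STATE_MAX };

struct state_machine;

struct state_entry {
	void (*enter_state)(struct state_machine *sm);	/* may be NULL */
	void (*exit_state)(struct state_machine *sm);	/* may be NULL */
};

struct state_machine {
	enum port_state current;
	const struct state_entry *table;
};

/* Run the old state's exit handler, switch, then run the new enter handler. */
static void change_state(struct state_machine *sm, enum port_state next)
{
	if (sm->table[sm->current].exit_state)
		sm->table[sm->current].exit_state(sm);
	sm->current = next;
	if (sm->table[next].enter_state)
		sm->table[next].enter_state(sm);
}

static void ready_enter(struct state_machine *sm)
{
	(void)sm;
	printf("ready: enter\n");
}

static void stopped_exit(struct state_machine *sm)
{
	(void)sm;
	printf("stopped: exit\n");
}

static const struct state_entry table[STATE_MAX] = {
	[STATE_STOPPED] = { .exit_state = stopped_exit },
	[STATE_READY]   = { .enter_state = ready_enter },
};

int main(void)
{
	struct state_machine sm = { .current = STATE_STOPPED, .table = table };

	change_state(&sm, STATE_READY);
	return 0;
}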
init_completion(&iport->start_complete); + iport->isci_host = ihost; + isci_port_change_state(iport, isci_freed); + atomic_set(&iport->event, 0); +} + +/** + * isci_port_get_state() - This function gets the status of the port object. + * @isci_port: This parameter points to the isci_port object + * + * status of the object as a isci_status enum. + */ +enum isci_status isci_port_get_state( + struct isci_port *isci_port) +{ + return isci_port->status; +} + +void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy) +{ + struct isci_host *ihost = iport->owning_controller; + + /* notify the user. */ + isci_port_bc_change_received(ihost, iport, iphy); +} + +int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport, + struct isci_phy *iphy) +{ + unsigned long flags; + enum sci_status status; + int idx, ret = TMF_RESP_FUNC_COMPLETE; + + dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n", + __func__, iport); + + init_completion(&iport->hard_reset_complete); + + spin_lock_irqsave(&ihost->scic_lock, flags); + + #define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT + status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT); + + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + if (status == SCI_SUCCESS) { + wait_for_completion(&iport->hard_reset_complete); + + dev_dbg(&ihost->pdev->dev, + "%s: iport = %p; hard reset completion\n", + __func__, iport); + + if (iport->hard_reset_status != SCI_SUCCESS) + ret = TMF_RESP_FUNC_FAILED; + } else { + ret = TMF_RESP_FUNC_FAILED; + + dev_err(&ihost->pdev->dev, + "%s: iport = %p; sci_port_hard_reset call" + " failed 0x%x\n", + __func__, iport, status); + + } + + /* If the hard reset for the port has failed, consider this + * the same as link failures on all phys in the port. + */ + if (ret != TMF_RESP_FUNC_COMPLETE) { + + dev_err(&ihost->pdev->dev, + "%s: iport = %p; hard reset failed " + "(0x%x) - driving explicit link fail for all phys\n", + __func__, iport, iport->hard_reset_status); + + /* Down all phys in the port. */ + spin_lock_irqsave(&ihost->scic_lock, flags); + for (idx = 0; idx < SCI_MAX_PHYS; ++idx) { + struct isci_phy *iphy = iport->phy_table[idx]; + + if (!iphy) + continue; + sci_phy_stop(iphy); + sci_phy_start(iphy); + } + spin_unlock_irqrestore(&ihost->scic_lock, flags); + } + return ret; +} + +/** + * isci_port_deformed() - This function is called by libsas when a port becomes + * inactive. + * @phy: This parameter specifies the libsas phy with the inactive port. + * + */ +void isci_port_deformed(struct asd_sas_phy *phy) +{ + pr_debug("%s: sas_phy = %p\n", __func__, phy); +} + +/** + * isci_port_formed() - This function is called by libsas when a port becomes + * active. + * @phy: This parameter specifies the libsas phy with the active port. + * + */ +void isci_port_formed(struct asd_sas_phy *phy) +{ + pr_debug("%s: sas_phy = %p, sas_port = %p\n", __func__, phy, phy->port); +} diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h new file mode 100644 index 00000000000..b50ecd4e8f9 --- /dev/null +++ b/drivers/scsi/isci/port.h @@ -0,0 +1,306 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _ISCI_PORT_H_ +#define _ISCI_PORT_H_ + +#include <scsi/libsas.h> +#include "isci.h" +#include "sas.h" +#include "phy.h" + +#define SCIC_SDS_DUMMY_PORT 0xFF + +struct isci_phy; +struct isci_host; + +enum isci_status { + isci_freed = 0x00, + isci_starting = 0x01, + isci_ready = 0x02, + isci_ready_for_io = 0x03, + isci_stopping = 0x04, + isci_stopped = 0x05, +}; + +/** + * struct isci_port - isci direct attached sas port object + * @event: counts bcns and port stop events (for bcn filtering) + * @ready_exit: several states constitute 'ready'. When exiting ready we + * need to take extra port-teardown actions that are + * skipped when exiting to another 'ready' state. 
+ * @logical_port_index: software port index + * @physical_port_index: hardware port index + * @active_phy_mask: identifies phy members + * @reserved_tag: + * @reserved_rni: reserver for port task scheduler workaround + * @started_request_count: reference count for outstanding commands + * @not_ready_reason: set during state transitions and notified + * @timer: timeout start/stop operations + */ +struct isci_port { + enum isci_status status; + #define IPORT_BCN_BLOCKED 0 + #define IPORT_BCN_PENDING 1 + unsigned long flags; + atomic_t event; + struct isci_host *isci_host; + struct asd_sas_port sas_port; + struct list_head remote_dev_list; + spinlock_t state_lock; + struct list_head domain_dev_list; + struct completion start_complete; + struct completion hard_reset_complete; + enum sci_status hard_reset_status; + struct sci_base_state_machine sm; + bool ready_exit; + u8 logical_port_index; + u8 physical_port_index; + u8 active_phy_mask; + u16 reserved_rni; + u16 reserved_tag; + u32 started_request_count; + u32 assigned_device_count; + u32 not_ready_reason; + struct isci_phy *phy_table[SCI_MAX_PHYS]; + struct isci_host *owning_controller; + struct sci_timer timer; + struct scu_port_task_scheduler_registers __iomem *port_task_scheduler_registers; + /* XXX rework: only one register, no need to replicate per-port */ + u32 __iomem *port_pe_configuration_register; + struct scu_viit_entry __iomem *viit_registers; +}; + +enum sci_port_not_ready_reason_code { + SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS, + SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED, + SCIC_PORT_NOT_READY_INVALID_PORT_CONFIGURATION, + SCIC_PORT_NOT_READY_RECONFIGURING, + + SCIC_PORT_NOT_READY_REASON_CODE_MAX +}; + +struct sci_port_end_point_properties { + struct sci_sas_address sas_address; + struct sci_phy_proto protocols; +}; + +struct sci_port_properties { + u32 index; + struct sci_port_end_point_properties local; + struct sci_port_end_point_properties remote; + u32 phy_mask; +}; + +/** + * enum sci_port_states - This enumeration depicts all the states for the + * common port state machine. + * + * + */ +enum sci_port_states { + /** + * This state indicates that the port has successfully been stopped. + * In this state no new IO operations are permitted. + * This state is entered from the STOPPING state. + */ + SCI_PORT_STOPPED, + + /** + * This state indicates that the port is in the process of stopping. + * In this state no new IO operations are permitted, but existing IO + * operations are allowed to complete. + * This state is entered from the READY state. + */ + SCI_PORT_STOPPING, + + /** + * This state indicates the port is now ready. Thus, the user is + * able to perform IO operations on this port. + * This state is entered from the STARTING state. + */ + SCI_PORT_READY, + + /** + * The substate where the port is started and ready but has no + * active phys. + */ + SCI_PORT_SUB_WAITING, + + /** + * The substate where the port is started and ready and there is + * at least one phy operational. + */ + SCI_PORT_SUB_OPERATIONAL, + + /** + * The substate where the port is started and there was an + * add/remove phy event. This state is only used in Automatic + * Port Configuration Mode (APC) + */ + SCI_PORT_SUB_CONFIGURING, + + /** + * This state indicates the port is in the process of performing a hard + * reset. Thus, the user is unable to perform IO operations on this + * port. + * This state is entered from the READY state. + */ + SCI_PORT_RESETTING, + + /** + * This state indicates the port has failed a reset request. 
This state + * is entered when a port reset request times out. + * This state is entered from the RESETTING state. + */ + SCI_PORT_FAILED, + + +}; + +static inline void sci_port_decrement_request_count(struct isci_port *iport) +{ + if (WARN_ONCE(iport->started_request_count == 0, + "%s: tried to decrement started_request_count past 0!?", + __func__)) + /* pass */; + else + iport->started_request_count--; +} + +#define sci_port_active_phy(port, phy) \ + (((port)->active_phy_mask & (1 << (phy)->phy_index)) != 0) + +void sci_port_construct( + struct isci_port *iport, + u8 port_index, + struct isci_host *ihost); + +enum sci_status sci_port_start(struct isci_port *iport); +enum sci_status sci_port_stop(struct isci_port *iport); + +enum sci_status sci_port_add_phy( + struct isci_port *iport, + struct isci_phy *iphy); + +enum sci_status sci_port_remove_phy( + struct isci_port *iport, + struct isci_phy *iphy); + +void sci_port_setup_transports( + struct isci_port *iport, + u32 device_id); + +void isci_port_bcn_enable(struct isci_host *, struct isci_port *); + +void sci_port_deactivate_phy( + struct isci_port *iport, + struct isci_phy *iphy, + bool do_notify_user); + +bool sci_port_link_detected( + struct isci_port *iport, + struct isci_phy *iphy); + +enum sci_status sci_port_link_up(struct isci_port *iport, + struct isci_phy *iphy); +enum sci_status sci_port_link_down(struct isci_port *iport, + struct isci_phy *iphy); + +struct isci_request; +struct isci_remote_device; +enum sci_status sci_port_start_io( + struct isci_port *iport, + struct isci_remote_device *idev, + struct isci_request *ireq); + +enum sci_status sci_port_complete_io( + struct isci_port *iport, + struct isci_remote_device *idev, + struct isci_request *ireq); + +enum sas_linkrate sci_port_get_max_allowed_speed( + struct isci_port *iport); + +void sci_port_broadcast_change_received( + struct isci_port *iport, + struct isci_phy *iphy); + +bool sci_port_is_valid_phy_assignment( + struct isci_port *iport, + u32 phy_index); + +void sci_port_get_sas_address( + struct isci_port *iport, + struct sci_sas_address *sas_address); + +void sci_port_get_attached_sas_address( + struct isci_port *iport, + struct sci_sas_address *sas_address); + +enum isci_status isci_port_get_state( + struct isci_port *isci_port); + +void isci_port_formed(struct asd_sas_phy *); +void isci_port_deformed(struct asd_sas_phy *); + +void isci_port_init( + struct isci_port *port, + struct isci_host *host, + int index); + +int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport, + struct isci_phy *iphy); +#endif /* !defined(_ISCI_PORT_H_) */ diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c new file mode 100644 index 00000000000..486b113c634 --- /dev/null +++ b/drivers/scsi/isci/port_config.c @@ -0,0 +1,754 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "host.h" + +#define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT (10) +#define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT (10) +#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (100) + +enum SCIC_SDS_APC_ACTIVITY { + SCIC_SDS_APC_SKIP_PHY, + SCIC_SDS_APC_ADD_PHY, + SCIC_SDS_APC_START_TIMER, + + SCIC_SDS_APC_ACTIVITY_MAX +}; + +/* + * ****************************************************************************** + * General port configuration agent routines + * ****************************************************************************** */ + +/** + * + * @address_one: A SAS Address to be compared. + * @address_two: A SAS Address to be compared. + * + * Compare the two SAS Address and if SAS Address One is greater than SAS + * Address Two then return > 0 else if SAS Address One is less than SAS Address + * Two return < 0 Otherwise they are the same return 0 A signed value of x > 0 + * > y where x is returned for Address One > Address Two y is returned for + * Address One < Address Two 0 is returned ofr Address One = Address Two + */ +static s32 sci_sas_address_compare( + struct sci_sas_address address_one, + struct sci_sas_address address_two) +{ + if (address_one.high > address_two.high) { + return 1; + } else if (address_one.high < address_two.high) { + return -1; + } else if (address_one.low > address_two.low) { + return 1; + } else if (address_one.low < address_two.low) { + return -1; + } + + /* The two SAS Address must be identical */ + return 0; +} + +/** + * + * @controller: The controller object used for the port search. 
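sci_sas_address_compare() above is a plain three-way comparison over the {high, low} halves of two SAS addresses: positive when the first is greater, negative when it is smaller, zero when identical. A standalone equivalent with a couple of example calls (ordinary userspace C, example addresses made up):

#include <stdint.h>
#include <stdio.h>

struct sas_addr {
	uint32_t high;
	uint32_t low;
};

/* >0 if a > b, <0 if a < b, 0 if the two addresses are identical. */
static int sas_addr_compare(struct sas_addr a, struct sas_addr b)
{
	if (a.high != b.high)
		return a.high > b.high ? 1 : -1;
	if (a.low != b.low)
		return a.low > b.low ? 1 : -1;
	return 0;
}

int main(void)
{
	struct sas_addr x = { 0x5000c500, 0x10 };
	struct sas_addr y = { 0x5000c500, 0x20 };

	printf("%d %d %d\n",
	       sas_addr_compare(x, y),	/* -1 */
	       sas_addr_compare(y, x),	/*  1 */
	       sas_addr_compare(x, x));	/*  0 */
	return 0;
}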
+ * @phy: The phy object to match.
+ *
+ * This routine will find a matching port for the phy. This means that the
+ * port and phy both have the same broadcast sas address and same received sas
+ * address. Returns the address of the matching port if one can be found for
+ * the phy, or NULL if there is no matching port for the phy.
+ */
+static struct isci_port *sci_port_configuration_agent_find_port(
+ struct isci_host *ihost,
+ struct isci_phy *iphy)
+{
+ u8 i;
+ struct sci_sas_address port_sas_address;
+ struct sci_sas_address port_attached_device_address;
+ struct sci_sas_address phy_sas_address;
+ struct sci_sas_address phy_attached_device_address;
+
+ /*
+ * Since this phy can be a member of a wide port check to see if one or
+ * more phys match the sent and received SAS address as this phy in which
+ * case it should participate in the same port.
+ */
+ sci_phy_get_sas_address(iphy, &phy_sas_address);
+ sci_phy_get_attached_sas_address(iphy, &phy_attached_device_address);
+
+ for (i = 0; i < ihost->logical_port_entries; i++) {
+ struct isci_port *iport = &ihost->ports[i];
+
+ sci_port_get_sas_address(iport, &port_sas_address);
+ sci_port_get_attached_sas_address(iport, &port_attached_device_address);
+
+ if (sci_sas_address_compare(port_sas_address, phy_sas_address) == 0 &&
+ sci_sas_address_compare(port_attached_device_address, phy_attached_device_address) == 0)
+ return iport;
+ }
+
+ return NULL;
+}
+
+/**
+ *
+ * @controller: This is the controller object that contains the port agent
+ * @port_agent: This is the port configuration agent for the controller.
+ *
+ * This routine will validate that the port configuration is correct for the
+ * SCU hardware. The SCU hardware allows the following port configurations:
+ * LP0 -> (PE0), (PE0, PE1) or (PE0, PE1, PE2, PE3); LP1 -> (PE1);
+ * LP2 -> (PE2) or (PE2, PE3); LP3 -> (PE3). Returns SCI_SUCCESS if the port
+ * configuration is valid for this port configuration agent, or
+ * SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION if it is not.
+ */
+static enum sci_status sci_port_configuration_agent_validate_ports(
+ struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent)
+{
+ struct sci_sas_address first_address;
+ struct sci_sas_address second_address;
+
+ /*
+ * Sanity check the max ranges for all the phys: the max index
+ * is always equal to the port range index */
+ if (port_agent->phy_valid_port_range[0].max_index != 0 ||
+ port_agent->phy_valid_port_range[1].max_index != 1 ||
+ port_agent->phy_valid_port_range[2].max_index != 2 ||
+ port_agent->phy_valid_port_range[3].max_index != 3)
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+
+ /*
+ * This is a request to configure a single x4 port or at least attempt
+ * to make all the phys into a single port */
+ if (port_agent->phy_valid_port_range[0].min_index == 0 &&
+ port_agent->phy_valid_port_range[1].min_index == 0 &&
+ port_agent->phy_valid_port_range[2].min_index == 0 &&
+ port_agent->phy_valid_port_range[3].min_index == 0)
+ return SCI_SUCCESS;
+
+ /*
+ * This is a degenerate case where phy 1 and phy 2 are assigned
+ * to the same port; this is explicitly disallowed by the hardware
+ * unless they are part of the same x4 port and this condition was
+ * already checked above.
+ */
+ if (port_agent->phy_valid_port_range[2].min_index == 1) {
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+
+ /*
+ * PE0 and PE3 can never have the same SAS Address unless they
+ * are part of the same x4 wide port and we have already checked
+ * for this condition. */
+ sci_phy_get_sas_address(&ihost->phys[0], &first_address);
+ sci_phy_get_sas_address(&ihost->phys[3], &second_address);
+
+ if (sci_sas_address_compare(first_address, second_address) == 0) {
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+
+ /*
+ * If PE0 and PE1 are configured into 2x1 ports, make sure that the
+ * SAS Addresses for PE0 and PE2 are different since they can not be
+ * part of the same port. */
+ if (port_agent->phy_valid_port_range[0].min_index == 0 &&
+ port_agent->phy_valid_port_range[1].min_index == 1) {
+ sci_phy_get_sas_address(&ihost->phys[0], &first_address);
+ sci_phy_get_sas_address(&ihost->phys[2], &second_address);
+
+ if (sci_sas_address_compare(first_address, second_address) == 0) {
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+ }
+
+ /*
+ * If PE2 and PE3 are configured into 2x1 ports, make sure that the
+ * SAS Addresses for PE1 and PE3 are different since they can not be
+ * part of the same port. */
+ if (port_agent->phy_valid_port_range[2].min_index == 2 &&
+ port_agent->phy_valid_port_range[3].min_index == 3) {
+ sci_phy_get_sas_address(&ihost->phys[1], &first_address);
+ sci_phy_get_sas_address(&ihost->phys[3], &second_address);
+
+ if (sci_sas_address_compare(first_address, second_address) == 0) {
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+ }
+
+ return SCI_SUCCESS;
+}
+
+/*
+ * ******************************************************************************
+ * Manual port configuration agent routines
+ * ****************************************************************************** */
+
+/* verify all of the phys in the same port are using the same SAS address */
+static enum sci_status
+sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent)
+{
+ u32 phy_mask;
+ u32 assigned_phy_mask;
+ struct sci_sas_address sas_address;
+ struct sci_sas_address phy_assigned_address;
+ u8 port_index;
+ u8 phy_index;
+
+ assigned_phy_mask = 0;
+ sas_address.high = 0;
+ sas_address.low = 0;
+
+ for (port_index = 0; port_index < SCI_MAX_PORTS; port_index++) {
+ phy_mask = ihost->oem_parameters.ports[port_index].phy_mask;
+
+ if (!phy_mask)
+ continue;
+ /*
+ * Make sure that one or more of the phys were not already assigned to
+ * a different port. */
+ if ((phy_mask & ~assigned_phy_mask) == 0) {
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+
+ /* Find the starting phy index for this round through the loop */
+ for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) {
+ if ((phy_mask & (1 << phy_index)) == 0)
+ continue;
+ sci_phy_get_sas_address(&ihost->phys[phy_index],
+ &sas_address);
+
+ /*
+ * The phy_index can be used as the starting point for the
+ * port range since the hardware starts all logical ports
+ * the same as the PE index. */
+ port_agent->phy_valid_port_range[phy_index].min_index = port_index;
+ port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
+
+ if (phy_index != port_index) {
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+
+ break;
+ }
+
+ /*
+ * See how many additional phys are being added to this logical port.
+ * Note: We have not moved the current phy_index so we will actually
+ * compare the starting phy with itself.
+ * This is expected and required to add the phy to the port. */
+ while (phy_index < SCI_MAX_PHYS) {
+ if ((phy_mask & (1 << phy_index)) == 0)
+ continue;
+ sci_phy_get_sas_address(&ihost->phys[phy_index],
+ &phy_assigned_address);
+
+ if (sci_sas_address_compare(sas_address, phy_assigned_address) != 0) {
+ /*
+ * The phy mask specified that this phy is part of the same port
+ * as the starting phy and it is not, so fail this configuration */
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+
+ port_agent->phy_valid_port_range[phy_index].min_index = port_index;
+ port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
+
+ sci_port_add_phy(&ihost->ports[port_index],
+ &ihost->phys[phy_index]);
+
+ assigned_phy_mask |= (1 << phy_index);
+ }
+
+ phy_index++;
+ }
+
+ return sci_port_configuration_agent_validate_ports(ihost, port_agent);
+}
+
+static void mpc_agent_timeout(unsigned long data)
+{
+ u8 index;
+ struct sci_timer *tmr = (struct sci_timer *)data;
+ struct sci_port_configuration_agent *port_agent;
+ struct isci_host *ihost;
+ unsigned long flags;
+ u16 configure_phy_mask;
+
+ port_agent = container_of(tmr, typeof(*port_agent), timer);
+ ihost = container_of(port_agent, typeof(*ihost), port_agent);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (tmr->cancel)
+ goto done;
+
+ port_agent->timer_pending = false;
+
+ /* Find the mask of phys that are reported ready but are as yet unconfigured into a port */
+ configure_phy_mask = ~port_agent->phy_configured_mask & port_agent->phy_ready_mask;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ struct isci_phy *iphy = &ihost->phys[index];
+
+ if (configure_phy_mask & (1 << index)) {
+ port_agent->link_up_handler(ihost, port_agent,
+ phy_get_non_dummy_port(iphy),
+ iphy);
+ }
+ }
+
+done:
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+static void sci_mpc_agent_link_up(struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent,
+ struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ /* If the port is NULL then the phy was not assigned to a port.
+ * This is because the phy was not given the same SAS Address as
+ * the other PHYs in the port.
+ */
+ if (!iport)
+ return;
+
+ port_agent->phy_ready_mask |= (1 << iphy->phy_index);
+ sci_port_link_up(iport, iphy);
+ if ((iport->active_phy_mask & (1 << iphy->phy_index)))
+ port_agent->phy_configured_mask |= (1 << iphy->phy_index);
+}
+
+/**
+ *
+ * @controller: This is the controller object that receives the link down
+ * notification.
+ * @port: This is the port object associated with the phy. If there is no
+ * associated port this is NULL. The port is an invalid handle only if the
+ * phy was never part of this port. This happens when the phy is not
+ * broadcasting the same SAS address as the other phys in the assigned port.
+ * @phy: This is the phy object which has gone link down.
+ *
+ * This function handles the manual port configuration link down notifications.
+ * Since all ports and phys are associated at initialization time we just turn
+ * around and notify the port object of the link down event. If this PHY is
+ * not associated with a port there is no action taken. Is it possible to get a
+ * link down notification from a phy that has no associated port?
+ */ +static void sci_mpc_agent_link_down( + struct isci_host *ihost, + struct sci_port_configuration_agent *port_agent, + struct isci_port *iport, + struct isci_phy *iphy) +{ + if (iport != NULL) { + /* + * If we can form a new port from the remainder of the phys + * then we want to start the timer to allow the SCI User to + * cleanup old devices and rediscover the port before + * rebuilding the port with the phys that remain in the ready + * state. + */ + port_agent->phy_ready_mask &= ~(1 << iphy->phy_index); + port_agent->phy_configured_mask &= ~(1 << iphy->phy_index); + + /* + * Check to see if there are more phys waiting to be + * configured into a port. If there are allow the SCI User + * to tear down this port, if necessary, and then reconstruct + * the port after the timeout. + */ + if ((port_agent->phy_configured_mask == 0x0000) && + (port_agent->phy_ready_mask != 0x0000) && + !port_agent->timer_pending) { + port_agent->timer_pending = true; + + sci_mod_timer(&port_agent->timer, + SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT); + } + + sci_port_link_down(iport, iphy); + } +} + +/* verify phys are assigned a valid SAS address for automatic port + * configuration mode. + */ +static enum sci_status +sci_apc_agent_validate_phy_configuration(struct isci_host *ihost, + struct sci_port_configuration_agent *port_agent) +{ + u8 phy_index; + u8 port_index; + struct sci_sas_address sas_address; + struct sci_sas_address phy_assigned_address; + + phy_index = 0; + + while (phy_index < SCI_MAX_PHYS) { + port_index = phy_index; + + /* Get the assigned SAS Address for the first PHY on the controller. */ + sci_phy_get_sas_address(&ihost->phys[phy_index], + &sas_address); + + while (++phy_index < SCI_MAX_PHYS) { + sci_phy_get_sas_address(&ihost->phys[phy_index], + &phy_assigned_address); + + /* Verify each of the SAS address are all the same for every PHY */ + if (sci_sas_address_compare(sas_address, phy_assigned_address) == 0) { + port_agent->phy_valid_port_range[phy_index].min_index = port_index; + port_agent->phy_valid_port_range[phy_index].max_index = phy_index; + } else { + port_agent->phy_valid_port_range[phy_index].min_index = phy_index; + port_agent->phy_valid_port_range[phy_index].max_index = phy_index; + break; + } + } + } + + return sci_port_configuration_agent_validate_ports(ihost, port_agent); +} + +static void sci_apc_agent_configure_ports(struct isci_host *ihost, + struct sci_port_configuration_agent *port_agent, + struct isci_phy *iphy, + bool start_timer) +{ + u8 port_index; + enum sci_status status; + struct isci_port *iport; + enum SCIC_SDS_APC_ACTIVITY apc_activity = SCIC_SDS_APC_SKIP_PHY; + + iport = sci_port_configuration_agent_find_port(ihost, iphy); + + if (iport) { + if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) + apc_activity = SCIC_SDS_APC_ADD_PHY; + else + apc_activity = SCIC_SDS_APC_SKIP_PHY; + } else { + /* + * There is no matching Port for this PHY so lets search through the + * Ports and see if we can add the PHY to its own port or maybe start + * the timer and wait to see if a wider port can be made. + * + * Note the break when we reach the condition of the port id == phy id */ + for (port_index = port_agent->phy_valid_port_range[iphy->phy_index].min_index; + port_index <= port_agent->phy_valid_port_range[iphy->phy_index].max_index; + port_index++) { + + iport = &ihost->ports[port_index]; + + /* First we must make sure that this PHY can be added to this Port. 
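+ * (As a reminder of what "valid" means here, the comment on
+ * sci_port_configuration_agent_validate_ports() above lists the assignments
+ * the SCU hardware supports: LP0 -> PE0, PE0-PE1 or PE0-PE3; LP1 -> PE1 only;
+ * LP2 -> PE2 or PE2-PE3; LP3 -> PE3 only.)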
+ */
+ if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
+ /*
+ * Port contains a PHY with a greater PHY ID than the current
+ * PHY that has gone link up. This phy can not be part of any
+ * port so skip it and move on. */
+ if (iport->active_phy_mask > (1 << iphy->phy_index)) {
+ apc_activity = SCIC_SDS_APC_SKIP_PHY;
+ break;
+ }
+
+ /*
+ * We have reached the end of our Port list and have not found
+ * any reason why we should not either add the PHY to the port
+ * or wait for more phys to become active. */
+ if (iport->physical_port_index == iphy->phy_index) {
+ /*
+ * The Port either has no active PHYs or only active PHYs
+ * with a lower PHY Id than this PHY; if it had any active
+ * PHYs with a greater PHY Id we would have skipped it above. */
+ if (apc_activity != SCIC_SDS_APC_START_TIMER) {
+ apc_activity = SCIC_SDS_APC_ADD_PHY;
+ }
+
+ break;
+ }
+
+ /*
+ * The current Port has no active PHYs and this PHY could be part
+ * of this Port. Since we don't know yet, set up to start the
+ * timer and see if there is a better configuration. */
+ if (iport->active_phy_mask == 0) {
+ apc_activity = SCIC_SDS_APC_START_TIMER;
+ }
+ } else if (iport->active_phy_mask != 0) {
+ /*
+ * The Port has an active phy and the current Phy can not
+ * participate in this port so skip the PHY and see if
+ * there is a better configuration. */
+ apc_activity = SCIC_SDS_APC_SKIP_PHY;
+ }
+ }
+ }
+
+ /*
+ * Check to see if the start timer operations should instead map to an
+ * add phy operation. This is caused because we have been waiting to
+ * add a phy to a port but could not because the automatic port
+ * configuration engine had a choice of possible ports for the phy.
+ * Since we have gone through a timeout we are going to restrict the
+ * choice to the smallest possible port. */
+ if (
+ (start_timer == false)
+ && (apc_activity == SCIC_SDS_APC_START_TIMER)
+ ) {
+ apc_activity = SCIC_SDS_APC_ADD_PHY;
+ }
+
+ switch (apc_activity) {
+ case SCIC_SDS_APC_ADD_PHY:
+ status = sci_port_add_phy(iport, iphy);
+
+ if (status == SCI_SUCCESS) {
+ port_agent->phy_configured_mask |= (1 << iphy->phy_index);
+ }
+ break;
+
+ case SCIC_SDS_APC_START_TIMER:
+ /*
+ * This can occur for either a link down event, or a link
+ * up event where we cannot yet tell the port to which a
+ * phy belongs.
+ */
+ if (port_agent->timer_pending)
+ sci_del_timer(&port_agent->timer);
+
+ port_agent->timer_pending = true;
+ sci_mod_timer(&port_agent->timer,
+ SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
+ break;
+
+ case SCIC_SDS_APC_SKIP_PHY:
+ default:
+ /* do nothing; the PHY can not be made part of a port at this time. */
+ break;
+ }
+}
+
+/**
+ * sci_apc_agent_link_up - handle apc link up events
+ * @scic: This is the controller object that receives the link up
+ * notification.
+ * @sci_port: This is the port object associated with the phy. If there is no
+ * associated port this is NULL.
+ * @sci_phy: This is the phy object which has gone link up.
+ *
+ * This method handles the automatic port configuration for link up
+ * notifications. Is it possible to get a link up notification from a phy
+ * that has no associated port?
+ */
+static void sci_apc_agent_link_up(struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent,
+ struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ u8 phy_index = iphy->phy_index;
+
+ if (!iport) {
+ /* the phy is not part of a port */
+ port_agent->phy_ready_mask |= 1 << phy_index;
+ sci_apc_agent_configure_ports(ihost, port_agent, iphy, true);
+ } else {
+ /* the phy is already part of the port */
+ u32 port_state = iport->sm.current_state_id;
+
+ /* if the PORT's state is resetting then the link up is from
+ * a port hard reset; in this case, we need to tell the port
+ * that link up is received
+ */
+ BUG_ON(port_state != SCI_PORT_RESETTING);
+ port_agent->phy_ready_mask |= 1 << phy_index;
+ sci_port_link_up(iport, iphy);
+ }
+}
+
+/**
+ *
+ * @controller: This is the controller object that receives the link down
+ * notification.
+ * @iport: This is the port object associated with the phy. If there is no
+ * associated port this is NULL.
+ * @iphy: This is the phy object which has gone link down.
+ *
+ * This method handles the automatic port configuration link down
+ * notifications. If the phy is not associated with a port there is no action
+ * taken. Is it possible to get a link down notification from a phy that has
+ * no associated port?
+ */
+static void sci_apc_agent_link_down(
+ struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent,
+ struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ port_agent->phy_ready_mask &= ~(1 << iphy->phy_index);
+
+ if (!iport)
+ return;
+ if (port_agent->phy_configured_mask & (1 << iphy->phy_index)) {
+ enum sci_status status;
+
+ status = sci_port_remove_phy(iport, iphy);
+
+ if (status == SCI_SUCCESS)
+ port_agent->phy_configured_mask &= ~(1 << iphy->phy_index);
+ }
+}
+
+/* configure the phys into ports when the timer fires */
+static void apc_agent_timeout(unsigned long data)
+{
+ u32 index;
+ struct sci_timer *tmr = (struct sci_timer *)data;
+ struct sci_port_configuration_agent *port_agent;
+ struct isci_host *ihost;
+ unsigned long flags;
+ u16 configure_phy_mask;
+
+ port_agent = container_of(tmr, typeof(*port_agent), timer);
+ ihost = container_of(port_agent, typeof(*ihost), port_agent);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (tmr->cancel)
+ goto done;
+
+ port_agent->timer_pending = false;
+
+ configure_phy_mask = ~port_agent->phy_configured_mask & port_agent->phy_ready_mask;
+
+ if (!configure_phy_mask)
+ goto done;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ if ((configure_phy_mask & (1 << index)) == 0)
+ continue;
+
+ sci_apc_agent_configure_ports(ihost, port_agent,
+ &ihost->phys[index], false);
+ }
+
+done:
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+/*
+ * ******************************************************************************
+ * Public port configuration agent routines
+ * ****************************************************************************** */
+
+/**
+ *
+ *
+ * This method will construct the port configuration agent for operation. This
+ * call is universal for both manual port configuration and automatic port
+ * configuration modes.
+ */ +void sci_port_configuration_agent_construct( + struct sci_port_configuration_agent *port_agent) +{ + u32 index; + + port_agent->phy_configured_mask = 0x00; + port_agent->phy_ready_mask = 0x00; + + port_agent->link_up_handler = NULL; + port_agent->link_down_handler = NULL; + + port_agent->timer_pending = false; + + for (index = 0; index < SCI_MAX_PORTS; index++) { + port_agent->phy_valid_port_range[index].min_index = 0; + port_agent->phy_valid_port_range[index].max_index = 0; + } +} + +enum sci_status sci_port_configuration_agent_initialize( + struct isci_host *ihost, + struct sci_port_configuration_agent *port_agent) +{ + enum sci_status status; + enum sci_port_configuration_mode mode; + + mode = ihost->oem_parameters.controller.mode_type; + + if (mode == SCIC_PORT_MANUAL_CONFIGURATION_MODE) { + status = sci_mpc_agent_validate_phy_configuration( + ihost, port_agent); + + port_agent->link_up_handler = sci_mpc_agent_link_up; + port_agent->link_down_handler = sci_mpc_agent_link_down; + + sci_init_timer(&port_agent->timer, mpc_agent_timeout); + } else { + status = sci_apc_agent_validate_phy_configuration( + ihost, port_agent); + + port_agent->link_up_handler = sci_apc_agent_link_up; + port_agent->link_down_handler = sci_apc_agent_link_down; + + sci_init_timer(&port_agent->timer, apc_agent_timeout); + } + + return status; +} diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c new file mode 100644 index 00000000000..b5f4341de24 --- /dev/null +++ b/drivers/scsi/isci/probe_roms.c @@ -0,0 +1,243 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. 
+ */ + +/* probe_roms - scan for oem parameters */ + +#include <linux/kernel.h> +#include <linux/firmware.h> +#include <linux/uaccess.h> +#include <linux/efi.h> +#include <asm/probe_roms.h> + +#include "isci.h" +#include "task.h" +#include "probe_roms.h" + +static efi_char16_t isci_efivar_name[] = { + 'R', 's', 't', 'S', 'c', 'u', 'O' +}; + +struct isci_orom *isci_request_oprom(struct pci_dev *pdev) +{ + void __iomem *oprom = pci_map_biosrom(pdev); + struct isci_orom *rom = NULL; + size_t len, i; + int j; + char oem_sig[4]; + struct isci_oem_hdr oem_hdr; + u8 *tmp, sum; + + if (!oprom) + return NULL; + + len = pci_biosrom_size(pdev); + rom = devm_kzalloc(&pdev->dev, sizeof(*rom), GFP_KERNEL); + if (!rom) { + dev_warn(&pdev->dev, + "Unable to allocate memory for orom\n"); + return NULL; + } + + for (i = 0; i < len && rom; i += ISCI_OEM_SIG_SIZE) { + memcpy_fromio(oem_sig, oprom + i, ISCI_OEM_SIG_SIZE); + + /* we think we found the OEM table */ + if (memcmp(oem_sig, ISCI_OEM_SIG, ISCI_OEM_SIG_SIZE) == 0) { + size_t copy_len; + + memcpy_fromio(&oem_hdr, oprom + i, sizeof(oem_hdr)); + + copy_len = min(oem_hdr.len - sizeof(oem_hdr), + sizeof(*rom)); + + memcpy_fromio(rom, + oprom + i + sizeof(oem_hdr), + copy_len); + + /* calculate checksum */ + tmp = (u8 *)&oem_hdr; + for (j = 0, sum = 0; j < sizeof(oem_hdr); j++, tmp++) + sum += *tmp; + + tmp = (u8 *)rom; + for (j = 0; j < sizeof(*rom); j++, tmp++) + sum += *tmp; + + if (sum != 0) { + dev_warn(&pdev->dev, + "OEM table checksum failed\n"); + continue; + } + + /* keep going if that's not the oem param table */ + if (memcmp(rom->hdr.signature, + ISCI_ROM_SIG, + ISCI_ROM_SIG_SIZE) != 0) + continue; + + dev_info(&pdev->dev, + "OEM parameter table found in OROM\n"); + break; + } + } + + if (i >= len) { + dev_err(&pdev->dev, "oprom parse error\n"); + devm_kfree(&pdev->dev, rom); + rom = NULL; + } + pci_unmap_biosrom(oprom); + + return rom; +} + +enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem, + struct isci_orom *orom, int scu_index) +{ + /* check for valid inputs */ + if (scu_index < 0 || scu_index >= SCI_MAX_CONTROLLERS || + scu_index > orom->hdr.num_elements || !oem) + return -EINVAL; + + *oem = orom->ctrl[scu_index]; + return 0; +} + +struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw) +{ + struct isci_orom *orom = NULL, *data; + int i, j; + + if (request_firmware(&fw, ISCI_FW_NAME, &pdev->dev) != 0) + return NULL; + + if (fw->size < sizeof(*orom)) + goto out; + + data = (struct isci_orom *)fw->data; + + if (strncmp(ISCI_ROM_SIG, data->hdr.signature, + strlen(ISCI_ROM_SIG)) != 0) + goto out; + + orom = devm_kzalloc(&pdev->dev, fw->size, GFP_KERNEL); + if (!orom) + goto out; + + memcpy(orom, fw->data, fw->size); + + if (is_c0(pdev)) + goto out; + + /* + * deprecated: override default amp_control for pre-preproduction + * silicon revisions + */ + for (i = 0; i < ARRAY_SIZE(orom->ctrl); i++) + for (j = 0; j < ARRAY_SIZE(orom->ctrl[i].phys); j++) { + orom->ctrl[i].phys[j].afe_tx_amp_control0 = 0xe7c03; + orom->ctrl[i].phys[j].afe_tx_amp_control1 = 0xe7c03; + orom->ctrl[i].phys[j].afe_tx_amp_control2 = 0xe7c03; + orom->ctrl[i].phys[j].afe_tx_amp_control3 = 0xe7c03; + } + out: + release_firmware(fw); + + return orom; +} + +static struct efi *get_efi(void) +{ +#ifdef CONFIG_EFI + return &efi; +#else + return NULL; +#endif +} + +struct isci_orom *isci_get_efi_var(struct pci_dev *pdev) +{ + efi_status_t status; + struct isci_orom *rom; + struct isci_oem_hdr *oem_hdr; + u8 *tmp, sum; + int j; + 
unsigned long data_len; + u8 *efi_data; + u32 efi_attrib = 0; + + data_len = 1024; + efi_data = devm_kzalloc(&pdev->dev, data_len, GFP_KERNEL); + if (!efi_data) { + dev_warn(&pdev->dev, + "Unable to allocate memory for EFI data\n"); + return NULL; + } + + rom = (struct isci_orom *)(efi_data + sizeof(struct isci_oem_hdr)); + + if (get_efi()) + status = get_efi()->get_variable(isci_efivar_name, + &ISCI_EFI_VENDOR_GUID, + &efi_attrib, + &data_len, + efi_data); + else + status = EFI_NOT_FOUND; + + if (status != EFI_SUCCESS) { + dev_warn(&pdev->dev, + "Unable to obtain EFI var data for OEM parms\n"); + return NULL; + } + + oem_hdr = (struct isci_oem_hdr *)efi_data; + + if (memcmp(oem_hdr->sig, ISCI_OEM_SIG, ISCI_OEM_SIG_SIZE) != 0) { + dev_warn(&pdev->dev, + "Invalid OEM header signature\n"); + return NULL; + } + + /* calculate checksum */ + tmp = (u8 *)efi_data; + for (j = 0, sum = 0; j < (sizeof(*oem_hdr) + sizeof(*rom)); j++, tmp++) + sum += *tmp; + + if (sum != 0) { + dev_warn(&pdev->dev, + "OEM table checksum failed\n"); + return NULL; + } + + if (memcmp(rom->hdr.signature, + ISCI_ROM_SIG, + ISCI_ROM_SIG_SIZE) != 0) { + dev_warn(&pdev->dev, + "Invalid OEM table signature\n"); + return NULL; + } + + return rom; +} diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h new file mode 100644 index 00000000000..dc007e692f4 --- /dev/null +++ b/drivers/scsi/isci/probe_roms.h @@ -0,0 +1,249 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef _ISCI_PROBE_ROMS_H_ +#define _ISCI_PROBE_ROMS_H_ + +#ifdef __KERNEL__ +#include <linux/firmware.h> +#include <linux/pci.h> +#include <linux/efi.h> +#include "isci.h" + +#define SCIC_SDS_PARM_NO_SPEED 0 + +/* generation 1 (i.e. 1.5 Gb/s) */ +#define SCIC_SDS_PARM_GEN1_SPEED 1 + +/* generation 2 (i.e. 3.0 Gb/s) */ +#define SCIC_SDS_PARM_GEN2_SPEED 2 + +/* generation 3 (i.e. 6.0 Gb/s) */ +#define SCIC_SDS_PARM_GEN3_SPEED 3 +#define SCIC_SDS_PARM_MAX_SPEED SCIC_SDS_PARM_GEN3_SPEED + +/* parameters that can be set by module parameters */ +struct sci_user_parameters { + struct sci_phy_user_params { + /** + * This field specifies the NOTIFY (ENABLE SPIN UP) primitive + * insertion frequency for this phy index. + */ + u32 notify_enable_spin_up_insertion_frequency; + + /** + * This method specifies the number of transmitted DWORDs within which + * to transmit a single ALIGN primitive. This value applies regardless + * of what type of device is attached or connection state. A value of + * 0 indicates that no ALIGN primitives will be inserted. + */ + u16 align_insertion_frequency; + + /** + * This method specifies the number of transmitted DWORDs within which + * to transmit 2 ALIGN primitives. This applies for SAS connections + * only. A minimum value of 3 is required for this field. + */ + u16 in_connection_align_insertion_frequency; + + /** + * This field indicates the maximum speed generation to be utilized + * by phys in the supplied port. + * - A value of 1 indicates generation 1 (i.e. 1.5 Gb/s). + * - A value of 2 indicates generation 2 (i.e. 3.0 Gb/s). + * - A value of 3 indicates generation 3 (i.e. 6.0 Gb/s). + */ + u8 max_speed_generation; + + } phys[SCI_MAX_PHYS]; + + /** + * This field specifies the maximum number of direct attached devices + * that can have power supplied to them simultaneously. + */ + u8 max_number_concurrent_device_spin_up; + + /** + * This field specifies the number of seconds to allow a phy to consume + * power before yielding to another phy. + * + */ + u8 phy_spin_up_delay_interval; + + /** + * These timer values specifies how long a link will remain open with no + * activity in increments of a microsecond, it can be in increments of + * 100 microseconds if the upper most bit is set. + * + */ + u16 stp_inactivity_timeout; + u16 ssp_inactivity_timeout; + + /** + * These timer values specifies how long a link will remain open in increments + * of 100 microseconds. + * + */ + u16 stp_max_occupancy_timeout; + u16 ssp_max_occupancy_timeout; + + /** + * This timer value specifies how long a link will remain open with no + * outbound traffic in increments of a microsecond. 
+ * + */ + u8 no_outbound_task_timeout; + +}; + +#define SCIC_SDS_PARM_PHY_MASK_MIN 0x0 +#define SCIC_SDS_PARM_PHY_MASK_MAX 0xF +#define MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT 4 + +struct sci_oem_params; +int sci_oem_parameters_validate(struct sci_oem_params *oem); + +struct isci_orom; +struct isci_orom *isci_request_oprom(struct pci_dev *pdev); +enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem, + struct isci_orom *orom, int scu_index); +struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw); +struct isci_orom *isci_get_efi_var(struct pci_dev *pdev); + +struct isci_oem_hdr { + u8 sig[4]; + u8 rev_major; + u8 rev_minor; + u16 len; + u8 checksum; + u8 reserved1; + u16 reserved2; +} __attribute__ ((packed)); + +#else +#define SCI_MAX_PORTS 4 +#define SCI_MAX_PHYS 4 +#define SCI_MAX_CONTROLLERS 2 +#endif + +#define ISCI_FW_NAME "isci/isci_firmware.bin" + +#define ROMSIGNATURE 0xaa55 + +#define ISCI_OEM_SIG "$OEM" +#define ISCI_OEM_SIG_SIZE 4 +#define ISCI_ROM_SIG "ISCUOEMB" +#define ISCI_ROM_SIG_SIZE 8 + +#define ISCI_EFI_VENDOR_GUID \ + EFI_GUID(0x193dfefa, 0xa445, 0x4302, 0x99, 0xd8, 0xef, 0x3a, 0xad, \ + 0x1a, 0x04, 0xc6) +#define ISCI_EFI_VAR_NAME "RstScuO" + +/* Allowed PORT configuration modes APC Automatic PORT configuration mode is + * defined by the OEM configuration parameters providing no PHY_MASK parameters + * for any PORT. i.e. There are no phys assigned to any of the ports at start. + * MPC Manual PORT configuration mode is defined by the OEM configuration + * parameters providing a PHY_MASK value for any PORT. It is assumed that any + * PORT with no PHY_MASK is an invalid port and not all PHYs must be assigned. + * A PORT_PHY mask that assigns just a single PHY to a port and no other PHYs + * being assigned is sufficient to declare manual PORT configuration. + */ +enum sci_port_configuration_mode { + SCIC_PORT_MANUAL_CONFIGURATION_MODE = 0, + SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE = 1 +}; + +struct sci_bios_oem_param_block_hdr { + uint8_t signature[ISCI_ROM_SIG_SIZE]; + uint16_t total_block_length; + uint8_t hdr_length; + uint8_t version; + uint8_t preboot_source; + uint8_t num_elements; + uint16_t element_length; + uint8_t reserved[8]; +} __attribute__ ((packed)); + +struct sci_oem_params { + struct { + uint8_t mode_type; + uint8_t max_concurrent_dev_spin_up; + uint8_t do_enable_ssc; + uint8_t reserved; + } controller; + + struct { + uint8_t phy_mask; + } ports[SCI_MAX_PORTS]; + + struct sci_phy_oem_params { + struct { + uint32_t high; + uint32_t low; + } sas_address; + + uint32_t afe_tx_amp_control0; + uint32_t afe_tx_amp_control1; + uint32_t afe_tx_amp_control2; + uint32_t afe_tx_amp_control3; + } phys[SCI_MAX_PHYS]; +} __attribute__ ((packed)); + +struct isci_orom { + struct sci_bios_oem_param_block_hdr hdr; + struct sci_oem_params ctrl[SCI_MAX_CONTROLLERS]; +} __attribute__ ((packed)); + +#endif diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h new file mode 100644 index 00000000000..9b266c7428e --- /dev/null +++ b/drivers/scsi/isci/registers.h @@ -0,0 +1,1934 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _SCU_REGISTERS_H_ +#define _SCU_REGISTERS_H_ + +/** + * This file contains the constants and structures for the SCU memory mapped + * registers. 
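+ *
+ * Most of the register fields below are described by a NAME_SHIFT/NAME_MASK
+ * pair and are assembled with the SCU_GEN_VALUE()/SCU_GEN_BIT() helpers
+ * defined later in this file. As an illustrative example,
+ *
+ *   SCU_GEN_VALUE(SMU_INTERRUPT_COALESCING_CONTROL_TIMER, 5)
+ *
+ * expands to ((5 << SMU_INTERRUPT_COALESCING_CONTROL_TIMER_SHIFT) &
+ * SMU_INTERRUPT_COALESCING_CONTROL_TIMER_MASK).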
+ * + * + */ + +#define SCU_VIIT_ENTRY_ID_MASK (0xC0000000) +#define SCU_VIIT_ENTRY_ID_SHIFT (30) + +#define SCU_VIIT_ENTRY_FUNCTION_MASK (0x0FF00000) +#define SCU_VIIT_ENTRY_FUNCTION_SHIFT (20) + +#define SCU_VIIT_ENTRY_IPPTMODE_MASK (0x0001F800) +#define SCU_VIIT_ENTRY_IPPTMODE_SHIFT (12) + +#define SCU_VIIT_ENTRY_LPVIE_MASK (0x00000F00) +#define SCU_VIIT_ENTRY_LPVIE_SHIFT (8) + +#define SCU_VIIT_ENTRY_STATUS_MASK (0x000000FF) +#define SCU_VIIT_ENTRY_STATUS_SHIFT (0) + +#define SCU_VIIT_ENTRY_ID_INVALID (0 << SCU_VIIT_ENTRY_ID_SHIFT) +#define SCU_VIIT_ENTRY_ID_VIIT (1 << SCU_VIIT_ENTRY_ID_SHIFT) +#define SCU_VIIT_ENTRY_ID_IIT (2 << SCU_VIIT_ENTRY_ID_SHIFT) +#define SCU_VIIT_ENTRY_ID_VIRT_EXP (3 << SCU_VIIT_ENTRY_ID_SHIFT) + +#define SCU_VIIT_IPPT_SSP_INITIATOR (0x01 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT) +#define SCU_VIIT_IPPT_SMP_INITIATOR (0x02 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT) +#define SCU_VIIT_IPPT_STP_INITIATOR (0x04 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT) +#define SCU_VIIT_IPPT_INITIATOR \ + (\ + SCU_VIIT_IPPT_SSP_INITIATOR \ + | SCU_VIIT_IPPT_SMP_INITIATOR \ + | SCU_VIIT_IPPT_STP_INITIATOR \ + ) + +#define SCU_VIIT_STATUS_RNC_VALID (0x01 << SCU_VIIT_ENTRY_STATUS_SHIFT) +#define SCU_VIIT_STATUS_ADDRESS_VALID (0x02 << SCU_VIIT_ENTRY_STATUS_SHIFT) +#define SCU_VIIT_STATUS_RNI_VALID (0x04 << SCU_VIIT_ENTRY_STATUS_SHIFT) +#define SCU_VIIT_STATUS_ALL_VALID \ + (\ + SCU_VIIT_STATUS_RNC_VALID \ + | SCU_VIIT_STATUS_ADDRESS_VALID \ + | SCU_VIIT_STATUS_RNI_VALID \ + ) + +#define SCU_VIIT_IPPT_SMP_TARGET (0x10 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT) + +/** + * struct scu_viit_entry - This is the SCU Virtual Initiator Table Entry + * + * + */ +struct scu_viit_entry { + /** + * This must be encoded as to the type of initiator that is being constructed + * for this port. 
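+ *
+ * For example (illustrative only), an initiator entry could be encoded by
+ * OR-ing the definitions above, e.g.:
+ *
+ *   SCU_VIIT_ENTRY_ID_VIIT | SCU_VIIT_IPPT_INITIATOR | SCU_VIIT_STATUS_ALL_VALID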
+ */ + u32 status; + + /** + * Virtual initiator high SAS Address + */ + u32 initiator_sas_address_hi; + + /** + * Virtual initiator low SAS Address + */ + u32 initiator_sas_address_lo; + + /** + * This must be 0 + */ + u32 reserved; + +}; + + +/* IIT Status Defines */ +#define SCU_IIT_ENTRY_ID_MASK (0xC0000000) +#define SCU_IIT_ENTRY_ID_SHIFT (30) + +#define SCU_IIT_ENTRY_STATUS_UPDATE_MASK (0x20000000) +#define SCU_IIT_ENTRY_STATUS_UPDATE_SHIFT (29) + +#define SCU_IIT_ENTRY_LPI_MASK (0x00000F00) +#define SCU_IIT_ENTRY_LPI_SHIFT (8) + +#define SCU_IIT_ENTRY_STATUS_MASK (0x000000FF) +#define SCU_IIT_ENTRY_STATUS_SHIFT (0) + +/* IIT Remote Initiator Defines */ +#define SCU_IIT_ENTRY_REMOTE_TAG_MASK (0x0000FFFF) +#define SCU_IIT_ENTRY_REMOTE_TAG_SHIFT (0) + +#define SCU_IIT_ENTRY_REMOTE_RNC_MASK (0x0FFF0000) +#define SCU_IIT_ENTRY_REMOTE_RNC_SHIFT (16) + +#define SCU_IIT_ENTRY_ID_INVALID (0 << SCU_IIT_ENTRY_ID_SHIFT) +#define SCU_IIT_ENTRY_ID_VIIT (1 << SCU_IIT_ENTRY_ID_SHIFT) +#define SCU_IIT_ENTRY_ID_IIT (2 << SCU_IIT_ENTRY_ID_SHIFT) +#define SCU_IIT_ENTRY_ID_VIRT_EXP (3 << SCU_IIT_ENTRY_ID_SHIFT) + +/** + * struct scu_iit_entry - This will be implemented later when we support + * virtual functions + * + * + */ +struct scu_iit_entry { + u32 status; + u32 remote_initiator_sas_address_hi; + u32 remote_initiator_sas_address_lo; + u32 remote_initiator; + +}; + +/* Generate a value for an SCU register */ +#define SCU_GEN_VALUE(name, value) \ + (((value) << name ## _SHIFT) & (name ## _MASK)) + +/* + * Generate a bit value for an SCU register + * Make sure that the register MASK is just a single bit */ +#define SCU_GEN_BIT(name) \ + SCU_GEN_VALUE(name, ((u32)1)) + +#define SCU_SET_BIT(name, reg_value) \ + ((reg_value) | SCU_GEN_BIT(name)) + +#define SCU_CLEAR_BIT(name, reg_value) \ + ((reg_value)$ ~(SCU_GEN_BIT(name))) + +/* + * ***************************************************************************** + * Unions for bitfield definitions of SCU Registers + * SMU Post Context Port + * ***************************************************************************** */ +#define SMU_POST_CONTEXT_PORT_CONTEXT_INDEX_SHIFT (0) +#define SMU_POST_CONTEXT_PORT_CONTEXT_INDEX_MASK (0x00000FFF) +#define SMU_POST_CONTEXT_PORT_LOGICAL_PORT_INDEX_SHIFT (12) +#define SMU_POST_CONTEXT_PORT_LOGICAL_PORT_INDEX_MASK (0x0000F000) +#define SMU_POST_CONTEXT_PORT_PROTOCOL_ENGINE_SHIFT (16) +#define SMU_POST_CONTEXT_PORT_PROTOCOL_ENGINE_MASK (0x00030000) +#define SMU_POST_CONTEXT_PORT_COMMAND_CONTEXT_SHIFT (18) +#define SMU_POST_CONTEXT_PORT_COMMAND_CONTEXT_MASK (0x00FC0000) +#define SMU_POST_CONTEXT_PORT_RESERVED_MASK (0xFF000000) + +#define SMU_PCP_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SMU_POST_CONTEXT_PORT_ ## name, value) + +/* ***************************************************************************** */ +#define SMU_INTERRUPT_STATUS_COMPLETION_SHIFT (31) +#define SMU_INTERRUPT_STATUS_COMPLETION_MASK (0x80000000) +#define SMU_INTERRUPT_STATUS_QUEUE_SUSPEND_SHIFT (1) +#define SMU_INTERRUPT_STATUS_QUEUE_SUSPEND_MASK (0x00000002) +#define SMU_INTERRUPT_STATUS_QUEUE_ERROR_SHIFT (0) +#define SMU_INTERRUPT_STATUS_QUEUE_ERROR_MASK (0x00000001) +#define SMU_INTERRUPT_STATUS_RESERVED_MASK (0x7FFFFFFC) + +#define SMU_ISR_GEN_BIT(name) \ + SCU_GEN_BIT(SMU_INTERRUPT_STATUS_ ## name) + +#define SMU_ISR_QUEUE_ERROR SMU_ISR_GEN_BIT(QUEUE_ERROR) +#define SMU_ISR_QUEUE_SUSPEND SMU_ISR_GEN_BIT(QUEUE_SUSPEND) +#define SMU_ISR_COMPLETION SMU_ISR_GEN_BIT(COMPLETION) + +/* 
***************************************************************************** */ +#define SMU_INTERRUPT_MASK_COMPLETION_SHIFT (31) +#define SMU_INTERRUPT_MASK_COMPLETION_MASK (0x80000000) +#define SMU_INTERRUPT_MASK_QUEUE_SUSPEND_SHIFT (1) +#define SMU_INTERRUPT_MASK_QUEUE_SUSPEND_MASK (0x00000002) +#define SMU_INTERRUPT_MASK_QUEUE_ERROR_SHIFT (0) +#define SMU_INTERRUPT_MASK_QUEUE_ERROR_MASK (0x00000001) +#define SMU_INTERRUPT_MASK_RESERVED_MASK (0x7FFFFFFC) + +#define SMU_IMR_GEN_BIT(name) \ + SCU_GEN_BIT(SMU_INTERRUPT_MASK_ ## name) + +#define SMU_IMR_QUEUE_ERROR SMU_IMR_GEN_BIT(QUEUE_ERROR) +#define SMU_IMR_QUEUE_SUSPEND SMU_IMR_GEN_BIT(QUEUE_SUSPEND) +#define SMU_IMR_COMPLETION SMU_IMR_GEN_BIT(COMPLETION) + +/* ***************************************************************************** */ +#define SMU_INTERRUPT_COALESCING_CONTROL_TIMER_SHIFT (0) +#define SMU_INTERRUPT_COALESCING_CONTROL_TIMER_MASK (0x0000001F) +#define SMU_INTERRUPT_COALESCING_CONTROL_NUMBER_SHIFT (8) +#define SMU_INTERRUPT_COALESCING_CONTROL_NUMBER_MASK (0x0000FF00) +#define SMU_INTERRUPT_COALESCING_CONTROL_RESERVED_MASK (0xFFFF00E0) + +#define SMU_ICC_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SMU_INTERRUPT_COALESCING_CONTROL_ ## name, value) + +/* ***************************************************************************** */ +#define SMU_TASK_CONTEXT_RANGE_START_SHIFT (0) +#define SMU_TASK_CONTEXT_RANGE_START_MASK (0x00000FFF) +#define SMU_TASK_CONTEXT_RANGE_ENDING_SHIFT (16) +#define SMU_TASK_CONTEXT_RANGE_ENDING_MASK (0x0FFF0000) +#define SMU_TASK_CONTEXT_RANGE_ENABLE_SHIFT (31) +#define SMU_TASK_CONTEXT_RANGE_ENABLE_MASK (0x80000000) +#define SMU_TASK_CONTEXT_RANGE_RESERVED_MASK (0x7000F000) + +#define SMU_TCR_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SMU_TASK_CONTEXT_RANGE_ ## name, value) + +#define SMU_TCR_GEN_BIT(name, value) \ + SCU_GEN_BIT(SMU_TASK_CONTEXT_RANGE_ ## name) + +/* ***************************************************************************** */ + +#define SMU_COMPLETION_QUEUE_PUT_POINTER_SHIFT (0) +#define SMU_COMPLETION_QUEUE_PUT_POINTER_MASK (0x00003FFF) +#define SMU_COMPLETION_QUEUE_PUT_CYCLE_BIT_SHIFT (15) +#define SMU_COMPLETION_QUEUE_PUT_CYCLE_BIT_MASK (0x00008000) +#define SMU_COMPLETION_QUEUE_PUT_EVENT_POINTER_SHIFT (16) +#define SMU_COMPLETION_QUEUE_PUT_EVENT_POINTER_MASK (0x03FF0000) +#define SMU_COMPLETION_QUEUE_PUT_EVENT_CYCLE_BIT_SHIFT (26) +#define SMU_COMPLETION_QUEUE_PUT_EVENT_CYCLE_BIT_MASK (0x04000000) +#define SMU_COMPLETION_QUEUE_PUT_RESERVED_MASK (0xF8004000) + +#define SMU_CQPR_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_PUT_ ## name, value) + +#define SMU_CQPR_GEN_BIT(name) \ + SCU_GEN_BIT(SMU_COMPLETION_QUEUE_PUT_ ## name) + +/* ***************************************************************************** */ + +#define SMU_COMPLETION_QUEUE_GET_POINTER_SHIFT (0) +#define SMU_COMPLETION_QUEUE_GET_POINTER_MASK (0x00003FFF) +#define SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT (15) +#define SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_MASK (0x00008000) +#define SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT (16) +#define SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK (0x03FF0000) +#define SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT (26) +#define SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_MASK (0x04000000) +#define SMU_COMPLETION_QUEUE_GET_ENABLE_SHIFT (30) +#define SMU_COMPLETION_QUEUE_GET_ENABLE_MASK (0x40000000) +#define SMU_COMPLETION_QUEUE_GET_EVENT_ENABLE_SHIFT (31) +#define SMU_COMPLETION_QUEUE_GET_EVENT_ENABLE_MASK (0x80000000) +#define 
SMU_COMPLETION_QUEUE_GET_RESERVED_MASK (0x38004000) + +#define SMU_CQGR_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_GET_ ## name, value) + +#define SMU_CQGR_GEN_BIT(name) \ + SCU_GEN_BIT(SMU_COMPLETION_QUEUE_GET_ ## name) + +#define SMU_CQGR_CYCLE_BIT \ + SMU_CQGR_GEN_BIT(CYCLE_BIT) + +#define SMU_CQGR_EVENT_CYCLE_BIT \ + SMU_CQGR_GEN_BIT(EVENT_CYCLE_BIT) + +#define SMU_CQGR_GET_POINTER_SET(value) \ + SMU_CQGR_GEN_VAL(POINTER, value) + + +/* ***************************************************************************** */ +#define SMU_COMPLETION_QUEUE_CONTROL_QUEUE_LIMIT_SHIFT (0) +#define SMU_COMPLETION_QUEUE_CONTROL_QUEUE_LIMIT_MASK (0x00003FFF) +#define SMU_COMPLETION_QUEUE_CONTROL_EVENT_LIMIT_SHIFT (16) +#define SMU_COMPLETION_QUEUE_CONTROL_EVENT_LIMIT_MASK (0x03FF0000) +#define SMU_COMPLETION_QUEUE_CONTROL_RESERVED_MASK (0xFC00C000) + +#define SMU_CQC_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_CONTROL_ ## name, value) + +#define SMU_CQC_QUEUE_LIMIT_SET(value) \ + SMU_CQC_GEN_VAL(QUEUE_LIMIT, value) + +#define SMU_CQC_EVENT_LIMIT_SET(value) \ + SMU_CQC_GEN_VAL(EVENT_LIMIT, value) + + +/* ***************************************************************************** */ +#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT (0) +#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK (0x00000FFF) +#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT (12) +#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK (0x00007000) +#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT (15) +#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK (0x07FF8000) +#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_SHIFT (27) +#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_MASK (0x08000000) +#define SMU_DEVICE_CONTEXT_CAPACITY_RESERVED_MASK (0xF0000000) + +#define SMU_DCC_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SMU_DEVICE_CONTEXT_CAPACITY_ ## name, value) + +#define SMU_DCC_GET_MAX_PEG(value) \ + (\ + ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_MASK) \ + >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT \ + ) + +#define SMU_DCC_GET_MAX_LP(value) \ + (\ + ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \ + >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT \ + ) + +#define SMU_DCC_GET_MAX_TC(value) \ + (\ + ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \ + >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT \ + ) + +#define SMU_DCC_GET_MAX_RNC(value) \ + (\ + ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \ + >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT \ + ) + +/* -------------------------------------------------------------------------- */ + +#define SMU_CONTROL_STATUS_TASK_CONTEXT_RANGE_ENABLE_SHIFT (0) +#define SMU_CONTROL_STATUS_TASK_CONTEXT_RANGE_ENABLE_MASK (0x00000001) +#define SMU_CONTROL_STATUS_COMPLETION_BYTE_SWAP_ENABLE_SHIFT (1) +#define SMU_CONTROL_STATUS_COMPLETION_BYTE_SWAP_ENABLE_MASK (0x00000002) +#define SMU_CONTROL_STATUS_CONTEXT_RAM_INIT_COMPLETED_SHIFT (16) +#define SMU_CONTROL_STATUS_CONTEXT_RAM_INIT_COMPLETED_MASK (0x00010000) +#define SMU_CONTROL_STATUS_SCHEDULER_RAM_INIT_COMPLETED_SHIFT (17) +#define SMU_CONTROL_STATUS_SCHEDULER_RAM_INIT_COMPLETED_MASK (0x00020000) +#define SMU_CONTROL_STATUS_RESERVED_MASK (0xFFFCFFFC) + +#define SMU_SMUCSR_GEN_BIT(name) \ + SCU_GEN_BIT(SMU_CONTROL_STATUS_ ## name) + +#define SMU_SMUCSR_SCHEDULER_RAM_INIT_COMPLETED \ + (SMU_SMUCSR_GEN_BIT(SCHEDULER_RAM_INIT_COMPLETED)) + +#define SMU_SMUCSR_CONTEXT_RAM_INIT_COMPLETED \ + (SMU_SMUCSR_GEN_BIT(CONTEXT_RAM_INIT_COMPLETED)) + +#define SCU_RAM_INIT_COMPLETED \ + (\ + SMU_SMUCSR_CONTEXT_RAM_INIT_COMPLETED \ + | 
SMU_SMUCSR_SCHEDULER_RAM_INIT_COMPLETED \ + ) + +/* -------------------------------------------------------------------------- */ + +#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE0_SHIFT (0) +#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE0_MASK (0x00000001) +#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE1_SHIFT (1) +#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE1_MASK (0x00000002) +#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE2_SHIFT (2) +#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE2_MASK (0x00000004) +#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE3_SHIFT (3) +#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE3_MASK (0x00000008) +#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE0_SHIFT (8) +#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE0_MASK (0x00000100) +#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE1_SHIFT (9) +#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE1_MASK (0x00000200) +#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE2_SHIFT (10) +#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE2_MASK (0x00000400) +#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE3_SHIFT (11) +#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE3_MASK (0x00000800) + +#define SMU_RESET_PROTOCOL_ENGINE(peg, pe) \ + ((1 << (pe)) << ((peg) * 8)) + +#define SMU_RESET_PEG_PROTOCOL_ENGINES(peg) \ + (\ + SMU_RESET_PROTOCOL_ENGINE(peg, 0) \ + | SMU_RESET_PROTOCOL_ENGINE(peg, 1) \ + | SMU_RESET_PROTOCOL_ENGINE(peg, 2) \ + | SMU_RESET_PROTOCOL_ENGINE(peg, 3) \ + ) + +#define SMU_RESET_ALL_PROTOCOL_ENGINES() \ + (\ + SMU_RESET_PEG_PROTOCOL_ENGINES(0) \ + | SMU_RESET_PEG_PROTOCOL_ENGINES(1) \ + ) + +#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP0_SHIFT (16) +#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP0_MASK (0x00010000) +#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP2_SHIFT (17) +#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP2_MASK (0x00020000) +#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP0_SHIFT (18) +#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP0_MASK (0x00040000) +#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP2_SHIFT (19) +#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP2_MASK (0x00080000) + +#define SMU_RESET_WIDE_PORT_QUEUE(peg, wide_port) \ + ((1 << ((wide_port) / 2)) << ((peg) * 2) << 16) + +#define SMU_SOFTRESET_CONTROL_RESET_PEG0_SHIFT (20) +#define SMU_SOFTRESET_CONTROL_RESET_PEG0_MASK (0x00100000) +#define SMU_SOFTRESET_CONTROL_RESET_PEG1_SHIFT (21) +#define SMU_SOFTRESET_CONTROL_RESET_PEG1_MASK (0x00200000) +#define SMU_SOFTRESET_CONTROL_RESET_SCU_SHIFT (22) +#define SMU_SOFTRESET_CONTROL_RESET_SCU_MASK (0x00400000) + +/* + * It seems to make sense that if you are going to reset the protocol + * engine group that you would also reset all of the protocol engines */ +#define SMU_RESET_PROTOCOL_ENGINE_GROUP(peg) \ + (\ + (1 << ((peg) + 20)) \ + | SMU_RESET_WIDE_PORT_QUEUE(peg, 0) \ + | SMU_RESET_WIDE_PORT_QUEUE(peg, 1) \ + | SMU_RESET_PEG_PROTOCOL_ENGINES(peg) \ + ) + +#define SMU_RESET_ALL_PROTOCOL_ENGINE_GROUPS() \ + (\ + SMU_RESET_PROTOCOL_ENGINE_GROUP(0) \ + | SMU_RESET_PROTOCOL_ENGINE_GROUP(1) \ + ) + +#define SMU_RESET_SCU() (0xFFFFFFFF) + + + +/* ***************************************************************************** */ +#define SMU_TASK_CONTEXT_ASSIGNMENT_STARTING_SHIFT (0) +#define SMU_TASK_CONTEXT_ASSIGNMENT_STARTING_MASK (0x00000FFF) +#define SMU_TASK_CONTEXT_ASSIGNMENT_ENDING_SHIFT (16) +#define SMU_TASK_CONTEXT_ASSIGNMENT_ENDING_MASK (0x0FFF0000) +#define SMU_TASK_CONTEXT_ASSIGNMENT_RANGE_CHECK_ENABLE_SHIFT (31) +#define SMU_TASK_CONTEXT_ASSIGNMENT_RANGE_CHECK_ENABLE_MASK (0x80000000) +#define 
SMU_TASK_CONTEXT_ASSIGNMENT_RESERVED_MASK (0x7000F000)
+
+#define SMU_TCA_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SMU_TASK_CONTEXT_ASSIGNMENT_ ## name, value)
+
+#define SMU_TCA_GEN_BIT(name) \
+ SCU_GEN_BIT(SMU_TASK_CONTEXT_ASSIGNMENT_ ## name)
+
+/* ***************************************************************************** */
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_QUEUE_SIZE_SHIFT (0)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_QUEUE_SIZE_MASK (0x00000FFF)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_RESERVED_MASK (0xFFFFF000)
+
+#define SCU_UFQC_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_ ## name, value)
+
+#define SCU_UFQC_QUEUE_SIZE_SET(value) \
+ SCU_UFQC_GEN_VAL(QUEUE_SIZE, value)
+
+/* ***************************************************************************** */
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_POINTER_SHIFT (0)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_POINTER_MASK (0x00000FFF)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_CYCLE_BIT_SHIFT (12)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_CYCLE_BIT_MASK (0x00001000)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_RESERVED_MASK (0xFFFFE000)
+
+#define SCU_UFQPP_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_ ## name, value)
+
+#define SCU_UFQPP_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_ ## name)
+
+/*
+ * *****************************************************************************
+ * * SDMA Registers
+ * ***************************************************************************** */
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_POINTER_SHIFT (0)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_POINTER_MASK (0x00000FFF)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_CYCLE_BIT_SHIFT (12)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_CYCLE_BIT_MASK (0x00001000)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ENABLE_BIT_SHIFT (31)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ENABLE_BIT_MASK (0x80000000)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_RESERVED_MASK (0x7FFFE000)
+
+#define SCU_UFQGP_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ ## name, value)
+
+#define SCU_UFQGP_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ ## name)
+
+#define SCU_UFQGP_CYCLE_BIT(value) \
+ SCU_UFQGP_GEN_VAL(CYCLE_BIT, value)
+
+#define SCU_UFQGP_GET_POINTER(value) \
+ SCU_UFQGP_GEN_VAL(POINTER, value)
+
+#define SCU_UFQGP_ENABLE(value) \
+ (SCU_UFQGP_GEN_BIT(ENABLE) | value)
+
+#define SCU_UFQGP_DISABLE(value) \
+ (~SCU_UFQGP_GEN_BIT(ENABLE) & value)
+
+#define SCU_UFQGP_VALUE(bit, value) \
+ (SCU_UFQGP_CYCLE_BIT(bit) | SCU_UFQGP_GET_POINTER(value))
+
+/* ***************************************************************************** */
+#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SHIFT (0)
+#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_MASK (0x0000FFFF)
+#define SCU_PDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_SHIFT (16)
+#define SCU_PDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_MASK (0x00010000)
+#define SCU_PDMA_CONFIGURATION_PCI_NO_SNOOP_ENABLE_SHIFT (17)
+#define SCU_PDMA_CONFIGURATION_PCI_NO_SNOOP_ENABLE_MASK (0x00020000)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_BYTE_SWAP_SHIFT (18)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_BYTE_SWAP_MASK (0x00040000)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_SGL_FETCH_SHIFT (19)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_SGL_FETCH_MASK (0x00080000)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_RX_HEADER_RAM_WRITE_SHIFT (20)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_RX_HEADER_RAM_WRITE_MASK (0x00100000)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_UF_ADDRESS_FETCH_SHIFT (21)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_UF_ADDRESS_FETCH_MASK (0x00200000)
+#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SELECT_SHIFT (22)
+#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SELECT_MASK (0x00400000)
+#define SCU_PDMA_CONFIGURATION_RESERVED_MASK (0xFF800000)
+
+#define SCU_PDMACR_GEN_VALUE(name, value) \
+ SCU_GEN_VALUE(SCU_PDMA_CONFIGURATION_ ## name, value)
+
+#define SCU_PDMACR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_PDMA_CONFIGURATION_ ## name)
+
+#define SCU_PDMACR_BE_GEN_BIT(name) \
+ SCU_PDMACR_GEN_BIT(BIG_ENDIAN_CONTROL_ ## name)
+
+/* ***************************************************************************** */
+#define SCU_CDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_SHIFT (8)
+#define SCU_CDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_MASK (0x00000100)
+
+#define SCU_CDMACR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_CDMA_CONFIGURATION_ ## name)
+
+/*
+ * *****************************************************************************
+ * * SCU Link Layer Registers
+ * ***************************************************************************** */
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_TIMEOUT_SHIFT (0)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_TIMEOUT_MASK (0x000000FF)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_LOCK_TIME_SHIFT (8)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_LOCK_TIME_MASK (0x0000FF00)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_RATE_CHANGE_DELAY_SHIFT (16)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_RATE_CHANGE_DELAY_MASK (0x00FF0000)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_DWORD_SYNC_TIMEOUT_SHIFT (24)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_DWORD_SYNC_TIMEOUT_MASK (0xFF000000)
+#define SCU_LINK_LAYER_SPEED_NECGOIATION_TIMER_VALUES_REQUIRED_MASK (0x00000000)
+#define SCU_LINK_LAYER_SPEED_NECGOIATION_TIMER_VALUES_DEFAULT_MASK (0x7D00676F)
+#define SCU_LINK_LAYER_SPEED_NECGOIATION_TIMER_VALUES_RESERVED_MASK (0x00FF0000)
+
+#define SCU_SAS_SPDTOV_GEN_VALUE(name, value) \
+ SCU_GEN_VALUE(SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_ ## name, value)
+
+
+#define SCU_LINK_STATUS_DWORD_SYNC_AQUIRED_SHIFT (2)
+#define SCU_LINK_STATUS_DWORD_SYNC_AQUIRED_MASK (0x00000004)
+#define SCU_LINK_STATUS_TRANSMIT_PORT_SELECTION_DONE_SHIFT (4)
+#define SCU_LINK_STATUS_TRANSMIT_PORT_SELECTION_DONE_MASK (0x00000010)
+#define SCU_LINK_STATUS_RECEIVER_CREDIT_EXHAUSTED_SHIFT (5)
+#define SCU_LINK_STATUS_RECEIVER_CREDIT_EXHAUSTED_MASK (0x00000020)
+#define SCU_LINK_STATUS_RESERVED_MASK (0xFFFFFFCD)
+
+#define SCU_SAS_LLSTA_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_LINK_STATUS_ ## name)
+
+
+/* TODO: Where is the SATA_PSELTOV register?
*/ + +/* + * ***************************************************************************** + * * SCU SAS Maximum Arbitration Wait Time Timeout Register + * ***************************************************************************** */ +#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_VALUE_SHIFT (0) +#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_VALUE_MASK (0x00007FFF) +#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_SCALE_SHIFT (15) +#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_SCALE_MASK (0x00008000) + +#define SCU_SAS_MAWTTOV_GEN_VALUE(name, value) \ + SCU_GEN_VALUE(SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_ ## name, value) + +#define SCU_SAS_MAWTTOV_GEN_BIT(name) \ + SCU_GEN_BIT(SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_ ## name) + + +/* + * TODO: Where is the SAS_LNKTOV regsiter? + * TODO: Where is the SAS_PHYTOV register? */ + +#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_TARGET_SHIFT (1) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_TARGET_MASK (0x00000002) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_TARGET_SHIFT (2) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_TARGET_MASK (0x00000004) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_TARGET_SHIFT (3) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_TARGET_MASK (0x00000008) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_DA_SATA_HOST_SHIFT (8) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_DA_SATA_HOST_MASK (0x00000100) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_INITIATOR_SHIFT (9) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_INITIATOR_MASK (0x00000200) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_INITIATOR_SHIFT (10) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_INITIATOR_MASK (0x00000400) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_INITIATOR_SHIFT (11) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_INITIATOR_MASK (0x00000800) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_REASON_CODE_SHIFT (16) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_REASON_CODE_MASK (0x000F0000) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_ADDRESS_FRAME_TYPE_SHIFT (24) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_ADDRESS_FRAME_TYPE_MASK (0x0F000000) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_DEVICE_TYPE_SHIFT (28) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_DEVICE_TYPE_MASK (0x70000000) +#define SCU_SAS_TRANSMIT_IDENTIFICATION_RESERVED_MASK (0x80F0F1F1) + +#define SCU_SAS_TIID_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SCU_SAS_TRANSMIT_IDENTIFICATION_ ## name, value) + +#define SCU_SAS_TIID_GEN_BIT(name) \ + SCU_GEN_BIT(SCU_SAS_TRANSMIT_IDENTIFICATION_ ## name) + +/* SAS Identify Frame PHY Identifier Register */ +#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_BREAK_REPLY_CAPABLE_SHIFT (16) +#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_BREAK_REPLY_CAPABLE_MASK (0x00010000) +#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_REQUESTED_INSIDE_ZPSDS_SHIFT (17) +#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_REQUESTED_INSIDE_ZPSDS_MASK (0x00020000) +#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_INSIDE_ZPSDS_PERSISTENT_SHIFT (18) +#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_INSIDE_ZPSDS_PERSISTENT_MASK (0x00040000) +#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ID_SHIFT (24) +#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ID_MASK (0xFF000000) +#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_RESERVED_MASK (0x00F800FF) + +#define SCU_SAS_TIPID_GEN_VALUE(name, value) \ + SCU_GEN_VALUE(SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ ## name, value) + +#define SCU_SAS_TIPID_GEN_BIT(name) \ + SCU_GEN_BIT(SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ ## 
name) + + +#define SCU_SAS_PHY_CONFIGURATION_TX_PARITY_CHECK_SHIFT (4) +#define SCU_SAS_PHY_CONFIGURATION_TX_PARITY_CHECK_MASK (0x00000010) +#define SCU_SAS_PHY_CONFIGURATION_TX_BAD_CRC_SHIFT (6) +#define SCU_SAS_PHY_CONFIGURATION_TX_BAD_CRC_MASK (0x00000040) +#define SCU_SAS_PHY_CONFIGURATION_DISABLE_SCRAMBLER_SHIFT (7) +#define SCU_SAS_PHY_CONFIGURATION_DISABLE_SCRAMBLER_MASK (0x00000080) +#define SCU_SAS_PHY_CONFIGURATION_DISABLE_DESCRAMBLER_SHIFT (8) +#define SCU_SAS_PHY_CONFIGURATION_DISABLE_DESCRAMBLER_MASK (0x00000100) +#define SCU_SAS_PHY_CONFIGURATION_DISABLE_CREDIT_INSERTION_SHIFT (9) +#define SCU_SAS_PHY_CONFIGURATION_DISABLE_CREDIT_INSERTION_MASK (0x00000200) +#define SCU_SAS_PHY_CONFIGURATION_SUSPEND_PROTOCOL_ENGINE_SHIFT (11) +#define SCU_SAS_PHY_CONFIGURATION_SUSPEND_PROTOCOL_ENGINE_MASK (0x00000800) +#define SCU_SAS_PHY_CONFIGURATION_SATA_SPINUP_HOLD_SHIFT (12) +#define SCU_SAS_PHY_CONFIGURATION_SATA_SPINUP_HOLD_MASK (0x00001000) +#define SCU_SAS_PHY_CONFIGURATION_TRANSMIT_PORT_SELECTION_SIGNAL_SHIFT (13) +#define SCU_SAS_PHY_CONFIGURATION_TRANSMIT_PORT_SELECTION_SIGNAL_MASK (0x00002000) +#define SCU_SAS_PHY_CONFIGURATION_HARD_RESET_SHIFT (14) +#define SCU_SAS_PHY_CONFIGURATION_HARD_RESET_MASK (0x00004000) +#define SCU_SAS_PHY_CONFIGURATION_OOB_ENABLE_SHIFT (15) +#define SCU_SAS_PHY_CONFIGURATION_OOB_ENABLE_MASK (0x00008000) +#define SCU_SAS_PHY_CONFIGURATION_ENABLE_FRAME_TX_INSERT_ALIGN_SHIFT (23) +#define SCU_SAS_PHY_CONFIGURATION_ENABLE_FRAME_TX_INSERT_ALIGN_MASK (0x00800000) +#define SCU_SAS_PHY_CONFIGURATION_FORWARD_IDENTIFY_FRAME_SHIFT (27) +#define SCU_SAS_PHY_CONFIGURATION_FORWARD_IDENTIFY_FRAME_MASK (0x08000000) +#define SCU_SAS_PHY_CONFIGURATION_DISABLE_BYTE_TRANSPOSE_STP_FRAME_SHIFT (28) +#define SCU_SAS_PHY_CONFIGURATION_DISABLE_BYTE_TRANSPOSE_STP_FRAME_MASK (0x10000000) +#define SCU_SAS_PHY_CONFIGURATION_OOB_RESET_SHIFT (29) +#define SCU_SAS_PHY_CONFIGURATION_OOB_RESET_MASK (0x20000000) +#define SCU_SAS_PHY_CONFIGURATION_THREE_IAF_ENABLE_SHIFT (30) +#define SCU_SAS_PHY_CONFIGURATION_THREE_IAF_ENABLE_MASK (0x40000000) +#define SCU_SAS_PHY_CONFIGURATION_OOB_ALIGN0_ENABLE_SHIFT (31) +#define SCU_SAS_PHY_CONFIGURATION_OOB_ALIGN0_ENABLE_MASK (0x80000000) +#define SCU_SAS_PHY_CONFIGURATION_REQUIRED_MASK (0x0100000F) +#define SCU_SAS_PHY_CONFIGURATION_DEFAULT_MASK (0x4180100F) +#define SCU_SAS_PHY_CONFIGURATION_RESERVED_MASK (0x00000000) + +#define SCU_SAS_PCFG_GEN_BIT(name) \ + SCU_GEN_BIT(SCU_SAS_PHY_CONFIGURATION_ ## name) + +#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_GENERAL_SHIFT (0) +#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_GENERAL_MASK (0x000007FF) +#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_CONNECTED_SHIFT (16) +#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_CONNECTED_MASK (0x00ff0000) + +#define SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_##name, value) + +#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_COUNT_SHIFT (0) +#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_COUNT_MASK (0x0003FFFF) +#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ENABLE_SHIFT (31) +#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ENABLE_MASK (0x80000000) +#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_RESERVED_MASK (0x7FFC0000) + +#define SCU_ENSPINUP_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ ## name, value) + +#define SCU_ENSPINUP_GEN_BIT(name) \ + SCU_GEN_BIT(SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ ## name) + + +#define SCU_LINK_LAYER_PHY_CAPABILITIES_TXSSCTYPE_SHIFT (1) 
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_TXSSCTYPE_MASK (0x00000002) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_RLLRATE_SHIFT (4) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_RLLRATE_MASK (0x000000F0) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO15GBPS_SHIFT (8) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO15GBPS_MASK (0x00000100) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW15GBPS_SHIFT (9) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW15GBPS_MASK (0x00000201) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO30GBPS_SHIFT (10) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO30GBPS_MASK (0x00000401) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW30GBPS_SHIFT (11) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW30GBPS_MASK (0x00000801) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO60GBPS_SHIFT (12) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO60GBPS_MASK (0x00001001) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW60GBPS_SHIFT (13) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW60GBPS_MASK (0x00002001) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_EVEN_PARITY_SHIFT (31) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_EVEN_PARITY_MASK (0x80000000) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_DEFAULT_MASK (0x00003F01) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_REQUIRED_MASK (0x00000001) +#define SCU_LINK_LAYER_PHY_CAPABILITIES_RESERVED_MASK (0x7FFFC00D) + +#define SCU_SAS_PHYCAP_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SCU_LINK_LAYER_PHY_CAPABILITIES_ ## name, value) + +#define SCU_SAS_PHYCAP_GEN_BIT(name) \ + SCU_GEN_BIT(SCU_LINK_LAYER_PHY_CAPABILITIES_ ## name) + + +#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_VIRTUAL_EXPANDER_PHY_ZONE_GROUP_SHIFT (0) +#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_VIRTUAL_EXPANDER_PHY_ZONE_GROUP_MASK (0x000000FF) +#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_INSIDE_SOURCE_ZONE_GROUP_SHIFT (31) +#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_INSIDE_SOURCE_ZONE_GROUP_MASK (0x80000000) +#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_RESERVED_MASK (0x7FFFFF00) + +#define SCU_PSZGCR_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_ ## name, value) + +#define SCU_PSZGCR_GEN_BIT(name) \ + SCU_GEN_BIT(SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_ ## name) + +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_LOCKED_SHIFT (1) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_LOCKED_MASK (0x00000002) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_UPDATING_SHIFT (2) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_UPDATING_MASK (0x00000004) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_LOCKED_SHIFT (4) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_LOCKED_MASK (0x00000010) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_UPDATING_SHIFT (5) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_UPDATING_MASK (0x00000020) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE0_SHIFT (16) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE0_MASK (0x00030000) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE0_SHIFT (19) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE0_MASK (0x00080000) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE1_SHIFT (20) +#define 
SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE1_MASK (0x00300000) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE1_SHIFT (23) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE1_MASK (0x00800000) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE2_SHIFT (24) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE2_MASK (0x03000000) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE2_SHIFT (27) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE2_MASK (0x08000000) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE3_SHIFT (28) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE3_MASK (0x30000000) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE3_SHIFT (31) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE3_MASK (0x80000000) +#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_RESERVED_MASK (0x4444FFC9) + +#define SCU_PEG_SCUVZECR_GEN_VAL(name, val) \ + SCU_GEN_VALUE(SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ ## name, val) + +#define SCU_PEG_SCUVZECR_GEN_BIT(name) \ + SCU_GEN_BIT(SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ ## name) + + +/* + * ***************************************************************************** + * * Port Task Scheduler registers shift and mask values + * ***************************************************************************** */ +#define SCU_PTSG_CONTROL_IT_NEXUS_TIMEOUT_SHIFT (0) +#define SCU_PTSG_CONTROL_IT_NEXUS_TIMEOUT_MASK (0x0000FFFF) +#define SCU_PTSG_CONTROL_TASK_TIMEOUT_SHIFT (16) +#define SCU_PTSG_CONTROL_TASK_TIMEOUT_MASK (0x00FF0000) +#define SCU_PTSG_CONTROL_PTSG_ENABLE_SHIFT (24) +#define SCU_PTSG_CONTROL_PTSG_ENABLE_MASK (0x01000000) +#define SCU_PTSG_CONTROL_ETM_ENABLE_SHIFT (25) +#define SCU_PTSG_CONTROL_ETM_ENABLE_MASK (0x02000000) +#define SCU_PTSG_CONTROL_DEFAULT_MASK (0x00020002) +#define SCU_PTSG_CONTROL_REQUIRED_MASK (0x00000000) +#define SCU_PTSG_CONTROL_RESERVED_MASK (0xFC000000) + +#define SCU_PTSGCR_GEN_VAL(name, val) \ + SCU_GEN_VALUE(SCU_PTSG_CONTROL_ ## name, val) + +#define SCU_PTSGCR_GEN_BIT(name) \ + SCU_GEN_BIT(SCU_PTSG_CONTROL_ ## name) + + +/* ***************************************************************************** */ +#define SCU_PTSG_REAL_TIME_CLOCK_SHIFT (0) +#define SCU_PTSG_REAL_TIME_CLOCK_MASK (0x0000FFFF) +#define SCU_PTSG_REAL_TIME_CLOCK_RESERVED_MASK (0xFFFF0000) + +#define SCU_RTCR_GEN_VAL(name, val) \ + SCU_GEN_VALUE(SCU_PTSG_ ## name, val) + + +#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_PRESCALER_VALUE_SHIFT (0) +#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_PRESCALER_VALUE_MASK (0x00FFFFFF) +#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_RESERVED_MASK (0xFF000000) + +#define SCU_RTCCR_GEN_VAL(name, val) \ + SCU_GEN_VALUE(SCU_PTSG_REAL_TIME_CLOCK_CONTROL_ ## name, val) + + +#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_SUSPEND_SHIFT (0) +#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_SUSPEND_MASK (0x00000001) +#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ENABLE_SHIFT (1) +#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ENABLE_MASK (0x00000002) +#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_RESERVED_MASK (0xFFFFFFFC) + +#define SCU_PTSxCR_GEN_BIT(name) \ + SCU_GEN_BIT(SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ ## name) + + +#define 
SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_NEXT_RN_VALID_SHIFT (0) +#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_NEXT_RN_VALID_MASK (0x00000001) +#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ACTIVE_RNSC_LIST_VALID_SHIFT (1) +#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ACTIVE_RNSC_LIST_VALID_MASK (0x00000002) +#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_PTS_SUSPENDED_SHIFT (2) +#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_PTS_SUSPENDED_MASK (0x00000004) +#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_RESERVED_MASK (0xFFFFFFF8) + +#define SCU_PTSxSR_GEN_BIT(name) \ + SCU_GEN_BIT(SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ ## name) + + +/* + * ***************************************************************************** + * * SGPIO Register shift and mask values + * ***************************************************************************** */ +#define SCU_SGPIO_CONTROL_SGPIO_ENABLE_SHIFT (0) +#define SCU_SGPIO_CONTROL_SGPIO_ENABLE_MASK (0x00000001) +#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_CLOCK_SELECT_SHIFT (1) +#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_CLOCK_SELECT_MASK (0x00000002) +#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_SHIFT_WIDTH_SELECT_SHIFT (2) +#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_SHIFT_WIDTH_SELECT_MASK (0x00000004) +#define SCU_SGPIO_CONTROL_SGPIO_TEST_BIT_SHIFT (15) +#define SCU_SGPIO_CONTROL_SGPIO_TEST_BIT_MASK (0x00008000) +#define SCU_SGPIO_CONTROL_SGPIO_RESERVED_MASK (0xFFFF7FF8) + +#define SCU_SGICRx_GEN_BIT(name) \ + SCU_GEN_BIT(SCU_SGPIO_CONTROL_SGPIO_ ## name) + +#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R0_SHIFT (0) +#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R0_MASK (0x0000000F) +#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R1_SHIFT (4) +#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R1_MASK (0x000000F0) +#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R2_SHIFT (8) +#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R2_MASK (0x00000F00) +#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R3_SHIFT (12) +#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R3_MASK (0x0000F000) +#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_RESERVED_MASK (0xFFFF0000) + +#define SCU_SGPBRx_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_ ## name, value) + +#define SCU_SGPIO_START_DRIVE_LOWER_R0_SHIFT (0) +#define SCU_SGPIO_START_DRIVE_LOWER_R0_MASK (0x00000003) +#define SCU_SGPIO_START_DRIVE_LOWER_R1_SHIFT (4) +#define SCU_SGPIO_START_DRIVE_LOWER_R1_MASK (0x00000030) +#define SCU_SGPIO_START_DRIVE_LOWER_R2_SHIFT (8) +#define SCU_SGPIO_START_DRIVE_LOWER_R2_MASK (0x00000300) +#define SCU_SGPIO_START_DRIVE_LOWER_R3_SHIFT (12) +#define SCU_SGPIO_START_DRIVE_LOWER_R3_MASK (0x00003000) +#define SCU_SGPIO_START_DRIVE_LOWER_RESERVED_MASK (0xFFFF8888) + +#define SCU_SGSDLRx_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SCU_SGPIO_START_DRIVE_LOWER_ ## name, value) + +#define SCU_SGPIO_START_DRIVE_UPPER_R0_SHIFT (0) +#define SCU_SGPIO_START_DRIVE_UPPER_R0_MASK (0x00000003) +#define SCU_SGPIO_START_DRIVE_UPPER_R1_SHIFT (4) +#define SCU_SGPIO_START_DRIVE_UPPER_R1_MASK (0x00000030) +#define SCU_SGPIO_START_DRIVE_UPPER_R2_SHIFT (8) +#define SCU_SGPIO_START_DRIVE_UPPER_R2_MASK (0x00000300) +#define SCU_SGPIO_START_DRIVE_UPPER_R3_SHIFT (12) +#define SCU_SGPIO_START_DRIVE_UPPER_R3_MASK (0x00003000) +#define SCU_SGPIO_START_DRIVE_UPPER_RESERVED_MASK (0xFFFF8888) + +#define SCU_SGSDURx_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SCU_SGPIO_START_DRIVE_LOWER_ ## name, value) + +#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D0_SHIFT (0) +#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D0_MASK (0x00000003) +#define 
SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D1_SHIFT (4)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D1_MASK (0x00000030)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D2_SHIFT (8)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D2_MASK (0x00000300)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D3_SHIFT (12)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D3_MASK (0x00003000)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_RESERVED_MASK (0xFFFF8888)
+
+#define SCU_SGSIDLRx_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_ ## name, value)
+
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D0_SHIFT (0)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D0_MASK (0x00000003)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D1_SHIFT (4)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D1_MASK (0x00000030)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D2_SHIFT (8)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D2_MASK (0x00000300)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D3_SHIFT (12)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D3_MASK (0x00003000)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_RESERVED_MASK (0xFFFF8888)
+
+#define SCU_SGSIDURx_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_ ## name, value)
+
+#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_SHIFT (0)
+#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_MASK (0x0000000F)
+#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_RESERVED_MASK (0xFFFFFFF0)
+
+#define SCU_SGVSCR_GEN_VAL(value) \
+ SCU_GEN_VALUE(SCU_SGPIO_VENDOR_SPECIFIC_CODE, value)
+
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA0_SHIFT (0)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA0_MASK (0x00000003)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA0_SHIFT (2)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA0_MASK (0x00000004)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA0_SHIFT (3)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA0_MASK (0x00000008)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA1_SHIFT (4)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA1_MASK (0x00000030)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA1_SHIFT (6)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA1_MASK (0x00000040)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA1_SHIFT (7)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA1_MASK (0x00000080)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA2_SHIFT (8)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA2_MASK (0x00000300)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA2_SHIFT (10)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA2_MASK (0x00000400)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA2_SHIFT (11)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA2_MASK (0x00000800)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_RESERVED_MASK (0xFFFFF000)
+
+#define SCU_SGODSR_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SGPIO_OUPUT_DATA_SELECT_ ## name, value)
+
+#define SCU_SGODSR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_SGPIO_OUPUT_DATA_SELECT_ ## name)
+
+/*
+ * *****************************************************************************
+ * * SMU Registers
+ * ***************************************************************************** */
+
+/*
+ * ----------------------------------------------------------------------------
+ * SMU Registers
+ * These registers are based off of BAR0
+ *
+ * To calculate the offset for other functions use
+ * BAR0 + FN# * SystemPageSize * 2
+ *
+ * The TCA is only accessible from FN#0 (Physical Function) and each
+ * is programmed by (BAR0 + SCU_SMU_TCA_OFFSET + (FN# * 0x04)) or
+ * TCA0 for FN#0 is at BAR0
+ 0x0400 + * TCA1 for FN#1 is at BAR0 + 0x0404 + * etc. + * ---------------------------------------------------------------------------- + * Accessable to all FN#s */ +#define SCU_SMU_PCP_OFFSET 0x0000 +#define SCU_SMU_AMR_OFFSET 0x0004 +#define SCU_SMU_ISR_OFFSET 0x0010 +#define SCU_SMU_IMR_OFFSET 0x0014 +#define SCU_SMU_ICC_OFFSET 0x0018 +#define SCU_SMU_HTTLBAR_OFFSET 0x0020 +#define SCU_SMU_HTTUBAR_OFFSET 0x0024 +#define SCU_SMU_TCR_OFFSET 0x0028 +#define SCU_SMU_CQLBAR_OFFSET 0x0030 +#define SCU_SMU_CQUBAR_OFFSET 0x0034 +#define SCU_SMU_CQPR_OFFSET 0x0040 +#define SCU_SMU_CQGR_OFFSET 0x0044 +#define SCU_SMU_CQC_OFFSET 0x0048 +/* Accessable to FN#0 only */ +#define SCU_SMU_RNCLBAR_OFFSET 0x0080 +#define SCU_SMU_RNCUBAR_OFFSET 0x0084 +#define SCU_SMU_DCC_OFFSET 0x0090 +#define SCU_SMU_DFC_OFFSET 0x0094 +#define SCU_SMU_SMUCSR_OFFSET 0x0098 +#define SCU_SMU_SCUSRCR_OFFSET 0x009C +#define SCU_SMU_SMAW_OFFSET 0x00A0 +#define SCU_SMU_SMDW_OFFSET 0x00A4 +/* Accessable to FN#0 only */ +#define SCU_SMU_TCA_OFFSET 0x0400 +/* Accessable to all FN#s */ +#define SCU_SMU_MT_MLAR0_OFFSET 0x2000 +#define SCU_SMU_MT_MUAR0_OFFSET 0x2004 +#define SCU_SMU_MT_MDR0_OFFSET 0x2008 +#define SCU_SMU_MT_VCR0_OFFSET 0x200C +#define SCU_SMU_MT_MLAR1_OFFSET 0x2010 +#define SCU_SMU_MT_MUAR1_OFFSET 0x2014 +#define SCU_SMU_MT_MDR1_OFFSET 0x2018 +#define SCU_SMU_MT_VCR1_OFFSET 0x201C +#define SCU_SMU_MPBA_OFFSET 0x3000 + +/** + * struct smu_registers - These are the SMU registers + * + * + */ +struct smu_registers { +/* 0x0000 PCP */ + u32 post_context_port; +/* 0x0004 AMR */ + u32 address_modifier; + u32 reserved_08; + u32 reserved_0C; +/* 0x0010 ISR */ + u32 interrupt_status; +/* 0x0014 IMR */ + u32 interrupt_mask; +/* 0x0018 ICC */ + u32 interrupt_coalesce_control; + u32 reserved_1C; +/* 0x0020 HTTLBAR */ + u32 host_task_table_lower; +/* 0x0024 HTTUBAR */ + u32 host_task_table_upper; +/* 0x0028 TCR */ + u32 task_context_range; + u32 reserved_2C; +/* 0x0030 CQLBAR */ + u32 completion_queue_lower; +/* 0x0034 CQUBAR */ + u32 completion_queue_upper; + u32 reserved_38; + u32 reserved_3C; +/* 0x0040 CQPR */ + u32 completion_queue_put; +/* 0x0044 CQGR */ + u32 completion_queue_get; +/* 0x0048 CQC */ + u32 completion_queue_control; + u32 reserved_4C; + u32 reserved_5x[4]; + u32 reserved_6x[4]; + u32 reserved_7x[4]; +/* + * Accessable to FN#0 only + * 0x0080 RNCLBAR */ + u32 remote_node_context_lower; +/* 0x0084 RNCUBAR */ + u32 remote_node_context_upper; + u32 reserved_88; + u32 reserved_8C; +/* 0x0090 DCC */ + u32 device_context_capacity; +/* 0x0094 DFC */ + u32 device_function_capacity; +/* 0x0098 SMUCSR */ + u32 control_status; +/* 0x009C SCUSRCR */ + u32 soft_reset_control; +/* 0x00A0 SMAW */ + u32 mmr_address_window; +/* 0x00A4 SMDW */ + u32 mmr_data_window; + u32 reserved_A8; + u32 reserved_AC; +/* A whole bunch of reserved space */ + u32 reserved_Bx[4]; + u32 reserved_Cx[4]; + u32 reserved_Dx[4]; + u32 reserved_Ex[4]; + u32 reserved_Fx[4]; + u32 reserved_1xx[64]; + u32 reserved_2xx[64]; + u32 reserved_3xx[64]; +/* + * Accessable to FN#0 only + * 0x0400 TCA */ + u32 task_context_assignment[256]; +/* MSI-X registers not included */ +}; + +/* + * ***************************************************************************** + * SDMA Registers + * ***************************************************************************** */ +#define SCU_SDMA_BASE 0x6000 +#define SCU_SDMA_PUFATLHAR_OFFSET 0x0000 +#define SCU_SDMA_PUFATUHAR_OFFSET 0x0004 +#define SCU_SDMA_UFLHBAR_OFFSET 0x0008 +#define SCU_SDMA_UFUHBAR_OFFSET 0x000C 
+#define SCU_SDMA_UFQC_OFFSET 0x0010 +#define SCU_SDMA_UFQPP_OFFSET 0x0014 +#define SCU_SDMA_UFQGP_OFFSET 0x0018 +#define SCU_SDMA_PDMACR_OFFSET 0x001C +#define SCU_SDMA_CDMACR_OFFSET 0x0080 + +/** + * struct scu_sdma_registers - These are the SCU SDMA Registers + * + * + */ +struct scu_sdma_registers { +/* 0x0000 PUFATLHAR */ + u32 uf_address_table_lower; +/* 0x0004 PUFATUHAR */ + u32 uf_address_table_upper; +/* 0x0008 UFLHBAR */ + u32 uf_header_base_address_lower; +/* 0x000C UFUHBAR */ + u32 uf_header_base_address_upper; +/* 0x0010 UFQC */ + u32 unsolicited_frame_queue_control; +/* 0x0014 UFQPP */ + u32 unsolicited_frame_put_pointer; +/* 0x0018 UFQGP */ + u32 unsolicited_frame_get_pointer; +/* 0x001C PDMACR */ + u32 pdma_configuration; +/* Reserved until offset 0x80 */ + u32 reserved_0020_007C[0x18]; +/* 0x0080 CDMACR */ + u32 cdma_configuration; +/* Remainder SDMA register space */ + u32 reserved_0084_0400[0xDF]; + +}; + +/* + * ***************************************************************************** + * * SCU Link Registers + * ***************************************************************************** */ +#define SCU_PEG0_OFFSET 0x0000 +#define SCU_PEG1_OFFSET 0x8000 + +#define SCU_TL0_OFFSET 0x0000 +#define SCU_TL1_OFFSET 0x0400 +#define SCU_TL2_OFFSET 0x0800 +#define SCU_TL3_OFFSET 0x0C00 + +#define SCU_LL_OFFSET 0x0080 +#define SCU_LL0_OFFSET (SCU_TL0_OFFSET + SCU_LL_OFFSET) +#define SCU_LL1_OFFSET (SCU_TL1_OFFSET + SCU_LL_OFFSET) +#define SCU_LL2_OFFSET (SCU_TL2_OFFSET + SCU_LL_OFFSET) +#define SCU_LL3_OFFSET (SCU_TL3_OFFSET + SCU_LL_OFFSET) + +/* Transport Layer Offsets (PEG + TL) */ +#define SCU_TLCR_OFFSET 0x0000 +#define SCU_TLADTR_OFFSET 0x0004 +#define SCU_TLTTMR_OFFSET 0x0008 +#define SCU_TLEECR0_OFFSET 0x000C +#define SCU_STPTLDARNI_OFFSET 0x0010 + + +#define SCU_TLCR_HASH_SAS_CHECKING_ENABLE_SHIFT (0) +#define SCU_TLCR_HASH_SAS_CHECKING_ENABLE_MASK (0x00000001) +#define SCU_TLCR_CLEAR_TCI_NCQ_MAPPING_TABLE_SHIFT (1) +#define SCU_TLCR_CLEAR_TCI_NCQ_MAPPING_TABLE_MASK (0x00000002) +#define SCU_TLCR_STP_WRITE_DATA_PREFETCH_SHIFT (3) +#define SCU_TLCR_STP_WRITE_DATA_PREFETCH_MASK (0x00000008) +#define SCU_TLCR_CMD_NAK_STATUS_CODE_SHIFT (4) +#define SCU_TLCR_CMD_NAK_STATUS_CODE_MASK (0x00000010) +#define SCU_TLCR_RESERVED_MASK (0xFFFFFFEB) + +#define SCU_TLCR_GEN_BIT(name) \ + SCU_GEN_BIT(SCU_TLCR_ ## name) + +/** + * struct scu_transport_layer_registers - These are the SCU Transport Layer + * registers + * + * + */ +struct scu_transport_layer_registers { + /* 0x0000 TLCR */ + u32 control; + /* 0x0004 TLADTR */ + u32 arbitration_delay_timer; + /* 0x0008 TLTTMR */ + u32 timer_test_mode; + /* 0x000C reserved */ + u32 reserved_0C; + /* 0x0010 STPTLDARNI */ + u32 stp_rni; + /* 0x0014 TLFEWPORCTRL */ + u32 tlfe_wpo_read_control; + /* 0x0018 TLFEWPORDATA */ + u32 tlfe_wpo_read_data; + /* 0x001C RXTLSSCSR1 */ + u32 rxtl_single_step_control_status_1; + /* 0x0020 RXTLSSCSR2 */ + u32 rxtl_single_step_control_status_2; + /* 0x0024 AWTRDDCR */ + u32 tlfe_awt_retry_delay_debug_control; + /* Remainder of TL memory space */ + u32 reserved_0028_007F[0x16]; + +}; + +/* Protocol Engine Group Registers */ +#define SCU_SCUVZECRx_OFFSET 0x1080 + +/* Link Layer Offsets (PEG + TL + LL) */ +#define SCU_SAS_SPDTOV_OFFSET 0x0000 +#define SCU_SAS_LLSTA_OFFSET 0x0004 +#define SCU_SATA_PSELTOV_OFFSET 0x0008 +#define SCU_SAS_TIMETOV_OFFSET 0x0010 +#define SCU_SAS_LOSTOT_OFFSET 0x0014 +#define SCU_SAS_LNKTOV_OFFSET 0x0018 +#define SCU_SAS_PHYTOV_OFFSET 0x001C +#define SCU_SAS_AFERCNT_OFFSET 0x0020 
+#define SCU_SAS_WERCNT_OFFSET 0x0024 +#define SCU_SAS_TIID_OFFSET 0x0028 +#define SCU_SAS_TIDNH_OFFSET 0x002C +#define SCU_SAS_TIDNL_OFFSET 0x0030 +#define SCU_SAS_TISSAH_OFFSET 0x0034 +#define SCU_SAS_TISSAL_OFFSET 0x0038 +#define SCU_SAS_TIPID_OFFSET 0x003C +#define SCU_SAS_TIRES2_OFFSET 0x0040 +#define SCU_SAS_ADRSTA_OFFSET 0x0044 +#define SCU_SAS_MAWTTOV_OFFSET 0x0048 +#define SCU_SAS_FRPLDFIL_OFFSET 0x0054 +#define SCU_SAS_RFCNT_OFFSET 0x0060 +#define SCU_SAS_TFCNT_OFFSET 0x0064 +#define SCU_SAS_RFDCNT_OFFSET 0x0068 +#define SCU_SAS_TFDCNT_OFFSET 0x006C +#define SCU_SAS_LERCNT_OFFSET 0x0070 +#define SCU_SAS_RDISERRCNT_OFFSET 0x0074 +#define SCU_SAS_CRERCNT_OFFSET 0x0078 +#define SCU_STPCTL_OFFSET 0x007C +#define SCU_SAS_PCFG_OFFSET 0x0080 +#define SCU_SAS_CLKSM_OFFSET 0x0084 +#define SCU_SAS_TXCOMWAKE_OFFSET 0x0088 +#define SCU_SAS_TXCOMINIT_OFFSET 0x008C +#define SCU_SAS_TXCOMSAS_OFFSET 0x0090 +#define SCU_SAS_COMINIT_OFFSET 0x0094 +#define SCU_SAS_COMWAKE_OFFSET 0x0098 +#define SCU_SAS_COMSAS_OFFSET 0x009C +#define SCU_SAS_SFERCNT_OFFSET 0x00A0 +#define SCU_SAS_CDFERCNT_OFFSET 0x00A4 +#define SCU_SAS_DNFERCNT_OFFSET 0x00A8 +#define SCU_SAS_PRSTERCNT_OFFSET 0x00AC +#define SCU_SAS_CNTCTL_OFFSET 0x00B0 +#define SCU_SAS_SSPTOV_OFFSET 0x00B4 +#define SCU_FTCTL_OFFSET 0x00B8 +#define SCU_FRCTL_OFFSET 0x00BC +#define SCU_FTWMRK_OFFSET 0x00C0 +#define SCU_ENSPINUP_OFFSET 0x00C4 +#define SCU_SAS_TRNTOV_OFFSET 0x00C8 +#define SCU_SAS_PHYCAP_OFFSET 0x00CC +#define SCU_SAS_PHYCTL_OFFSET 0x00D0 +#define SCU_SAS_LLCTL_OFFSET 0x00D8 +#define SCU_AFE_XCVRCR_OFFSET 0x00DC +#define SCU_AFE_LUTCR_OFFSET 0x00E0 + +#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_SHIFT (0) +#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_MASK (0x00000003) +#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1 (0) +#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN2 (1) +#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN3 (2) +#define SCU_SAS_LINK_LAYER_CONTROL_BROADCAST_PRIMITIVE_SHIFT (2) +#define SCU_SAS_LINK_LAYER_CONTROL_BROADCAST_PRIMITIVE_MASK (0x000003FC) +#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_ACTIVE_TASK_DISABLE_SHIFT (16) +#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_ACTIVE_TASK_DISABLE_MASK (0x00010000) +#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_OUTBOUND_TASK_DISABLE_SHIFT (17) +#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_OUTBOUND_TASK_DISABLE_MASK (0x00020000) +#define SCU_SAS_LINK_LAYER_CONTROL_NO_OUTBOUND_TASK_TIMEOUT_SHIFT (24) +#define SCU_SAS_LINK_LAYER_CONTROL_NO_OUTBOUND_TASK_TIMEOUT_MASK (0xFF000000) +#define SCU_SAS_LINK_LAYER_CONTROL_RESERVED (0x00FCFC00) + +#define SCU_SAS_LLCTL_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SCU_SAS_LINK_LAYER_CONTROL_ ## name, value) + +#define SCU_SAS_LLCTL_GEN_BIT(name) \ + SCU_GEN_BIT(SCU_SAS_LINK_LAYER_CONTROL_ ## name) + + +/* #define SCU_FRXHECR_DCNT_OFFSET 0x00B0 */ +#define SCU_PSZGCR_OFFSET 0x00E4 +#define SCU_SAS_RECPHYCAP_OFFSET 0x00E8 +/* #define SCU_TX_LUTSEL_OFFSET 0x00B8 */ + +#define SCU_SAS_PTxC_OFFSET 0x00D4 /* Same offset as SAS_TCTSTM */ + +/** + * struct scu_link_layer_registers - SCU Link Layer Registers + * + * + */ +struct scu_link_layer_registers { +/* 0x0000 SAS_SPDTOV */ + u32 speed_negotiation_timers; +/* 0x0004 SAS_LLSTA */ + u32 link_layer_status; +/* 0x0008 SATA_PSELTOV */ + u32 port_selector_timeout; + u32 reserved0C; +/* 0x0010 SAS_TIMETOV */ + u32 timeout_unit_value; +/* 0x0014 SAS_RCDTOV */ + u32 rcd_timeout; +/* 0x0018 SAS_LNKTOV */ + u32 link_timer_timeouts; +/* 0x001C SAS_PHYTOV */ + u32 sas_phy_timeouts; +/* 0x0020 
SAS_AFERCNT */ + u32 received_address_frame_error_counter; +/* 0x0024 SAS_WERCNT */ + u32 invalid_dword_counter; +/* 0x0028 SAS_TIID */ + u32 transmit_identification; +/* 0x002C SAS_TIDNH */ + u32 sas_device_name_high; +/* 0x0030 SAS_TIDNL */ + u32 sas_device_name_low; +/* 0x0034 SAS_TISSAH */ + u32 source_sas_address_high; +/* 0x0038 SAS_TISSAL */ + u32 source_sas_address_low; +/* 0x003C SAS_TIPID */ + u32 identify_frame_phy_id; +/* 0x0040 SAS_TIRES2 */ + u32 identify_frame_reserved; +/* 0x0044 SAS_ADRSTA */ + u32 received_address_frame; +/* 0x0048 SAS_MAWTTOV */ + u32 maximum_arbitration_wait_timer_timeout; +/* 0x004C SAS_PTxC */ + u32 transmit_primitive; +/* 0x0050 SAS_RORES */ + u32 error_counter_event_notification_control; +/* 0x0054 SAS_FRPLDFIL */ + u32 frxq_payload_fill_threshold; +/* 0x0058 SAS_LLHANG_TOT */ + u32 link_layer_hang_detection_timeout; + u32 reserved_5C; +/* 0x0060 SAS_RFCNT */ + u32 received_frame_count; +/* 0x0064 SAS_TFCNT */ + u32 transmit_frame_count; +/* 0x0068 SAS_RFDCNT */ + u32 received_dword_count; +/* 0x006C SAS_TFDCNT */ + u32 transmit_dword_count; +/* 0x0070 SAS_LERCNT */ + u32 loss_of_sync_error_count; +/* 0x0074 SAS_RDISERRCNT */ + u32 running_disparity_error_count; +/* 0x0078 SAS_CRERCNT */ + u32 received_frame_crc_error_count; +/* 0x007C STPCTL */ + u32 stp_control; +/* 0x0080 SAS_PCFG */ + u32 phy_configuration; +/* 0x0084 SAS_CLKSM */ + u32 clock_skew_management; +/* 0x0088 SAS_TXCOMWAKE */ + u32 transmit_comwake_signal; +/* 0x008C SAS_TXCOMINIT */ + u32 transmit_cominit_signal; +/* 0x0090 SAS_TXCOMSAS */ + u32 transmit_comsas_signal; +/* 0x0094 SAS_COMINIT */ + u32 cominit_control; +/* 0x0098 SAS_COMWAKE */ + u32 comwake_control; +/* 0x009C SAS_COMSAS */ + u32 comsas_control; +/* 0x00A0 SAS_SFERCNT */ + u32 received_short_frame_count; +/* 0x00A4 SAS_CDFERCNT */ + u32 received_frame_without_credit_count; +/* 0x00A8 SAS_DNFERCNT */ + u32 received_frame_after_done_count; +/* 0x00AC SAS_PRSTERCNT */ + u32 phy_reset_problem_count; +/* 0x00B0 SAS_CNTCTL */ + u32 counter_control; +/* 0x00B4 SAS_SSPTOV */ + u32 ssp_timer_timeout_values; +/* 0x00B8 FTCTL */ + u32 ftx_control; +/* 0x00BC FRCTL */ + u32 frx_control; +/* 0x00C0 FTWMRK */ + u32 ftx_watermark; +/* 0x00C4 ENSPINUP */ + u32 notify_enable_spinup_control; +/* 0x00C8 SAS_TRNTOV */ + u32 sas_training_sequence_timer_values; +/* 0x00CC SAS_PHYCAP */ + u32 phy_capabilities; +/* 0x00D0 SAS_PHYCTL */ + u32 phy_control; + u32 reserved_d4; +/* 0x00D8 LLCTL */ + u32 link_layer_control; +/* 0x00DC AFE_XCVRCR */ + u32 afe_xcvr_control; +/* 0x00E0 AFE_LUTCR */ + u32 afe_lookup_table_control; +/* 0x00E4 PSZGCR */ + u32 phy_source_zone_group_control; +/* 0x00E8 SAS_RECPHYCAP */ + u32 receive_phycap; + u32 reserved_ec; +/* 0x00F0 SNAFERXRSTCTL */ + u32 speed_negotiation_afe_rx_reset_control; +/* 0x00F4 SAS_SSIPMCTL */ + u32 power_management_control; +/* 0x00F8 SAS_PSPREQ_PRIM */ + u32 sas_pm_partial_request_primitive; +/* 0x00FC SAS_PSSREQ_PRIM */ + u32 sas_pm_slumber_request_primitive; +/* 0x0100 SAS_PPSACK_PRIM */ + u32 sas_pm_ack_primitive_register; +/* 0x0104 SAS_PSNAK_PRIM */ + u32 sas_pm_nak_primitive_register; +/* 0x0108 SAS_SSIPMTOV */ + u32 sas_primitive_timeout; + u32 reserved_10c; +/* 0x0110 - 0x011C PLAPRDCTRLxREG */ + u32 pla_product_control[4]; +/* 0x0120 PLAPRDSUMREG */ + u32 pla_product_sum; +/* 0x0124 PLACONTROLREG */ + u32 pla_control; +/* Remainder of memory space 896 bytes */ + u32 reserved_0128_037f[0x96]; + +}; + +/* + * 0x00D4 // Same offset as SAS_TCTSTM SAS_PTxC + * u32 
primitive_transmit_control; */ + +/* + * ---------------------------------------------------------------------------- + * SGPIO + * ---------------------------------------------------------------------------- */ +#define SCU_SGPIO_OFFSET 0x1400 + +/* #define SCU_SGPIO_OFFSET 0x6000 // later moves to 0x1400 see HSD 652625 */ +#define SCU_SGPIO_SGICR_OFFSET 0x0000 +#define SCU_SGPIO_SGPBR_OFFSET 0x0004 +#define SCU_SGPIO_SGSDLR_OFFSET 0x0008 +#define SCU_SGPIO_SGSDUR_OFFSET 0x000C +#define SCU_SGPIO_SGSIDLR_OFFSET 0x0010 +#define SCU_SGPIO_SGSIDUR_OFFSET 0x0014 +#define SCU_SGPIO_SGVSCR_OFFSET 0x0018 +/* Address from 0x0820 to 0x083C */ +#define SCU_SGPIO_SGODSR_OFFSET 0x0020 + +/** + * struct scu_sgpio_registers - SCU SGPIO Registers + * + * + */ +struct scu_sgpio_registers { +/* 0x0000 SGPIO_SGICR */ + u32 interface_control; +/* 0x0004 SGPIO_SGPBR */ + u32 blink_rate; +/* 0x0008 SGPIO_SGSDLR */ + u32 start_drive_lower; +/* 0x000C SGPIO_SGSDUR */ + u32 start_drive_upper; +/* 0x0010 SGPIO_SGSIDLR */ + u32 serial_input_lower; +/* 0x0014 SGPIO_SGSIDUR */ + u32 serial_input_upper; +/* 0x0018 SGPIO_SGVSCR */ + u32 vendor_specific_code; +/* 0x0020 SGPIO_SGODSR */ + u32 ouput_data_select[8]; +/* Remainder of memory space 256 bytes */ + u32 reserved_1444_14ff[0x31]; + +}; + +/* + * ***************************************************************************** + * * Defines for VIIT entry offsets + * * Access additional entries by SCU_VIIT_BASE + index * 0x10 + * ***************************************************************************** */ +#define SCU_VIIT_BASE 0x1c00 + +struct scu_viit_registers { + u32 registers[256]; +}; + +/* + * ***************************************************************************** + * * SCU PORT TASK SCHEDULER REGISTERS + * ***************************************************************************** */ + +#define SCU_PTSG_BASE 0x1000 + +#define SCU_PTSG_PTSGCR_OFFSET 0x0000 +#define SCU_PTSG_RTCR_OFFSET 0x0004 +#define SCU_PTSG_RTCCR_OFFSET 0x0008 +#define SCU_PTSG_PTS0CR_OFFSET 0x0010 +#define SCU_PTSG_PTS0SR_OFFSET 0x0014 +#define SCU_PTSG_PTS1CR_OFFSET 0x0018 +#define SCU_PTSG_PTS1SR_OFFSET 0x001C +#define SCU_PTSG_PTS2CR_OFFSET 0x0020 +#define SCU_PTSG_PTS2SR_OFFSET 0x0024 +#define SCU_PTSG_PTS3CR_OFFSET 0x0028 +#define SCU_PTSG_PTS3SR_OFFSET 0x002C +#define SCU_PTSG_PCSPE0CR_OFFSET 0x0030 +#define SCU_PTSG_PCSPE1CR_OFFSET 0x0034 +#define SCU_PTSG_PCSPE2CR_OFFSET 0x0038 +#define SCU_PTSG_PCSPE3CR_OFFSET 0x003C +#define SCU_PTSG_ETMTSCCR_OFFSET 0x0040 +#define SCU_PTSG_ETMRNSCCR_OFFSET 0x0044 + +/** + * struct scu_port_task_scheduler_registers - These are the control/stats pairs + * for each Port Task Scheduler. 
+ * + * + */ +struct scu_port_task_scheduler_registers { + u32 control; + u32 status; +}; + +/** + * struct scu_port_task_scheduler_group_registers - These are the PORT Task + * Scheduler registers + * + * + */ +struct scu_port_task_scheduler_group_registers { +/* 0x0000 PTSGCR */ + u32 control; +/* 0x0004 RTCR */ + u32 real_time_clock; +/* 0x0008 RTCCR */ + u32 real_time_clock_control; +/* 0x000C */ + u32 reserved_0C; +/* + * 0x0010 PTS0CR + * 0x0014 PTS0SR + * 0x0018 PTS1CR + * 0x001C PTS1SR + * 0x0020 PTS2CR + * 0x0024 PTS2SR + * 0x0028 PTS3CR + * 0x002C PTS3SR */ + struct scu_port_task_scheduler_registers port[4]; +/* + * 0x0030 PCSPE0CR + * 0x0034 PCSPE1CR + * 0x0038 PCSPE2CR + * 0x003C PCSPE3CR */ + u32 protocol_engine[4]; +/* 0x0040 ETMTSCCR */ + u32 tc_scanning_interval_control; +/* 0x0044 ETMRNSCCR */ + u32 rnc_scanning_interval_control; +/* Remainder of memory space 128 bytes */ + u32 reserved_1048_107f[0x0E]; + +}; + +#define SCU_PTSG_SCUVZECR_OFFSET 0x003C + +/* + * ***************************************************************************** + * * AFE REGISTERS + * ***************************************************************************** */ +#define SCU_AFE_MMR_BASE 0xE000 + +/* + * AFE 0 is at offset 0x0800 + * AFE 1 is at offset 0x0900 + * AFE 2 is at offset 0x0a00 + * AFE 3 is at offset 0x0b00 */ +struct scu_afe_transceiver { + /* 0x0000 AFE_XCVR_CTRL0 */ + u32 afe_xcvr_control0; + /* 0x0004 AFE_XCVR_CTRL1 */ + u32 afe_xcvr_control1; + /* 0x0008 */ + u32 reserved_0008; + /* 0x000c afe_dfx_rx_control0 */ + u32 afe_dfx_rx_control0; + /* 0x0010 AFE_DFX_RX_CTRL1 */ + u32 afe_dfx_rx_control1; + /* 0x0014 */ + u32 reserved_0014; + /* 0x0018 AFE_DFX_RX_STS0 */ + u32 afe_dfx_rx_status0; + /* 0x001c AFE_DFX_RX_STS1 */ + u32 afe_dfx_rx_status1; + /* 0x0020 */ + u32 reserved_0020; + /* 0x0024 AFE_TX_CTRL */ + u32 afe_tx_control; + /* 0x0028 AFE_TX_AMP_CTRL0 */ + u32 afe_tx_amp_control0; + /* 0x002c AFE_TX_AMP_CTRL1 */ + u32 afe_tx_amp_control1; + /* 0x0030 AFE_TX_AMP_CTRL2 */ + u32 afe_tx_amp_control2; + /* 0x0034 AFE_TX_AMP_CTRL3 */ + u32 afe_tx_amp_control3; + /* 0x0038 afe_tx_ssc_control */ + u32 afe_tx_ssc_control; + /* 0x003c */ + u32 reserved_003c; + /* 0x0040 AFE_RX_SSC_CTRL0 */ + u32 afe_rx_ssc_control0; + /* 0x0044 AFE_RX_SSC_CTRL1 */ + u32 afe_rx_ssc_control1; + /* 0x0048 AFE_RX_SSC_CTRL2 */ + u32 afe_rx_ssc_control2; + /* 0x004c AFE_RX_EQ_STS0 */ + u32 afe_rx_eq_status0; + /* 0x0050 AFE_RX_EQ_STS1 */ + u32 afe_rx_eq_status1; + /* 0x0054 AFE_RX_CDR_STS */ + u32 afe_rx_cdr_status; + /* 0x0058 */ + u32 reserved_0058; + /* 0x005c AFE_CHAN_CTRL */ + u32 afe_channel_control; + /* 0x0060-0x006c */ + u32 reserved_0060_006c[0x04]; + /* 0x0070 AFE_XCVR_EC_STS0 */ + u32 afe_xcvr_error_capture_status0; + /* 0x0074 AFE_XCVR_EC_STS1 */ + u32 afe_xcvr_error_capture_status1; + /* 0x0078 AFE_XCVR_EC_STS2 */ + u32 afe_xcvr_error_capture_status2; + /* 0x007c afe_xcvr_ec_status3 */ + u32 afe_xcvr_error_capture_status3; + /* 0x0080 AFE_XCVR_EC_STS4 */ + u32 afe_xcvr_error_capture_status4; + /* 0x0084 AFE_XCVR_EC_STS5 */ + u32 afe_xcvr_error_capture_status5; + /* 0x0088-0x00fc */ + u32 reserved_008c_00fc[0x1e]; +}; + +/** + * struct scu_afe_registers - AFE Regsiters + * + * + */ +/* Uaoa AFE registers */ +struct scu_afe_registers { + /* 0Xe000 AFE_BIAS_CTRL */ + u32 afe_bias_control; + u32 reserved_0004; + /* 0x0008 AFE_PLL_CTRL0 */ + u32 afe_pll_control0; + /* 0x000c AFE_PLL_CTRL1 */ + u32 afe_pll_control1; + /* 0x0010 AFE_PLL_CTRL2 */ + u32 afe_pll_control2; + /* 0x0014 AFE_CB_STS */ + u32 
afe_common_block_status; + /* 0x0018-0x007c */ + u32 reserved_18_7c[0x1a]; + /* 0x0080 AFE_PMSN_MCTRL0 */ + u32 afe_pmsn_master_control0; + /* 0x0084 AFE_PMSN_MCTRL1 */ + u32 afe_pmsn_master_control1; + /* 0x0088 AFE_PMSN_MCTRL2 */ + u32 afe_pmsn_master_control2; + /* 0x008C-0x00fc */ + u32 reserved_008c_00fc[0x1D]; + /* 0x0100 AFE_DFX_MST_CTRL0 */ + u32 afe_dfx_master_control0; + /* 0x0104 AFE_DFX_MST_CTRL1 */ + u32 afe_dfx_master_control1; + /* 0x0108 AFE_DFX_DCL_CTRL */ + u32 afe_dfx_dcl_control; + /* 0x010c AFE_DFX_DMON_CTRL */ + u32 afe_dfx_digital_monitor_control; + /* 0x0110 AFE_DFX_AMONP_CTRL */ + u32 afe_dfx_analog_p_monitor_control; + /* 0x0114 AFE_DFX_AMONN_CTRL */ + u32 afe_dfx_analog_n_monitor_control; + /* 0x0118 AFE_DFX_NTL_STS */ + u32 afe_dfx_ntl_status; + /* 0x011c AFE_DFX_FIFO_STS0 */ + u32 afe_dfx_fifo_status0; + /* 0x0120 AFE_DFX_FIFO_STS1 */ + u32 afe_dfx_fifo_status1; + /* 0x0124 AFE_DFX_MPAT_CTRL */ + u32 afe_dfx_master_pattern_control; + /* 0x0128 AFE_DFX_P0_CTRL */ + u32 afe_dfx_p0_control; + /* 0x012c-0x01a8 AFE_DFX_P0_DRx */ + u32 afe_dfx_p0_data[32]; + /* 0x01ac */ + u32 reserved_01ac; + /* 0x01b0-0x020c AFE_DFX_P0_IRx */ + u32 afe_dfx_p0_instruction[24]; + /* 0x0210 */ + u32 reserved_0210; + /* 0x0214 AFE_DFX_P1_CTRL */ + u32 afe_dfx_p1_control; + /* 0x0218-0x245 AFE_DFX_P1_DRx */ + u32 afe_dfx_p1_data[16]; + /* 0x0258-0x029c */ + u32 reserved_0258_029c[0x12]; + /* 0x02a0-0x02bc AFE_DFX_P1_IRx */ + u32 afe_dfx_p1_instruction[8]; + /* 0x02c0-0x2fc */ + u32 reserved_02c0_02fc[0x10]; + /* 0x0300 AFE_DFX_TX_PMSN_CTRL */ + u32 afe_dfx_tx_pmsn_control; + /* 0x0304 AFE_DFX_RX_PMSN_CTRL */ + u32 afe_dfx_rx_pmsn_control; + u32 reserved_0308; + /* 0x030c AFE_DFX_NOA_CTRL0 */ + u32 afe_dfx_noa_control0; + /* 0x0310 AFE_DFX_NOA_CTRL1 */ + u32 afe_dfx_noa_control1; + /* 0x0314 AFE_DFX_NOA_CTRL2 */ + u32 afe_dfx_noa_control2; + /* 0x0318 AFE_DFX_NOA_CTRL3 */ + u32 afe_dfx_noa_control3; + /* 0x031c AFE_DFX_NOA_CTRL4 */ + u32 afe_dfx_noa_control4; + /* 0x0320 AFE_DFX_NOA_CTRL5 */ + u32 afe_dfx_noa_control5; + /* 0x0324 AFE_DFX_NOA_CTRL6 */ + u32 afe_dfx_noa_control6; + /* 0x0328 AFE_DFX_NOA_CTRL7 */ + u32 afe_dfx_noa_control7; + /* 0x032c-0x07fc */ + u32 reserved_032c_07fc[0x135]; + + /* 0x0800-0x0bfc */ + struct scu_afe_transceiver scu_afe_xcvr[4]; + + /* 0x0c00-0x0ffc */ + u32 reserved_0c00_0ffc[0x0100]; +}; + +struct scu_protocol_engine_group_registers { + u32 table[0xE0]; +}; + + +struct scu_viit_iit { + u32 table[256]; +}; + +/** + * Placeholder for the ZONE Partition Table information ZONING will not be + * included in the 1.1 release. + * + * + */ +struct scu_zone_partition_table { + u32 table[2048]; +}; + +/** + * Placeholder for the CRAM register since I am not sure if we need to + * read/write to these registers as yet. + * + * + */ +struct scu_completion_ram { + u32 ram[128]; +}; + +/** + * Placeholder for the FBRAM registers since I am not sure if we need to + * read/write to these registers as yet. + * + * + */ +struct scu_frame_buffer_ram { + u32 ram[128]; +}; + +#define scu_scratch_ram_SIZE_IN_DWORDS 256 + +/** + * Placeholder for the scratch RAM registers. + * + * + */ +struct scu_scratch_ram { + u32 ram[scu_scratch_ram_SIZE_IN_DWORDS]; +}; + +/** + * Placeholder since I am not yet sure what these registers are here for. + * + * + */ +struct noa_protocol_engine_partition { + u32 reserved[64]; +}; + +/** + * Placeholder since I am not yet sure what these registers are here for. 
+ * + * + */ +struct noa_hub_partition { + u32 reserved[64]; +}; + +/** + * Placeholder since I am not yet sure what these registers are here for. + * + * + */ +struct noa_host_interface_partition { + u32 reserved[64]; +}; + +/** + * struct transport_link_layer_pair - The SCU Hardware pairs up the TL + * registers with the LL registers so we must place them adjcent to make the + * array of registers in the PEG. + * + * + */ +struct transport_link_layer_pair { + struct scu_transport_layer_registers tl; + struct scu_link_layer_registers ll; +}; + +/** + * struct scu_peg_registers - SCU Protocol Engine Memory mapped register space. + * These registers are unique to each protocol engine group. There can be + * at most two PEG for a single SCU part. + * + * + */ +struct scu_peg_registers { + struct transport_link_layer_pair pe[4]; + struct scu_port_task_scheduler_group_registers ptsg; + struct scu_protocol_engine_group_registers peg; + struct scu_sgpio_registers sgpio; + u32 reserved_01500_1BFF[0x1C0]; + struct scu_viit_entry viit[64]; + struct scu_zone_partition_table zpt0; + struct scu_zone_partition_table zpt1; +}; + +/** + * struct scu_registers - SCU regsiters including both PEG registers if we turn + * on that compile option. All of these registers are in the memory mapped + * space returned from BAR1. + * + * + */ +struct scu_registers { + /* 0x0000 - PEG 0 */ + struct scu_peg_registers peg0; + + /* 0x6000 - SDMA and Miscellaneous */ + struct scu_sdma_registers sdma; + struct scu_completion_ram cram; + struct scu_frame_buffer_ram fbram; + u32 reserved_6800_69FF[0x80]; + struct noa_protocol_engine_partition noa_pe; + struct noa_hub_partition noa_hub; + struct noa_host_interface_partition noa_if; + u32 reserved_6d00_7fff[0x4c0]; + + /* 0x8000 - PEG 1 */ + struct scu_peg_registers peg1; + + /* 0xE000 - AFE Registers */ + struct scu_afe_registers afe; + + /* 0xF000 - reserved */ + u32 reserved_f000_211fff[0x80c00]; + + /* 0x212000 - scratch RAM */ + struct scu_scratch_ram scratch_ram; +}; + +#endif /* _SCU_REGISTERS_HEADER_ */ diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c new file mode 100644 index 00000000000..b6e6368c266 --- /dev/null +++ b/drivers/scsi/isci/remote_device.c @@ -0,0 +1,1501 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <scsi/sas.h>
+#include "isci.h"
+#include "port.h"
+#include "remote_device.h"
+#include "request.h"
+#include "remote_node_context.h"
+#include "scu_event_codes.h"
+#include "task.h"
+
+/**
+ * isci_remote_device_not_ready() - This function is called by the ihost when
+ * the remote device is not ready. We mark the isci device as not ready (no
+ * longer "ready_for_io") and signal the waiting process.
+ * @ihost: This parameter specifies the isci host object.
+ * @idev: This parameter specifies the remote device.
+ * @reason: This parameter specifies the reason the device is not ready.
+ *
+ * sci_lock is held on entrance to this function.
+ */
+static void isci_remote_device_not_ready(struct isci_host *ihost,
+ struct isci_remote_device *idev, u32 reason)
+{
+ struct isci_request *ireq;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_device = %p\n", __func__, idev);
+
+ switch (reason) {
+ case SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED:
+ set_bit(IDEV_GONE, &idev->flags);
+ break;
+ case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED:
+ set_bit(IDEV_IO_NCQERROR, &idev->flags);
+
+ /* Kill all outstanding requests for the device. */
+ list_for_each_entry(ireq, &idev->reqs_in_process, dev_node) {
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_device = %p request = %p\n",
+ __func__, idev, ireq);
+
+ sci_controller_terminate_request(ihost,
+ idev,
+ ireq);
+ }
+ /* Fall through into the default case... */
+ default:
+ clear_bit(IDEV_IO_READY, &idev->flags);
+ break;
+ }
+}
+
+/**
+ * isci_remote_device_ready() - This function is called by the ihost when the
+ * remote device is ready. We mark the isci device as ready and signal the
+ * waiting process.
+ * @ihost: our valid isci_host + * @idev: remote device + * + */ +static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote_device *idev) +{ + dev_dbg(&ihost->pdev->dev, + "%s: idev = %p\n", __func__, idev); + + clear_bit(IDEV_IO_NCQERROR, &idev->flags); + set_bit(IDEV_IO_READY, &idev->flags); + if (test_and_clear_bit(IDEV_START_PENDING, &idev->flags)) + wake_up(&ihost->eventq); +} + +/* called once the remote node context is ready to be freed. + * The remote device can now report that its stop operation is complete. none + */ +static void rnc_destruct_done(void *_dev) +{ + struct isci_remote_device *idev = _dev; + + BUG_ON(idev->started_request_count != 0); + sci_change_state(&idev->sm, SCI_DEV_STOPPED); +} + +static enum sci_status sci_remote_device_terminate_requests(struct isci_remote_device *idev) +{ + struct isci_host *ihost = idev->owning_port->owning_controller; + enum sci_status status = SCI_SUCCESS; + u32 i; + + for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) { + struct isci_request *ireq = ihost->reqs[i]; + enum sci_status s; + + if (!test_bit(IREQ_ACTIVE, &ireq->flags) || + ireq->target_device != idev) + continue; + + s = sci_controller_terminate_request(ihost, idev, ireq); + if (s != SCI_SUCCESS) + status = s; + } + + return status; +} + +enum sci_status sci_remote_device_stop(struct isci_remote_device *idev, + u32 timeout) +{ + struct sci_base_state_machine *sm = &idev->sm; + enum sci_remote_device_states state = sm->current_state_id; + + switch (state) { + case SCI_DEV_INITIAL: + case SCI_DEV_FAILED: + case SCI_DEV_FINAL: + default: + dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", + __func__, state); + return SCI_FAILURE_INVALID_STATE; + case SCI_DEV_STOPPED: + return SCI_SUCCESS; + case SCI_DEV_STARTING: + /* device not started so there had better be no requests */ + BUG_ON(idev->started_request_count != 0); + sci_remote_node_context_destruct(&idev->rnc, + rnc_destruct_done, idev); + /* Transition to the stopping state and wait for the + * remote node to complete being posted and invalidated. + */ + sci_change_state(sm, SCI_DEV_STOPPING); + return SCI_SUCCESS; + case SCI_DEV_READY: + case SCI_STP_DEV_IDLE: + case SCI_STP_DEV_CMD: + case SCI_STP_DEV_NCQ: + case SCI_STP_DEV_NCQ_ERROR: + case SCI_STP_DEV_AWAIT_RESET: + case SCI_SMP_DEV_IDLE: + case SCI_SMP_DEV_CMD: + sci_change_state(sm, SCI_DEV_STOPPING); + if (idev->started_request_count == 0) { + sci_remote_node_context_destruct(&idev->rnc, + rnc_destruct_done, idev); + return SCI_SUCCESS; + } else + return sci_remote_device_terminate_requests(idev); + break; + case SCI_DEV_STOPPING: + /* All requests should have been terminated, but if there is an + * attempt to stop a device already in the stopping state, then + * try again to terminate. 
+ */ + return sci_remote_device_terminate_requests(idev); + case SCI_DEV_RESETTING: + sci_change_state(sm, SCI_DEV_STOPPING); + return SCI_SUCCESS; + } +} + +enum sci_status sci_remote_device_reset(struct isci_remote_device *idev) +{ + struct sci_base_state_machine *sm = &idev->sm; + enum sci_remote_device_states state = sm->current_state_id; + + switch (state) { + case SCI_DEV_INITIAL: + case SCI_DEV_STOPPED: + case SCI_DEV_STARTING: + case SCI_SMP_DEV_IDLE: + case SCI_SMP_DEV_CMD: + case SCI_DEV_STOPPING: + case SCI_DEV_FAILED: + case SCI_DEV_RESETTING: + case SCI_DEV_FINAL: + default: + dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", + __func__, state); + return SCI_FAILURE_INVALID_STATE; + case SCI_DEV_READY: + case SCI_STP_DEV_IDLE: + case SCI_STP_DEV_CMD: + case SCI_STP_DEV_NCQ: + case SCI_STP_DEV_NCQ_ERROR: + case SCI_STP_DEV_AWAIT_RESET: + sci_change_state(sm, SCI_DEV_RESETTING); + return SCI_SUCCESS; + } +} + +enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev) +{ + struct sci_base_state_machine *sm = &idev->sm; + enum sci_remote_device_states state = sm->current_state_id; + + if (state != SCI_DEV_RESETTING) { + dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", + __func__, state); + return SCI_FAILURE_INVALID_STATE; + } + + sci_change_state(sm, SCI_DEV_READY); + return SCI_SUCCESS; +} + +enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev, + u32 suspend_type) +{ + struct sci_base_state_machine *sm = &idev->sm; + enum sci_remote_device_states state = sm->current_state_id; + + if (state != SCI_STP_DEV_CMD) { + dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", + __func__, state); + return SCI_FAILURE_INVALID_STATE; + } + + return sci_remote_node_context_suspend(&idev->rnc, + suspend_type, NULL, NULL); +} + +enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev, + u32 frame_index) +{ + struct sci_base_state_machine *sm = &idev->sm; + enum sci_remote_device_states state = sm->current_state_id; + struct isci_host *ihost = idev->owning_port->owning_controller; + enum sci_status status; + + switch (state) { + case SCI_DEV_INITIAL: + case SCI_DEV_STOPPED: + case SCI_DEV_STARTING: + case SCI_STP_DEV_IDLE: + case SCI_SMP_DEV_IDLE: + case SCI_DEV_FINAL: + default: + dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", + __func__, state); + /* Return the frame back to the controller */ + sci_controller_release_frame(ihost, frame_index); + return SCI_FAILURE_INVALID_STATE; + case SCI_DEV_READY: + case SCI_STP_DEV_NCQ_ERROR: + case SCI_STP_DEV_AWAIT_RESET: + case SCI_DEV_STOPPING: + case SCI_DEV_FAILED: + case SCI_DEV_RESETTING: { + struct isci_request *ireq; + struct ssp_frame_hdr hdr; + void *frame_header; + ssize_t word_cnt; + + status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, + frame_index, + &frame_header); + if (status != SCI_SUCCESS) + return status; + + word_cnt = sizeof(hdr) / sizeof(u32); + sci_swab32_cpy(&hdr, frame_header, word_cnt); + + ireq = sci_request_by_tag(ihost, be16_to_cpu(hdr.tag)); + if (ireq && ireq->target_device == idev) { + /* The IO request is now in charge of releasing the frame */ + status = sci_io_request_frame_handler(ireq, frame_index); + } else { + /* We could not map this tag to a valid IO + * request Just toss the frame and continue + */ + sci_controller_release_frame(ihost, frame_index); + } + break; + } + case SCI_STP_DEV_NCQ: { + struct dev_to_host_fis *hdr; + + status = 
sci_unsolicited_frame_control_get_header(&ihost->uf_control, + frame_index, + (void **)&hdr); + if (status != SCI_SUCCESS) + return status; + + if (hdr->fis_type == FIS_SETDEVBITS && + (hdr->status & ATA_ERR)) { + idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED; + + /* TODO Check sactive and complete associated IO if any. */ + sci_change_state(sm, SCI_STP_DEV_NCQ_ERROR); + } else if (hdr->fis_type == FIS_REGD2H && + (hdr->status & ATA_ERR)) { + /* + * Some devices return D2H FIS when an NCQ error is detected. + * Treat this like an SDB error FIS ready reason. + */ + idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED; + sci_change_state(&idev->sm, SCI_STP_DEV_NCQ_ERROR); + } else + status = SCI_FAILURE; + + sci_controller_release_frame(ihost, frame_index); + break; + } + case SCI_STP_DEV_CMD: + case SCI_SMP_DEV_CMD: + /* The device does not process any UF received from the hardware while + * in this state. All unsolicited frames are forwarded to the io request + * object. + */ + status = sci_io_request_frame_handler(idev->working_request, frame_index); + break; + } + + return status; +} + +static bool is_remote_device_ready(struct isci_remote_device *idev) +{ + + struct sci_base_state_machine *sm = &idev->sm; + enum sci_remote_device_states state = sm->current_state_id; + + switch (state) { + case SCI_DEV_READY: + case SCI_STP_DEV_IDLE: + case SCI_STP_DEV_CMD: + case SCI_STP_DEV_NCQ: + case SCI_STP_DEV_NCQ_ERROR: + case SCI_STP_DEV_AWAIT_RESET: + case SCI_SMP_DEV_IDLE: + case SCI_SMP_DEV_CMD: + return true; + default: + return false; + } +} + +enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev, + u32 event_code) +{ + struct sci_base_state_machine *sm = &idev->sm; + enum sci_remote_device_states state = sm->current_state_id; + enum sci_status status; + + switch (scu_get_event_type(event_code)) { + case SCU_EVENT_TYPE_RNC_OPS_MISC: + case SCU_EVENT_TYPE_RNC_SUSPEND_TX: + case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: + status = sci_remote_node_context_event_handler(&idev->rnc, event_code); + break; + case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT: + if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) { + status = SCI_SUCCESS; + + /* Suspend the associated RNC */ + sci_remote_node_context_suspend(&idev->rnc, + SCI_SOFTWARE_SUSPENSION, + NULL, NULL); + + dev_dbg(scirdev_to_dev(idev), + "%s: device: %p event code: %x: %s\n", + __func__, idev, event_code, + is_remote_device_ready(idev) + ? "I_T_Nexus_Timeout event" + : "I_T_Nexus_Timeout event in wrong state"); + + break; + } + /* Else, fall through and treat as unhandled... */ + default: + dev_dbg(scirdev_to_dev(idev), + "%s: device: %p event code: %x: %s\n", + __func__, idev, event_code, + is_remote_device_ready(idev) + ? "unexpected event" + : "unexpected event in wrong state"); + status = SCI_FAILURE_INVALID_STATE; + break; + } + + if (status != SCI_SUCCESS) + return status; + + if (state == SCI_STP_DEV_IDLE) { + + /* We pick up suspension events to handle specifically to this + * state. We resume the RNC right away. 
+ */
+ if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
+ scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX)
+ status = sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
+ }
+
+ return status;
+}
+
+static void sci_remote_device_start_request(struct isci_remote_device *idev,
+ struct isci_request *ireq,
+ enum sci_status status)
+{
+ struct isci_port *iport = idev->owning_port;
+
+ /* cleanup requests that failed after starting on the port */
+ if (status != SCI_SUCCESS)
+ sci_port_complete_io(iport, idev, ireq);
+ else {
+ kref_get(&idev->kref);
+ idev->started_request_count++;
+ }
+}
+
+enum sci_status sci_remote_device_start_io(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+ struct isci_port *iport = idev->owning_port;
+ enum sci_status status;
+
+ switch (state) {
+ case SCI_DEV_INITIAL:
+ case SCI_DEV_STOPPED:
+ case SCI_DEV_STARTING:
+ case SCI_STP_DEV_NCQ_ERROR:
+ case SCI_DEV_STOPPING:
+ case SCI_DEV_FAILED:
+ case SCI_DEV_RESETTING:
+ case SCI_DEV_FINAL:
+ default:
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+ __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ case SCI_DEV_READY:
+ /* attempt to start an io request for this device object. The remote
+ * device object will issue the start request for the io and if
+ * successful it will start the request for the port object then
+ * increment its own request count.
+ */
+ status = sci_port_start_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ status = sci_remote_node_context_start_io(&idev->rnc, ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ status = sci_request_start(ireq);
+ break;
+ case SCI_STP_DEV_IDLE: {
+ /* handle the start io operation for a sata device that is in
+ * the command idle state. - Evaluate the type of IO request to
+ * be started - If it's an NCQ request change to NCQ substate -
+ * If it's any other command change to the CMD substate
+ *
+ * If this is a softreset we may want to have a different
+ * substate.
+ */ + enum sci_remote_device_states new_state; + struct sas_task *task = isci_request_access_task(ireq); + + status = sci_port_start_io(iport, idev, ireq); + if (status != SCI_SUCCESS) + return status; + + status = sci_remote_node_context_start_io(&idev->rnc, ireq); + if (status != SCI_SUCCESS) + break; + + status = sci_request_start(ireq); + if (status != SCI_SUCCESS) + break; + + if (task->ata_task.use_ncq) + new_state = SCI_STP_DEV_NCQ; + else { + idev->working_request = ireq; + new_state = SCI_STP_DEV_CMD; + } + sci_change_state(sm, new_state); + break; + } + case SCI_STP_DEV_NCQ: { + struct sas_task *task = isci_request_access_task(ireq); + + if (task->ata_task.use_ncq) { + status = sci_port_start_io(iport, idev, ireq); + if (status != SCI_SUCCESS) + return status; + + status = sci_remote_node_context_start_io(&idev->rnc, ireq); + if (status != SCI_SUCCESS) + break; + + status = sci_request_start(ireq); + } else + return SCI_FAILURE_INVALID_STATE; + break; + } + case SCI_STP_DEV_AWAIT_RESET: + return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; + case SCI_SMP_DEV_IDLE: + status = sci_port_start_io(iport, idev, ireq); + if (status != SCI_SUCCESS) + return status; + + status = sci_remote_node_context_start_io(&idev->rnc, ireq); + if (status != SCI_SUCCESS) + break; + + status = sci_request_start(ireq); + if (status != SCI_SUCCESS) + break; + + idev->working_request = ireq; + sci_change_state(&idev->sm, SCI_SMP_DEV_CMD); + break; + case SCI_STP_DEV_CMD: + case SCI_SMP_DEV_CMD: + /* device is already handling a command it can not accept new commands + * until this one is complete. + */ + return SCI_FAILURE_INVALID_STATE; + } + + sci_remote_device_start_request(idev, ireq, status); + return status; +} + +static enum sci_status common_complete_io(struct isci_port *iport, + struct isci_remote_device *idev, + struct isci_request *ireq) +{ + enum sci_status status; + + status = sci_request_complete(ireq); + if (status != SCI_SUCCESS) + return status; + + status = sci_port_complete_io(iport, idev, ireq); + if (status != SCI_SUCCESS) + return status; + + sci_remote_device_decrement_request_count(idev); + return status; +} + +enum sci_status sci_remote_device_complete_io(struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq) +{ + struct sci_base_state_machine *sm = &idev->sm; + enum sci_remote_device_states state = sm->current_state_id; + struct isci_port *iport = idev->owning_port; + enum sci_status status; + + switch (state) { + case SCI_DEV_INITIAL: + case SCI_DEV_STOPPED: + case SCI_DEV_STARTING: + case SCI_STP_DEV_IDLE: + case SCI_SMP_DEV_IDLE: + case SCI_DEV_FAILED: + case SCI_DEV_FINAL: + default: + dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", + __func__, state); + return SCI_FAILURE_INVALID_STATE; + case SCI_DEV_READY: + case SCI_STP_DEV_AWAIT_RESET: + case SCI_DEV_RESETTING: + status = common_complete_io(iport, idev, ireq); + break; + case SCI_STP_DEV_CMD: + case SCI_STP_DEV_NCQ: + case SCI_STP_DEV_NCQ_ERROR: + status = common_complete_io(iport, idev, ireq); + if (status != SCI_SUCCESS) + break; + + if (ireq->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { + /* This request causes hardware error, device needs to be Lun Reset. + * So here we force the state machine to IDLE state so the rest IOs + * can reach RNC state handler, these IOs will be completed by RNC with + * status of "DEVICE_RESET_REQUIRED", instead of "INVALID STATE". 
+ */ + sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET); + } else if (idev->started_request_count == 0) + sci_change_state(sm, SCI_STP_DEV_IDLE); + break; + case SCI_SMP_DEV_CMD: + status = common_complete_io(iport, idev, ireq); + if (status != SCI_SUCCESS) + break; + sci_change_state(sm, SCI_SMP_DEV_IDLE); + break; + case SCI_DEV_STOPPING: + status = common_complete_io(iport, idev, ireq); + if (status != SCI_SUCCESS) + break; + + if (idev->started_request_count == 0) + sci_remote_node_context_destruct(&idev->rnc, + rnc_destruct_done, + idev); + break; + } + + if (status != SCI_SUCCESS) + dev_err(scirdev_to_dev(idev), + "%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x " + "could not complete\n", __func__, iport, + idev, ireq, status); + else + isci_put_device(idev); + + return status; +} + +static void sci_remote_device_continue_request(void *dev) +{ + struct isci_remote_device *idev = dev; + + /* we need to check if this request is still valid to continue. */ + if (idev->working_request) + sci_controller_continue_io(idev->working_request); +} + +enum sci_status sci_remote_device_start_task(struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq) +{ + struct sci_base_state_machine *sm = &idev->sm; + enum sci_remote_device_states state = sm->current_state_id; + struct isci_port *iport = idev->owning_port; + enum sci_status status; + + switch (state) { + case SCI_DEV_INITIAL: + case SCI_DEV_STOPPED: + case SCI_DEV_STARTING: + case SCI_SMP_DEV_IDLE: + case SCI_SMP_DEV_CMD: + case SCI_DEV_STOPPING: + case SCI_DEV_FAILED: + case SCI_DEV_RESETTING: + case SCI_DEV_FINAL: + default: + dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", + __func__, state); + return SCI_FAILURE_INVALID_STATE; + case SCI_STP_DEV_IDLE: + case SCI_STP_DEV_CMD: + case SCI_STP_DEV_NCQ: + case SCI_STP_DEV_NCQ_ERROR: + case SCI_STP_DEV_AWAIT_RESET: + status = sci_port_start_io(iport, idev, ireq); + if (status != SCI_SUCCESS) + return status; + + status = sci_remote_node_context_start_task(&idev->rnc, ireq); + if (status != SCI_SUCCESS) + goto out; + + status = sci_request_start(ireq); + if (status != SCI_SUCCESS) + goto out; + + /* Note: If the remote device state is not IDLE this will + * replace the request that probably resulted in the task + * management request. + */ + idev->working_request = ireq; + sci_change_state(sm, SCI_STP_DEV_CMD); + + /* The remote node context must cleanup the TCi to NCQ mapping + * table. The only way to do this correctly is to either write + * to the TLCR register or to invalidate and repost the RNC. In + * either case the remote node context state machine will take + * the correct action when the remote node context is suspended + * and later resumed. + */ + sci_remote_node_context_suspend(&idev->rnc, + SCI_SOFTWARE_SUSPENSION, NULL, NULL); + sci_remote_node_context_resume(&idev->rnc, + sci_remote_device_continue_request, + idev); + + out: + sci_remote_device_start_request(idev, ireq, status); + /* We need to let the controller start request handler know that + * it can't post TC yet. We will provide a callback function to + * post TC when RNC gets resumed. 
+ */
+ return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS;
+ case SCI_DEV_READY:
+ status = sci_port_start_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ status = sci_remote_node_context_start_task(&idev->rnc, ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ status = sci_request_start(ireq);
+ break;
+ }
+ sci_remote_device_start_request(idev, ireq, status);
+
+ return status;
+}
+
+void sci_remote_device_post_request(struct isci_remote_device *idev, u32 request)
+{
+ struct isci_port *iport = idev->owning_port;
+ u32 context;
+
+ context = request |
+ (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+ (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+ idev->rnc.remote_node_index;
+
+ sci_controller_post_request(iport->owning_controller, context);
+}
+
+/* called once the remote node context has transitioned to a
+ * ready state. This is the indication that the remote device object can also
+ * transition to ready.
+ */
+static void remote_device_resume_done(void *_dev)
+{
+ struct isci_remote_device *idev = _dev;
+
+ if (is_remote_device_ready(idev))
+ return;
+
+ /* go 'ready' if we are not already in a ready state */
+ sci_change_state(&idev->sm, SCI_DEV_READY);
+}
+
+static void sci_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev)
+{
+ struct isci_remote_device *idev = _dev;
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ /* For NCQ operation we do not issue an isci_remote_device_not_ready().
+ * As a result, avoid sending the ready notification.
+ */
+ if (idev->sm.previous_state_id != SCI_STP_DEV_NCQ)
+ isci_remote_device_ready(ihost, idev);
+}
+
+static void sci_remote_device_initial_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+
+ /* Initial state is a transitional state to the stopped state */
+ sci_change_state(&idev->sm, SCI_DEV_STOPPED);
+}
+
+/**
+ * sci_remote_device_destruct() - free remote node context and destruct
+ * @idev: This parameter specifies the remote device to be destructed.
+ *
+ * Remote device objects are a limited resource. As such, they must be
+ * protected. Thus calls to construct and destruct are mutually exclusive and
+ * non-reentrant. The return value shall indicate if the device was
+ * successfully destructed or if some failure occurred. SCI_SUCCESS is
+ * returned if the device is successfully destructed.
+ * SCI_FAILURE_INVALID_REMOTE_DEVICE This value is returned if the supplied
+ * device isn't valid (e.g. it's already been destroyed, the handle isn't
+ * valid, etc.).
+ */
+static enum sci_status sci_remote_device_destruct(struct isci_remote_device *idev)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+ struct isci_host *ihost;
+
+ if (state != SCI_DEV_STOPPED) {
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+ __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ ihost = idev->owning_port->owning_controller;
+ sci_controller_free_remote_node_context(ihost, idev,
+ idev->rnc.remote_node_index);
+ idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
+ sci_change_state(sm, SCI_DEV_FINAL);
+
+ return SCI_SUCCESS;
+}
+
+/**
+ * isci_remote_device_deconstruct() - This function frees an isci_remote_device.
+ * @ihost: This parameter specifies the isci host object.
+ * @idev: This parameter specifies the remote device to be freed.
+ * + */ +static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_remote_device *idev) +{ + dev_dbg(&ihost->pdev->dev, + "%s: isci_device = %p\n", __func__, idev); + + /* There should not be any outstanding io's. All paths to + * here should go through isci_remote_device_nuke_requests. + * If we hit this condition, we will need a way to complete + * io requests in process */ + BUG_ON(!list_empty(&idev->reqs_in_process)); + + sci_remote_device_destruct(idev); + list_del_init(&idev->node); + isci_put_device(idev); +} + +static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); + struct isci_host *ihost = idev->owning_port->owning_controller; + u32 prev_state; + + /* If we are entering from the stopping state let the SCI User know that + * the stop operation has completed. + */ + prev_state = idev->sm.previous_state_id; + if (prev_state == SCI_DEV_STOPPING) + isci_remote_device_deconstruct(ihost, idev); + + sci_controller_remote_device_stopped(ihost, idev); +} + +static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); + struct isci_host *ihost = idev->owning_port->owning_controller; + + isci_remote_device_not_ready(ihost, idev, + SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED); +} + +static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); + struct isci_host *ihost = idev->owning_port->owning_controller; + struct domain_device *dev = idev->domain_dev; + + if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) { + sci_change_state(&idev->sm, SCI_STP_DEV_IDLE); + } else if (dev_is_expander(dev)) { + sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE); + } else + isci_remote_device_ready(ihost, idev); +} + +static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm) +{ + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); + struct domain_device *dev = idev->domain_dev; + + if (dev->dev_type == SAS_END_DEV) { + struct isci_host *ihost = idev->owning_port->owning_controller; + + isci_remote_device_not_ready(ihost, idev, + SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED); + } +} + +static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); + + sci_remote_node_context_suspend( + &idev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL); +} + +static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm) +{ + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); + + sci_remote_node_context_resume(&idev->rnc, NULL, NULL); +} + +static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm) +{ + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); + + idev->working_request = NULL; + if (sci_remote_node_context_is_ready(&idev->rnc)) { + /* + * Since the RNC is ready, it's alright to finish completion + * processing (e.g. signal the remote device is ready). 
*/ + sci_stp_remote_device_ready_idle_substate_resume_complete_handler(idev); + } else { + sci_remote_node_context_resume(&idev->rnc, + sci_stp_remote_device_ready_idle_substate_resume_complete_handler, + idev); + } +} + +static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) +{ + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); + struct isci_host *ihost = idev->owning_port->owning_controller; + + BUG_ON(idev->working_request == NULL); + + isci_remote_device_not_ready(ihost, idev, + SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED); +} + +static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm) +{ + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); + struct isci_host *ihost = idev->owning_port->owning_controller; + + if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED) + isci_remote_device_not_ready(ihost, idev, + idev->not_ready_reason); +} + +static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm) +{ + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); + struct isci_host *ihost = idev->owning_port->owning_controller; + + isci_remote_device_ready(ihost, idev); +} + +static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) +{ + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); + struct isci_host *ihost = idev->owning_port->owning_controller; + + BUG_ON(idev->working_request == NULL); + + isci_remote_device_not_ready(ihost, idev, + SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED); +} + +static void sci_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm) +{ + struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); + + idev->working_request = NULL; +} + +static const struct sci_base_state sci_remote_device_state_table[] = { + [SCI_DEV_INITIAL] = { + .enter_state = sci_remote_device_initial_state_enter, + }, + [SCI_DEV_STOPPED] = { + .enter_state = sci_remote_device_stopped_state_enter, + }, + [SCI_DEV_STARTING] = { + .enter_state = sci_remote_device_starting_state_enter, + }, + [SCI_DEV_READY] = { + .enter_state = sci_remote_device_ready_state_enter, + .exit_state = sci_remote_device_ready_state_exit + }, + [SCI_STP_DEV_IDLE] = { + .enter_state = sci_stp_remote_device_ready_idle_substate_enter, + }, + [SCI_STP_DEV_CMD] = { + .enter_state = sci_stp_remote_device_ready_cmd_substate_enter, + }, + [SCI_STP_DEV_NCQ] = { }, + [SCI_STP_DEV_NCQ_ERROR] = { + .enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter, + }, + [SCI_STP_DEV_AWAIT_RESET] = { }, + [SCI_SMP_DEV_IDLE] = { + .enter_state = sci_smp_remote_device_ready_idle_substate_enter, + }, + [SCI_SMP_DEV_CMD] = { + .enter_state = sci_smp_remote_device_ready_cmd_substate_enter, + .exit_state = sci_smp_remote_device_ready_cmd_substate_exit, + }, + [SCI_DEV_STOPPING] = { }, + [SCI_DEV_FAILED] = { }, + [SCI_DEV_RESETTING] = { + .enter_state = sci_remote_device_resetting_state_enter, + .exit_state = sci_remote_device_resetting_state_exit + }, + [SCI_DEV_FINAL] = { }, +}; + +/** + * sci_remote_device_construct() - common construction + * @sci_port: SAS/SATA port through which this device is accessed. + * @sci_dev: remote device to construct + * + * This routine just performs benign initialization and does not + * allocate the remote_node_context which is left to + * sci_remote_device_[de]a_construct(). 
sci_remote_device_destruct() + * frees the remote_node_context(s) for the device. + */ +static void sci_remote_device_construct(struct isci_port *iport, + struct isci_remote_device *idev) +{ + idev->owning_port = iport; + idev->started_request_count = 0; + + sci_init_sm(&idev->sm, sci_remote_device_state_table, SCI_DEV_INITIAL); + + sci_remote_node_context_construct(&idev->rnc, + SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX); +} + +/** + * sci_remote_device_da_construct() - construct direct attached device. + * + * The information (e.g. IAF, Signature FIS, etc.) necessary to build + * the device is known to the SCI Core since it is contained in the + * sci_phy object. Remote node context(s) is/are a global resource + * allocated by this routine, freed by sci_remote_device_destruct(). + * + * Returns: + * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed. + * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to + * sata-only controller instance. + * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted. + */ +static enum sci_status sci_remote_device_da_construct(struct isci_port *iport, + struct isci_remote_device *idev) +{ + enum sci_status status; + struct domain_device *dev = idev->domain_dev; + + sci_remote_device_construct(iport, idev); + + /* + * This information is request to determine how many remote node context + * entries will be needed to store the remote node. + */ + idev->is_direct_attached = true; + status = sci_controller_allocate_remote_node_context(iport->owning_controller, + idev, + &idev->rnc.remote_node_index); + + if (status != SCI_SUCCESS) + return status; + + if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV || + (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev)) + /* pass */; + else + return SCI_FAILURE_UNSUPPORTED_PROTOCOL; + + idev->connection_rate = sci_port_get_max_allowed_speed(iport); + + /* / @todo Should I assign the port width by reading all of the phys on the port? */ + idev->device_port_width = 1; + + return SCI_SUCCESS; +} + +/** + * sci_remote_device_ea_construct() - construct expander attached device + * + * Remote node context(s) is/are a global resource allocated by this + * routine, freed by sci_remote_device_destruct(). + * + * Returns: + * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed. + * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to + * sata-only controller instance. + * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted. + */ +static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport, + struct isci_remote_device *idev) +{ + struct domain_device *dev = idev->domain_dev; + enum sci_status status; + + sci_remote_device_construct(iport, idev); + + status = sci_controller_allocate_remote_node_context(iport->owning_controller, + idev, + &idev->rnc.remote_node_index); + if (status != SCI_SUCCESS) + return status; + + if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV || + (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev)) + /* pass */; + else + return SCI_FAILURE_UNSUPPORTED_PROTOCOL; + + /* + * For SAS-2 the physical link rate is actually a logical link + * rate that incorporates multiplexing. The SCU doesn't + * incorporate multiplexing and for the purposes of the + * connection the logical link rate is that same as the + * physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay + * one another, so this code works for both situations. 
*/ + idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport), + dev->linkrate); + + /* / @todo Should I assign the port width by reading all of the phys on the port? */ + idev->device_port_width = 1; + + return SCI_SUCCESS; +} + +/** + * sci_remote_device_start() - This method will start the supplied remote + * device. This method enables normal IO requests to flow through to the + * remote device. + * @remote_device: This parameter specifies the device to be started. + * @timeout: This parameter specifies the number of milliseconds in which the + * start operation should complete. + * + * An indication of whether the device was successfully started. SCI_SUCCESS + * This value is returned if the device was successfully started. + * SCI_FAILURE_INVALID_PHY This value is returned if the user attempts to start + * the device when there have been no phys added to it. + */ +static enum sci_status sci_remote_device_start(struct isci_remote_device *idev, + u32 timeout) +{ + struct sci_base_state_machine *sm = &idev->sm; + enum sci_remote_device_states state = sm->current_state_id; + enum sci_status status; + + if (state != SCI_DEV_STOPPED) { + dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", + __func__, state); + return SCI_FAILURE_INVALID_STATE; + } + + status = sci_remote_node_context_resume(&idev->rnc, + remote_device_resume_done, + idev); + if (status != SCI_SUCCESS) + return status; + + sci_change_state(sm, SCI_DEV_STARTING); + + return SCI_SUCCESS; +} + +static enum sci_status isci_remote_device_construct(struct isci_port *iport, + struct isci_remote_device *idev) +{ + struct isci_host *ihost = iport->isci_host; + struct domain_device *dev = idev->domain_dev; + enum sci_status status; + + if (dev->parent && dev_is_expander(dev->parent)) + status = sci_remote_device_ea_construct(iport, idev); + else + status = sci_remote_device_da_construct(iport, idev); + + if (status != SCI_SUCCESS) { + dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n", + __func__, status); + + return status; + } + + /* start the device. */ + status = sci_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT); + + if (status != SCI_SUCCESS) + dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n", + status); + + return status; +} + +void isci_remote_device_nuke_requests(struct isci_host *ihost, struct isci_remote_device *idev) +{ + DECLARE_COMPLETION_ONSTACK(aborted_task_completion); + + dev_dbg(&ihost->pdev->dev, + "%s: idev = %p\n", __func__, idev); + + /* Cleanup all requests pending for this device. */ + isci_terminate_pending_requests(ihost, idev); + + dev_dbg(&ihost->pdev->dev, + "%s: idev = %p, done\n", __func__, idev); +} + +/** + * This function builds the isci_remote_device when a libsas dev_found message + * is received. + * @isci_host: This parameter specifies the isci host object. + * @port: This parameter specifies the isci_port conected to this device. + * + * pointer to new isci_remote_device. 
+ */ +static struct isci_remote_device * +isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport) +{ + struct isci_remote_device *idev; + int i; + + for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) { + idev = &ihost->devices[i]; + if (!test_and_set_bit(IDEV_ALLOCATED, &idev->flags)) + break; + } + + if (i >= SCI_MAX_REMOTE_DEVICES) { + dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__); + return NULL; + } + + if (WARN_ONCE(!list_empty(&idev->reqs_in_process), "found requests in process\n")) + return NULL; + + if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n")) + return NULL; + + return idev; +} + +void isci_remote_device_release(struct kref *kref) +{ + struct isci_remote_device *idev = container_of(kref, typeof(*idev), kref); + struct isci_host *ihost = idev->isci_port->isci_host; + + idev->domain_dev = NULL; + idev->isci_port = NULL; + clear_bit(IDEV_START_PENDING, &idev->flags); + clear_bit(IDEV_STOP_PENDING, &idev->flags); + clear_bit(IDEV_IO_READY, &idev->flags); + clear_bit(IDEV_GONE, &idev->flags); + clear_bit(IDEV_EH, &idev->flags); + smp_mb__before_clear_bit(); + clear_bit(IDEV_ALLOCATED, &idev->flags); + wake_up(&ihost->eventq); +} + +/** + * isci_remote_device_stop() - This function is called internally to stop the + * remote device. + * @isci_host: This parameter specifies the isci host object. + * @isci_device: This parameter specifies the remote device. + * + * The status of the ihost request to stop. + */ +enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_remote_device *idev) +{ + enum sci_status status; + unsigned long flags; + + dev_dbg(&ihost->pdev->dev, + "%s: isci_device = %p\n", __func__, idev); + + spin_lock_irqsave(&ihost->scic_lock, flags); + idev->domain_dev->lldd_dev = NULL; /* disable new lookups */ + set_bit(IDEV_GONE, &idev->flags); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + /* Kill all outstanding requests. */ + isci_remote_device_nuke_requests(ihost, idev); + + set_bit(IDEV_STOP_PENDING, &idev->flags); + + spin_lock_irqsave(&ihost->scic_lock, flags); + status = sci_remote_device_stop(idev, 50); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + /* Wait for the stop complete callback. */ + if (WARN_ONCE(status != SCI_SUCCESS, "failed to stop device\n")) + /* nothing to wait for */; + else + wait_for_device_stop(ihost, idev); + + return status; +} + +/** + * isci_remote_device_gone() - This function is called by libsas when a domain + * device is removed. + * @domain_device: This parameter specifies the libsas domain device. + * + */ +void isci_remote_device_gone(struct domain_device *dev) +{ + struct isci_host *ihost = dev_to_ihost(dev); + struct isci_remote_device *idev = dev->lldd_dev; + + dev_dbg(&ihost->pdev->dev, + "%s: domain_device = %p, isci_device = %p, isci_port = %p\n", + __func__, dev, idev, idev->isci_port); + + isci_remote_device_stop(ihost, idev); +} + + +/** + * isci_remote_device_found() - This function is called by libsas when a remote + * device is discovered. A remote device object is created and started. the + * function then sleeps until the sci core device started message is + * received. + * @domain_device: This parameter specifies the libsas domain device. + * + * status, zero indicates success. 
+ */ +int isci_remote_device_found(struct domain_device *domain_dev) +{ + struct isci_host *isci_host = dev_to_ihost(domain_dev); + struct isci_port *isci_port; + struct isci_phy *isci_phy; + struct asd_sas_port *sas_port; + struct asd_sas_phy *sas_phy; + struct isci_remote_device *isci_device; + enum sci_status status; + + dev_dbg(&isci_host->pdev->dev, + "%s: domain_device = %p\n", __func__, domain_dev); + + wait_for_start(isci_host); + + sas_port = domain_dev->port; + sas_phy = list_first_entry(&sas_port->phy_list, struct asd_sas_phy, + port_phy_el); + isci_phy = to_iphy(sas_phy); + isci_port = isci_phy->isci_port; + + /* we are being called for a device on this port, + * so it has to come up eventually + */ + wait_for_completion(&isci_port->start_complete); + + if ((isci_stopping == isci_port_get_state(isci_port)) || + (isci_stopped == isci_port_get_state(isci_port))) + return -ENODEV; + + isci_device = isci_remote_device_alloc(isci_host, isci_port); + if (!isci_device) + return -ENODEV; + + kref_init(&isci_device->kref); + INIT_LIST_HEAD(&isci_device->node); + + spin_lock_irq(&isci_host->scic_lock); + isci_device->domain_dev = domain_dev; + isci_device->isci_port = isci_port; + list_add_tail(&isci_device->node, &isci_port->remote_dev_list); + + set_bit(IDEV_START_PENDING, &isci_device->flags); + status = isci_remote_device_construct(isci_port, isci_device); + + dev_dbg(&isci_host->pdev->dev, + "%s: isci_device = %p\n", + __func__, isci_device); + + if (status == SCI_SUCCESS) { + /* device came up, advertise it to the world */ + domain_dev->lldd_dev = isci_device; + } else + isci_put_device(isci_device); + spin_unlock_irq(&isci_host->scic_lock); + + /* wait for the device ready callback. */ + wait_for_device_start(isci_host, isci_device); + + return status == SCI_SUCCESS ? 0 : -ENODEV; +} +/** + * isci_device_is_reset_pending() - This function will check if there is any + * pending reset condition on the device. + * @request: This parameter is the isci_device object. + * + * true if there is a reset pending for the device. + */ +bool isci_device_is_reset_pending( + struct isci_host *isci_host, + struct isci_remote_device *isci_device) +{ + struct isci_request *isci_request; + struct isci_request *tmp_req; + bool reset_is_pending = false; + unsigned long flags; + + dev_dbg(&isci_host->pdev->dev, + "%s: isci_device = %p\n", __func__, isci_device); + + spin_lock_irqsave(&isci_host->scic_lock, flags); + + /* Check for reset on all pending requests. */ + list_for_each_entry_safe(isci_request, tmp_req, + &isci_device->reqs_in_process, dev_node) { + dev_dbg(&isci_host->pdev->dev, + "%s: isci_device = %p request = %p\n", + __func__, isci_device, isci_request); + + if (isci_request->ttype == io_task) { + struct sas_task *task = isci_request_access_task( + isci_request); + + spin_lock(&task->task_state_lock); + if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) + reset_is_pending = true; + spin_unlock(&task->task_state_lock); + } + } + + spin_unlock_irqrestore(&isci_host->scic_lock, flags); + + dev_dbg(&isci_host->pdev->dev, + "%s: isci_device = %p reset_is_pending = %d\n", + __func__, isci_device, reset_is_pending); + + return reset_is_pending; +} + +/** + * isci_device_clear_reset_pending() - This function will clear if any pending + * reset condition flags on the device. + * @request: This parameter is the isci_device object. + * + * true if there is a reset pending for the device. 
+ */ +void isci_device_clear_reset_pending(struct isci_host *ihost, struct isci_remote_device *idev) +{ + struct isci_request *isci_request; + struct isci_request *tmp_req; + unsigned long flags = 0; + + dev_dbg(&ihost->pdev->dev, "%s: idev=%p, ihost=%p\n", + __func__, idev, ihost); + + spin_lock_irqsave(&ihost->scic_lock, flags); + + /* Clear reset pending on all pending requests. */ + list_for_each_entry_safe(isci_request, tmp_req, + &idev->reqs_in_process, dev_node) { + dev_dbg(&ihost->pdev->dev, "%s: idev = %p request = %p\n", + __func__, idev, isci_request); + + if (isci_request->ttype == io_task) { + + unsigned long flags2; + struct sas_task *task = isci_request_access_task( + isci_request); + + spin_lock_irqsave(&task->task_state_lock, flags2); + task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET; + spin_unlock_irqrestore(&task->task_state_lock, flags2); + } + } + spin_unlock_irqrestore(&ihost->scic_lock, flags); +} diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h new file mode 100644 index 00000000000..57ccfc3d6ad --- /dev/null +++ b/drivers/scsi/isci/remote_device.h @@ -0,0 +1,352 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _ISCI_REMOTE_DEVICE_H_ +#define _ISCI_REMOTE_DEVICE_H_ +#include <scsi/libsas.h> +#include <linux/kref.h> +#include "scu_remote_node_context.h" +#include "remote_node_context.h" +#include "port.h" + +enum sci_remote_device_not_ready_reason_code { + SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED, + SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED, + SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED, + SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED, + SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED, + SCIC_REMOTE_DEVICE_NOT_READY_REASON_CODE_MAX +}; + +/** + * isci_remote_device - isci representation of a sas expander / end point + * @device_port_width: hw setting for number of simultaneous connections + * @connection_rate: per-taskcontext connection rate for this device + * @working_request: SATA requests have no tag we for unaccelerated + * protocols we need a method to associate unsolicited + * frames with a pending request + */ +struct isci_remote_device { + #define IDEV_START_PENDING 0 + #define IDEV_STOP_PENDING 1 + #define IDEV_ALLOCATED 2 + #define IDEV_EH 3 + #define IDEV_GONE 4 + #define IDEV_IO_READY 5 + #define IDEV_IO_NCQERROR 6 + unsigned long flags; + struct kref kref; + struct isci_port *isci_port; + struct domain_device *domain_dev; + struct list_head node; + struct list_head reqs_in_process; + struct sci_base_state_machine sm; + u32 device_port_width; + enum sas_linkrate connection_rate; + bool is_direct_attached; + struct isci_port *owning_port; + struct sci_remote_node_context rnc; + /* XXX unify with device reference counting and delete */ + u32 started_request_count; + struct isci_request *working_request; + u32 not_ready_reason; +}; + +#define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000 + +/* device reference routines must be called under sci_lock */ +static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev) +{ + struct isci_remote_device *idev = dev->lldd_dev; + + if (idev && !test_bit(IDEV_GONE, &idev->flags)) { + kref_get(&idev->kref); + return idev; + } + + return NULL; +} + +void isci_remote_device_release(struct kref *kref); +static inline void isci_put_device(struct isci_remote_device *idev) +{ + if (idev) + kref_put(&idev->kref, isci_remote_device_release); +} + +enum sci_status isci_remote_device_stop(struct isci_host *ihost, + struct isci_remote_device *idev); +void isci_remote_device_nuke_requests(struct isci_host *ihost, + struct isci_remote_device *idev); +void isci_remote_device_gone(struct domain_device *domain_dev); +int isci_remote_device_found(struct domain_device *domain_dev); +bool isci_device_is_reset_pending(struct isci_host *ihost, + struct isci_remote_device *idev); +void isci_device_clear_reset_pending(struct isci_host *ihost, + struct isci_remote_device *idev); +/** + * sci_remote_device_stop() - This method will stop both transmission and + * reception of link activity for the supplied remote device. 
This method
+ * disables normal IO requests from flowing through to the remote device.
+ * @idev: This parameter specifies the device to be stopped.
+ * @timeout: This parameter specifies the number of milliseconds in which the
+ * stop operation should complete.
+ *
+ * An indication of whether the device was successfully stopped. SCI_SUCCESS
+ * This value is returned if the transmission and reception for the device was
+ * successfully stopped.
+ */
+enum sci_status sci_remote_device_stop(
+ struct isci_remote_device *idev,
+ u32 timeout);
+
+/**
+ * sci_remote_device_reset() - This method will reset the device making it
+ * ready for operation. This method must be called anytime the device is
+ * reset either through a SMP phy control or a port hard reset request.
+ * @idev: This parameter specifies the device to be reset.
+ *
+ * This method does not actually cause the device hardware to be reset. This
+ * method resets the software object so that it will be operational after a
+ * device hardware reset completes. An indication of whether the device reset
+ * was accepted. SCI_SUCCESS This value is returned if the device reset is
+ * started.
+ */
+enum sci_status sci_remote_device_reset(
+ struct isci_remote_device *idev);
+
+/**
+ * sci_remote_device_reset_complete() - This method informs the device object
+ * that the reset operation is complete and the device can resume operation
+ * again.
+ * @idev: This parameter specifies the device which is to be informed
+ * of the reset complete operation.
+ *
+ * An indication that the device is resuming operation. SCI_SUCCESS the device
+ * is resuming operation.
+ */
+enum sci_status sci_remote_device_reset_complete(
+ struct isci_remote_device *idev);
+
+/**
+ * enum sci_remote_device_states - This enumeration depicts all the states
+ * for the common remote device state machine.
+ *
+ *
+ */
+enum sci_remote_device_states {
+ /**
+ * Simply the initial state for the base remote device state machine.
+ */
+ SCI_DEV_INITIAL,
+
+ /**
+ * This state indicates that the remote device has successfully been
+ * stopped. In this state no new IO operations are permitted.
+ * This state is entered from the INITIAL state.
+ * This state is entered from the STOPPING state.
+ */
+ SCI_DEV_STOPPED,
+
+ /**
+ * This state indicates that the remote device is in the process of
+ * becoming ready (i.e. starting). In this state no new IO operations
+ * are permitted.
+ * This state is entered from the STOPPED state.
+ */
+ SCI_DEV_STARTING,
+
+ /**
+ * This state indicates the remote device is now ready. Thus, the user
+ * is able to perform IO operations on the remote device.
+ * This state is entered from the STARTING state.
+ */
+ SCI_DEV_READY,
+
+ /**
+ * This is the idle substate for the stp remote device. When there are no
+ * active IOs for the device it is in this state.
+ */
+ SCI_STP_DEV_IDLE,
+
+ /**
+ * This is the command state for the STP remote device. This state is
+ * entered when the device is processing a non-NCQ command. The device object
+ * will fail any new start IO requests until this command is complete.
+ */
+ SCI_STP_DEV_CMD,
+
+ /**
+ * This is the NCQ state for the STP remote device. This state is entered
+ * when the device is processing an NCQ request. It will remain in this state
+ * as long as there are one or more NCQ requests being processed.
+ */
+ SCI_STP_DEV_NCQ,
+
+ /**
+ * This is the NCQ error state for the STP remote device.
This state is + * entered when an SDB error FIS is received by the device object while in the + * NCQ state. The device object will only accept a READ LOG command while in + * this state. + */ + SCI_STP_DEV_NCQ_ERROR, + + /** + * This is the READY substate indicates the device is waiting for the RESET task + * coming to be recovered from certain hardware specific error. + */ + SCI_STP_DEV_AWAIT_RESET, + + /** + * This is the ready operational substate for the remote device. This is the + * normal operational state for a remote device. + */ + SCI_SMP_DEV_IDLE, + + /** + * This is the suspended state for the remote device. This is the state that + * the device is placed in when a RNC suspend is received by the SCU hardware. + */ + SCI_SMP_DEV_CMD, + + /** + * This state indicates that the remote device is in the process of + * stopping. In this state no new IO operations are permitted, but + * existing IO operations are allowed to complete. + * This state is entered from the READY state. + * This state is entered from the FAILED state. + */ + SCI_DEV_STOPPING, + + /** + * This state indicates that the remote device has failed. + * In this state no new IO operations are permitted. + * This state is entered from the INITIALIZING state. + * This state is entered from the READY state. + */ + SCI_DEV_FAILED, + + /** + * This state indicates the device is being reset. + * In this state no new IO operations are permitted. + * This state is entered from the READY state. + */ + SCI_DEV_RESETTING, + + /** + * Simply the final state for the base remote device state machine. + */ + SCI_DEV_FINAL, +}; + +static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_context *rnc) +{ + struct isci_remote_device *idev; + + idev = container_of(rnc, typeof(*idev), rnc); + + return idev; +} + +static inline bool dev_is_expander(struct domain_device *dev) +{ + return dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV; +} + +static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev) +{ + /* XXX delete this voodoo when converting to the top-level device + * reference count + */ + if (WARN_ONCE(idev->started_request_count == 0, + "%s: tried to decrement started_request_count past 0!?", + __func__)) + /* pass */; + else + idev->started_request_count--; +} + +enum sci_status sci_remote_device_frame_handler( + struct isci_remote_device *idev, + u32 frame_index); + +enum sci_status sci_remote_device_event_handler( + struct isci_remote_device *idev, + u32 event_code); + +enum sci_status sci_remote_device_start_io( + struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq); + +enum sci_status sci_remote_device_start_task( + struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq); + +enum sci_status sci_remote_device_complete_io( + struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq); + +enum sci_status sci_remote_device_suspend( + struct isci_remote_device *idev, + u32 suspend_type); + +void sci_remote_device_post_request( + struct isci_remote_device *idev, + u32 request); + +#endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */ diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c new file mode 100644 index 00000000000..748e8339d1e --- /dev/null +++ b/drivers/scsi/isci/remote_node_context.c @@ -0,0 +1,627 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. 
When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "host.h" +#include "isci.h" +#include "remote_device.h" +#include "remote_node_context.h" +#include "scu_event_codes.h" +#include "scu_task_context.h" + + +/** + * + * @sci_rnc: The RNC for which the is posted request is being made. + * + * This method will return true if the RNC is not in the initial state. In all + * other states the RNC is considered active and this will return true. The + * destroy request of the state machine drives the RNC back to the initial + * state. If the state machine changes then this routine will also have to be + * changed. bool true if the state machine is not in the initial state false if + * the state machine is in the initial state + */ + +/** + * + * @sci_rnc: The state of the remote node context object to check. 
+ * + * This method will return true if the remote node context is in a READY state + * otherwise it will return false bool true if the remote node context is in + * the ready state. false if the remote node context is not in the ready state. + */ +bool sci_remote_node_context_is_ready( + struct sci_remote_node_context *sci_rnc) +{ + u32 current_state = sci_rnc->sm.current_state_id; + + if (current_state == SCI_RNC_READY) { + return true; + } + + return false; +} + +static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id) +{ + if (id < ihost->remote_node_entries && + ihost->device_table[id]) + return &ihost->remote_node_context_table[id]; + + return NULL; +} + +static void sci_remote_node_context_construct_buffer(struct sci_remote_node_context *sci_rnc) +{ + struct isci_remote_device *idev = rnc_to_dev(sci_rnc); + struct domain_device *dev = idev->domain_dev; + int rni = sci_rnc->remote_node_index; + union scu_remote_node_context *rnc; + struct isci_host *ihost; + __le64 sas_addr; + + ihost = idev->owning_port->owning_controller; + rnc = sci_rnc_by_id(ihost, rni); + + memset(rnc, 0, sizeof(union scu_remote_node_context) + * sci_remote_device_node_count(idev)); + + rnc->ssp.remote_node_index = rni; + rnc->ssp.remote_node_port_width = idev->device_port_width; + rnc->ssp.logical_port_index = idev->owning_port->physical_port_index; + + /* sas address is __be64, context ram format is __le64 */ + sas_addr = cpu_to_le64(SAS_ADDR(dev->sas_addr)); + rnc->ssp.remote_sas_address_hi = upper_32_bits(sas_addr); + rnc->ssp.remote_sas_address_lo = lower_32_bits(sas_addr); + + rnc->ssp.nexus_loss_timer_enable = true; + rnc->ssp.check_bit = false; + rnc->ssp.is_valid = false; + rnc->ssp.is_remote_node_context = true; + rnc->ssp.function_number = 0; + + rnc->ssp.arbitration_wait_time = 0; + + if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { + rnc->ssp.connection_occupancy_timeout = + ihost->user_parameters.stp_max_occupancy_timeout; + rnc->ssp.connection_inactivity_timeout = + ihost->user_parameters.stp_inactivity_timeout; + } else { + rnc->ssp.connection_occupancy_timeout = + ihost->user_parameters.ssp_max_occupancy_timeout; + rnc->ssp.connection_inactivity_timeout = + ihost->user_parameters.ssp_inactivity_timeout; + } + + rnc->ssp.initial_arbitration_wait_time = 0; + + /* Open Address Frame Parameters */ + rnc->ssp.oaf_connection_rate = idev->connection_rate; + rnc->ssp.oaf_features = 0; + rnc->ssp.oaf_source_zone_group = 0; + rnc->ssp.oaf_more_compatibility_features = 0; +} + +/** + * + * @sci_rnc: + * @callback: + * @callback_parameter: + * + * This method will setup the remote node context object so it will transition + * to its ready state. If the remote node context is already setup to + * transition to its final state then this function does nothing. 
none + */ +static void sci_remote_node_context_setup_to_resume( + struct sci_remote_node_context *sci_rnc, + scics_sds_remote_node_context_callback callback, + void *callback_parameter) +{ + if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL) { + sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY; + sci_rnc->user_callback = callback; + sci_rnc->user_cookie = callback_parameter; + } +} + +static void sci_remote_node_context_setup_to_destory( + struct sci_remote_node_context *sci_rnc, + scics_sds_remote_node_context_callback callback, + void *callback_parameter) +{ + sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL; + sci_rnc->user_callback = callback; + sci_rnc->user_cookie = callback_parameter; +} + +/** + * + * + * This method just calls the user callback function and then resets the + * callback. + */ +static void sci_remote_node_context_notify_user( + struct sci_remote_node_context *rnc) +{ + if (rnc->user_callback != NULL) { + (*rnc->user_callback)(rnc->user_cookie); + + rnc->user_callback = NULL; + rnc->user_cookie = NULL; + } +} + +static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc) +{ + if (rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY) + sci_remote_node_context_resume(rnc, rnc->user_callback, + rnc->user_cookie); +} + +static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc) +{ + union scu_remote_node_context *rnc_buffer; + struct isci_remote_device *idev = rnc_to_dev(sci_rnc); + struct domain_device *dev = idev->domain_dev; + struct isci_host *ihost = idev->owning_port->owning_controller; + + rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index); + + rnc_buffer->ssp.is_valid = true; + + if (!idev->is_direct_attached && + (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))) { + sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96); + } else { + sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32); + + if (idev->is_direct_attached) + sci_port_setup_transports(idev->owning_port, + sci_rnc->remote_node_index); + } +} + +static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_node_context *sci_rnc) +{ + union scu_remote_node_context *rnc_buffer; + struct isci_remote_device *idev = rnc_to_dev(sci_rnc); + struct isci_host *ihost = idev->owning_port->owning_controller; + + rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index); + + rnc_buffer->ssp.is_valid = false; + + sci_remote_device_post_request(rnc_to_dev(sci_rnc), + SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE); +} + +static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm) +{ + struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); + + /* Check to see if we have gotten back to the initial state because + * someone requested to destroy the remote node context object. 
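The helpers above implement a small "destination state" contract: a resume or destroy request latches the desired end state together with a one-shot completion callback, and whichever state-entry handler reaches that end state calls sci_remote_node_context_notify_user() exactly once. The standalone sketch below uses illustrative names only (not the driver's types) and shows the shape of that pattern, including the rule that a pending destroy is never downgraded to a resume.

/* Minimal, self-contained sketch of the destination-state pattern used by the
 * RNC code above; names here are illustrative only. */
#include <stdio.h>

enum dest { DEST_UNSPECIFIED, DEST_READY, DEST_FINAL };

struct ctx {
	enum dest destination;
	void (*user_callback)(void *);
	void *user_cookie;
};

static void notify_user(struct ctx *c)
{
	if (c->user_callback) {
		c->user_callback(c->user_cookie);
		c->user_callback = NULL;	/* one-shot, like the driver */
		c->user_cookie = NULL;
	}
}

static void setup_to_resume(struct ctx *c, void (*cb)(void *), void *p)
{
	/* A pending destroy (DEST_FINAL) always wins over a resume request. */
	if (c->destination != DEST_FINAL) {
		c->destination = DEST_READY;
		c->user_callback = cb;
		c->user_cookie = p;
	}
}

static void ready_state_enter(struct ctx *c)
{
	c->destination = DEST_UNSPECIFIED;
	notify_user(c);
}

static void resume_done(void *p)
{
	printf("resume complete for %s\n", (const char *)p);
}

int main(void)
{
	struct ctx c = { DEST_UNSPECIFIED, NULL, NULL };

	setup_to_resume(&c, resume_done, "rnc 0");
	ready_state_enter(&c);		/* fires the latched callback once */
	return 0;
}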
+ */ + if (sm->previous_state_id == SCI_RNC_INVALIDATING) { + rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; + sci_remote_node_context_notify_user(rnc); + } +} + +static void sci_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm) +{ + struct sci_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm); + + sci_remote_node_context_validate_context_buffer(sci_rnc); +} + +static void sci_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm) +{ + struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); + + sci_remote_node_context_invalidate_context_buffer(rnc); +} + +static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm) +{ + struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); + struct isci_remote_device *idev; + struct domain_device *dev; + + idev = rnc_to_dev(rnc); + dev = idev->domain_dev; + + /* + * For direct attached SATA devices we need to clear the TLCR + * NCQ to TCi tag mapping on the phy and in cases where we + * resume because of a target reset we also need to update + * the STPTLDARNI register with the RNi of the device + */ + if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) && + idev->is_direct_attached) + sci_port_setup_transports(idev->owning_port, + rnc->remote_node_index); + + sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME); +} + +static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm) +{ + struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); + + rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; + + if (rnc->user_callback) + sci_remote_node_context_notify_user(rnc); +} + +static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm) +{ + struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); + + sci_remote_node_context_continue_state_transitions(rnc); +} + +static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm) +{ + struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); + + sci_remote_node_context_continue_state_transitions(rnc); +} + +static const struct sci_base_state sci_remote_node_context_state_table[] = { + [SCI_RNC_INITIAL] = { + .enter_state = sci_remote_node_context_initial_state_enter, + }, + [SCI_RNC_POSTING] = { + .enter_state = sci_remote_node_context_posting_state_enter, + }, + [SCI_RNC_INVALIDATING] = { + .enter_state = sci_remote_node_context_invalidating_state_enter, + }, + [SCI_RNC_RESUMING] = { + .enter_state = sci_remote_node_context_resuming_state_enter, + }, + [SCI_RNC_READY] = { + .enter_state = sci_remote_node_context_ready_state_enter, + }, + [SCI_RNC_TX_SUSPENDED] = { + .enter_state = sci_remote_node_context_tx_suspended_state_enter, + }, + [SCI_RNC_TX_RX_SUSPENDED] = { + .enter_state = sci_remote_node_context_tx_rx_suspended_state_enter, + }, + [SCI_RNC_AWAIT_SUSPENSION] = { }, +}; + +void sci_remote_node_context_construct(struct sci_remote_node_context *rnc, + u16 remote_node_index) +{ + memset(rnc, 0, sizeof(struct sci_remote_node_context)); + + rnc->remote_node_index = remote_node_index; + rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; + + sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL); +} + +enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc, 
+ u32 event_code) +{ + enum scis_sds_remote_node_context_states state; + + state = sci_rnc->sm.current_state_id; + switch (state) { + case SCI_RNC_POSTING: + switch (scu_get_event_code(event_code)) { + case SCU_EVENT_POST_RNC_COMPLETE: + sci_change_state(&sci_rnc->sm, SCI_RNC_READY); + break; + default: + goto out; + } + break; + case SCI_RNC_INVALIDATING: + if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) { + if (sci_rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL) + state = SCI_RNC_INITIAL; + else + state = SCI_RNC_POSTING; + sci_change_state(&sci_rnc->sm, state); + } else { + switch (scu_get_event_type(event_code)) { + case SCU_EVENT_TYPE_RNC_SUSPEND_TX: + case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: + /* We really dont care if the hardware is going to suspend + * the device since it's being invalidated anyway */ + dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)), + "%s: SCIC Remote Node Context 0x%p was " + "suspeneded by hardware while being " + "invalidated.\n", __func__, sci_rnc); + break; + default: + goto out; + } + } + break; + case SCI_RNC_RESUMING: + if (scu_get_event_code(event_code) == SCU_EVENT_POST_RCN_RELEASE) { + sci_change_state(&sci_rnc->sm, SCI_RNC_READY); + } else { + switch (scu_get_event_type(event_code)) { + case SCU_EVENT_TYPE_RNC_SUSPEND_TX: + case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: + /* We really dont care if the hardware is going to suspend + * the device since it's being resumed anyway */ + dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)), + "%s: SCIC Remote Node Context 0x%p was " + "suspeneded by hardware while being resumed.\n", + __func__, sci_rnc); + break; + default: + goto out; + } + } + break; + case SCI_RNC_READY: + switch (scu_get_event_type(event_code)) { + case SCU_EVENT_TL_RNC_SUSPEND_TX: + sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED); + sci_rnc->suspension_code = scu_get_event_specifier(event_code); + break; + case SCU_EVENT_TL_RNC_SUSPEND_TX_RX: + sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED); + sci_rnc->suspension_code = scu_get_event_specifier(event_code); + break; + default: + goto out; + } + break; + case SCI_RNC_AWAIT_SUSPENSION: + switch (scu_get_event_type(event_code)) { + case SCU_EVENT_TL_RNC_SUSPEND_TX: + sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED); + sci_rnc->suspension_code = scu_get_event_specifier(event_code); + break; + case SCU_EVENT_TL_RNC_SUSPEND_TX_RX: + sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED); + sci_rnc->suspension_code = scu_get_event_specifier(event_code); + break; + default: + goto out; + } + break; + default: + dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), + "%s: invalid state %d\n", __func__, state); + return SCI_FAILURE_INVALID_STATE; + } + return SCI_SUCCESS; + + out: + dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), + "%s: code: %#x state: %d\n", __func__, event_code, state); + return SCI_FAILURE; + +} + +enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc, + scics_sds_remote_node_context_callback cb_fn, + void *cb_p) +{ + enum scis_sds_remote_node_context_states state; + + state = sci_rnc->sm.current_state_id; + switch (state) { + case SCI_RNC_INVALIDATING: + sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p); + return SCI_SUCCESS; + case SCI_RNC_POSTING: + case SCI_RNC_RESUMING: + case SCI_RNC_READY: + case SCI_RNC_TX_SUSPENDED: + case SCI_RNC_TX_RX_SUSPENDED: + case SCI_RNC_AWAIT_SUSPENSION: + sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p); + sci_change_state(&sci_rnc->sm, 
SCI_RNC_INVALIDATING); + return SCI_SUCCESS; + case SCI_RNC_INITIAL: + dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), + "%s: invalid state %d\n", __func__, state); + /* We have decided that the destruct request on the remote node context + * can not fail since it is either in the initial/destroyed state or is + * can be destroyed. + */ + return SCI_SUCCESS; + default: + dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), + "%s: invalid state %d\n", __func__, state); + return SCI_FAILURE_INVALID_STATE; + } +} + +enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc, + u32 suspend_type, + scics_sds_remote_node_context_callback cb_fn, + void *cb_p) +{ + enum scis_sds_remote_node_context_states state; + + state = sci_rnc->sm.current_state_id; + if (state != SCI_RNC_READY) { + dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), + "%s: invalid state %d\n", __func__, state); + return SCI_FAILURE_INVALID_STATE; + } + + sci_rnc->user_callback = cb_fn; + sci_rnc->user_cookie = cb_p; + sci_rnc->suspension_code = suspend_type; + + if (suspend_type == SCI_SOFTWARE_SUSPENSION) { + sci_remote_device_post_request(rnc_to_dev(sci_rnc), + SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX); + } + + sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION); + return SCI_SUCCESS; +} + +enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc, + scics_sds_remote_node_context_callback cb_fn, + void *cb_p) +{ + enum scis_sds_remote_node_context_states state; + + state = sci_rnc->sm.current_state_id; + switch (state) { + case SCI_RNC_INITIAL: + if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) + return SCI_FAILURE_INVALID_STATE; + + sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); + sci_remote_node_context_construct_buffer(sci_rnc); + sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING); + return SCI_SUCCESS; + case SCI_RNC_POSTING: + case SCI_RNC_INVALIDATING: + case SCI_RNC_RESUMING: + if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY) + return SCI_FAILURE_INVALID_STATE; + + sci_rnc->user_callback = cb_fn; + sci_rnc->user_cookie = cb_p; + return SCI_SUCCESS; + case SCI_RNC_TX_SUSPENDED: { + struct isci_remote_device *idev = rnc_to_dev(sci_rnc); + struct domain_device *dev = idev->domain_dev; + + sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); + + /* TODO: consider adding a resume action of NONE, INVALIDATE, WRITE_TLCR */ + if (dev->dev_type == SAS_END_DEV || dev_is_expander(dev)) + sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING); + else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { + if (idev->is_direct_attached) { + /* @todo Fix this since I am being silly in writing to the STPTLDARNI register. 
*/ + sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING); + } else { + sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING); + } + } else + return SCI_FAILURE; + return SCI_SUCCESS; + } + case SCI_RNC_TX_RX_SUSPENDED: + sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); + sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING); + return SCI_FAILURE_INVALID_STATE; + case SCI_RNC_AWAIT_SUSPENSION: + sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); + return SCI_SUCCESS; + default: + dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), + "%s: invalid state %d\n", __func__, state); + return SCI_FAILURE_INVALID_STATE; + } +} + +enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc, + struct isci_request *ireq) +{ + enum scis_sds_remote_node_context_states state; + + state = sci_rnc->sm.current_state_id; + + switch (state) { + case SCI_RNC_READY: + return SCI_SUCCESS; + case SCI_RNC_TX_SUSPENDED: + case SCI_RNC_TX_RX_SUSPENDED: + case SCI_RNC_AWAIT_SUSPENSION: + dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), + "%s: invalid state %d\n", __func__, state); + return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; + default: + break; + } + dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)), + "%s: requested to start IO while still resuming, %d\n", + __func__, state); + return SCI_FAILURE_INVALID_STATE; +} + +enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc, + struct isci_request *ireq) +{ + enum scis_sds_remote_node_context_states state; + + state = sci_rnc->sm.current_state_id; + switch (state) { + case SCI_RNC_RESUMING: + case SCI_RNC_READY: + case SCI_RNC_AWAIT_SUSPENSION: + return SCI_SUCCESS; + case SCI_RNC_TX_SUSPENDED: + case SCI_RNC_TX_RX_SUSPENDED: + sci_remote_node_context_resume(sci_rnc, NULL, NULL); + return SCI_SUCCESS; + default: + dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), + "%s: invalid state %d\n", __func__, state); + return SCI_FAILURE_INVALID_STATE; + } +} diff --git a/drivers/scsi/isci/remote_node_context.h b/drivers/scsi/isci/remote_node_context.h new file mode 100644 index 00000000000..41580ad1252 --- /dev/null +++ b/drivers/scsi/isci/remote_node_context.h @@ -0,0 +1,224 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ +#define _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ + +/** + * This file contains the structures, constants, and prototypes associated with + * the remote node context in the silicon. It exists to model and manage + * the remote node context in the silicon. + * + * + */ + +#include "isci.h" + +/** + * + * + * This constant represents an invalid remote device id, it is used to program + * the STPDARNI register so the driver knows when it has received a SIGNATURE + * FIS from the SCU. + */ +#define SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX 0x0FFF + +#define SCU_HARDWARE_SUSPENSION (0) +#define SCI_SOFTWARE_SUSPENSION (1) + +struct isci_request; +struct isci_remote_device; +struct sci_remote_node_context; + +typedef void (*scics_sds_remote_node_context_callback)(void *); + +/** + * This is the enumeration of the remote node context states. + */ +enum scis_sds_remote_node_context_states { + /** + * This state is the initial state for a remote node context. On a resume + * request the remote node context will transition to the posting state. + */ + SCI_RNC_INITIAL, + + /** + * This is a transition state that posts the RNi to the hardware. Once the RNC + * is posted the remote node context will be made ready. + */ + SCI_RNC_POSTING, + + /** + * This is a transition state that will post an RNC invalidate to the + * hardware. Once the invalidate is complete the remote node context will + * transition to the posting state. + */ + SCI_RNC_INVALIDATING, + + /** + * This is a transition state that will post an RNC resume to the hardare. + * Once the event notification of resume complete is received the remote node + * context will transition to the ready state. + */ + SCI_RNC_RESUMING, + + /** + * This is the state that the remote node context must be in to accept io + * request operations. + */ + SCI_RNC_READY, + + /** + * This is the state that the remote node context transitions to when it gets + * a TX suspend notification from the hardware. 
+ */ + SCI_RNC_TX_SUSPENDED, + + /** + * This is the state that the remote node context transitions to when it gets + * a TX RX suspend notification from the hardware. + */ + SCI_RNC_TX_RX_SUSPENDED, + + /** + * This state is a wait state for the remote node context that waits for a + * suspend notification from the hardware. This state is entered when either + * there is a request to supend the remote node context or when there is a TC + * completion where the remote node will be suspended by the hardware. + */ + SCI_RNC_AWAIT_SUSPENSION +}; + +/** + * + * + * This enumeration is used to define the end destination state for the remote + * node context. + */ +enum sci_remote_node_context_destination_state { + SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED, + SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY, + SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL +}; + +/** + * struct sci_remote_node_context - This structure contains the data + * associated with the remote node context object. The remote node context + * (RNC) object models the the remote device information necessary to manage + * the silicon RNC. + */ +struct sci_remote_node_context { + /** + * This field indicates the remote node index (RNI) associated with + * this RNC. + */ + u16 remote_node_index; + + /** + * This field is the recored suspension code or the reason for the remote node + * context suspension. + */ + u32 suspension_code; + + /** + * This field is true if the remote node context is resuming from its current + * state. This can cause an automatic resume on receiving a suspension + * notification. + */ + enum sci_remote_node_context_destination_state destination_state; + + /** + * This field contains the callback function that the user requested to be + * called when the requested state transition is complete. + */ + scics_sds_remote_node_context_callback user_callback; + + /** + * This field contains the parameter that is called when the user requested + * state transition is completed. + */ + void *user_cookie; + + /** + * This field contains the data for the object's state machine. + */ + struct sci_base_state_machine sm; +}; + +void sci_remote_node_context_construct(struct sci_remote_node_context *rnc, + u16 remote_node_index); + + +bool sci_remote_node_context_is_ready( + struct sci_remote_node_context *sci_rnc); + +enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc, + u32 event_code); +enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc, + scics_sds_remote_node_context_callback callback, + void *callback_parameter); +enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc, + u32 suspend_type, + scics_sds_remote_node_context_callback cb_fn, + void *cb_p); +enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc, + scics_sds_remote_node_context_callback cb_fn, + void *cb_p); +enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc, + struct isci_request *ireq); +enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc, + struct isci_request *ireq); + +#endif /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */ diff --git a/drivers/scsi/isci/remote_node_table.c b/drivers/scsi/isci/remote_node_table.c new file mode 100644 index 00000000000..301b3141945 --- /dev/null +++ b/drivers/scsi/isci/remote_node_table.c @@ -0,0 +1,598 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. 
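With the prototypes above in view, the remote node context API reduces to a small lifecycle. The fragment below is only a call-ordering sketch: it assumes the isci driver's internal headers and an isci_remote_device that embeds the RNC as idev->rnc (as elsewhere in this series), so it is not a standalone, buildable unit.

/* Call-ordering sketch only; assumes the isci driver's own headers. */
static void rnc_transition_done(void *cookie)
{
	/* e.g. complete the pending device start/stop tracked by 'cookie' */
}

static void example_rnc_lifecycle(struct isci_remote_device *idev, u16 rni)
{
	struct sci_remote_node_context *rnc = &idev->rnc;

	sci_remote_node_context_construct(rnc, rni);

	/* Build and post the context to the hardware; rnc_transition_done()
	 * runs once the SCI_RNC_READY state is entered. */
	sci_remote_node_context_resume(rnc, rnc_transition_done, idev);

	if (sci_remote_node_context_is_ready(rnc)) {
		/* I/O and task requests may now be started on this node. */
	}

	/* Software-requested quiesce: posts a suspend-TX and waits in
	 * SCI_RNC_AWAIT_SUSPENSION for the hardware notification. */
	sci_remote_node_context_suspend(rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL);

	/* Tear down: invalidates the context; the callback fires when the
	 * state machine returns to SCI_RNC_INITIAL. */
	sci_remote_node_context_destruct(rnc, rnc_transition_done, idev);
}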
When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * This file contains the implementation of the SCIC_SDS_REMOTE_NODE_TABLE + * public, protected, and private methods. + * + * + */ +#include "remote_node_table.h" +#include "remote_node_context.h" + +/** + * + * @remote_node_table: This is the remote node index table from which the + * selection will be made. + * @group_table_index: This is the index to the group table from which to + * search for an available selection. + * + * This routine will find the bit position in absolute bit terms of the next 32 + * + bit position. If there are available bits in the first u32 then it is + * just bit position. u32 This is the absolute bit position for an available + * group. 
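sci_remote_node_table_get_group_index() described above, and the set/clear helpers that follow it, are ordinary bitmap operations: a group index splits into a dword index (group_index / 32) and a bit index (group_index % 32), and the "get" routine is a first-set-bit scan over the array. The standalone program below uses local names (not the driver's) to demonstrate the same arithmetic on a two-dword table.

/* Standalone sketch of the group-table bitmap arithmetic. */
#include <stdint.h>
#include <stdio.h>

#define GROUP_DWORDS 2
#define INVALID_INDEX 0xFFFFFFFFu

static void group_set(uint32_t *table, uint32_t group_index)
{
	table[group_index / 32] |= 1u << (group_index % 32);
}

static void group_clear(uint32_t *table, uint32_t group_index)
{
	table[group_index / 32] &= ~(1u << (group_index % 32));
}

static uint32_t group_first_set(const uint32_t *table)
{
	uint32_t dword, bit;

	for (dword = 0; dword < GROUP_DWORDS; dword++)
		if (table[dword])
			for (bit = 0; bit < 32; bit++)
				if (table[dword] & (1u << bit))
					return dword * 32 + bit;
	return INVALID_INDEX;
}

int main(void)
{
	uint32_t table[GROUP_DWORDS] = { 0, 0 };

	group_set(table, 37);	/* dword 1, bit 5 */
	printf("first set group = %u\n", group_first_set(table));	/* 37 */
	group_clear(table, 37);
	printf("first set group = 0x%x\n", group_first_set(table));	/* invalid */
	return 0;
}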
+ */ +static u32 sci_remote_node_table_get_group_index( + struct sci_remote_node_table *remote_node_table, + u32 group_table_index) +{ + u32 dword_index; + u32 *group_table; + u32 bit_index; + + group_table = remote_node_table->remote_node_groups[group_table_index]; + + for (dword_index = 0; dword_index < remote_node_table->group_array_size; dword_index++) { + if (group_table[dword_index] != 0) { + for (bit_index = 0; bit_index < 32; bit_index++) { + if ((group_table[dword_index] & (1 << bit_index)) != 0) { + return (dword_index * 32) + bit_index; + } + } + } + } + + return SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX; +} + +/** + * + * @out]: remote_node_table This the remote node table in which to clear the + * selector. + * @set_index: This is the remote node selector in which the change will be + * made. + * @group_index: This is the bit index in the table to be modified. + * + * This method will clear the group index entry in the specified group index + * table. none + */ +static void sci_remote_node_table_clear_group_index( + struct sci_remote_node_table *remote_node_table, + u32 group_table_index, + u32 group_index) +{ + u32 dword_index; + u32 bit_index; + u32 *group_table; + + BUG_ON(group_table_index >= SCU_STP_REMOTE_NODE_COUNT); + BUG_ON(group_index >= (u32)(remote_node_table->group_array_size * 32)); + + dword_index = group_index / 32; + bit_index = group_index % 32; + group_table = remote_node_table->remote_node_groups[group_table_index]; + + group_table[dword_index] = group_table[dword_index] & ~(1 << bit_index); +} + +/** + * + * @out]: remote_node_table This the remote node table in which to set the + * selector. + * @group_table_index: This is the remote node selector in which the change + * will be made. + * @group_index: This is the bit position in the table to be modified. + * + * This method will set the group index bit entry in the specified gropu index + * table. none + */ +static void sci_remote_node_table_set_group_index( + struct sci_remote_node_table *remote_node_table, + u32 group_table_index, + u32 group_index) +{ + u32 dword_index; + u32 bit_index; + u32 *group_table; + + BUG_ON(group_table_index >= SCU_STP_REMOTE_NODE_COUNT); + BUG_ON(group_index >= (u32)(remote_node_table->group_array_size * 32)); + + dword_index = group_index / 32; + bit_index = group_index % 32; + group_table = remote_node_table->remote_node_groups[group_table_index]; + + group_table[dword_index] = group_table[dword_index] | (1 << bit_index); +} + +/** + * + * @out]: remote_node_table This is the remote node table in which to modify + * the remote node availability. + * @remote_node_index: This is the remote node index that is being returned to + * the table. + * + * This method will set the remote to available in the remote node allocation + * table. 
none + */ +static void sci_remote_node_table_set_node_index( + struct sci_remote_node_table *remote_node_table, + u32 remote_node_index) +{ + u32 dword_location; + u32 dword_remainder; + u32 slot_normalized; + u32 slot_position; + + BUG_ON( + (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD) + <= (remote_node_index / SCU_STP_REMOTE_NODE_COUNT) + ); + + dword_location = remote_node_index / SCIC_SDS_REMOTE_NODES_PER_DWORD; + dword_remainder = remote_node_index % SCIC_SDS_REMOTE_NODES_PER_DWORD; + slot_normalized = (dword_remainder / SCU_STP_REMOTE_NODE_COUNT) * sizeof(u32); + slot_position = remote_node_index % SCU_STP_REMOTE_NODE_COUNT; + + remote_node_table->available_remote_nodes[dword_location] |= + 1 << (slot_normalized + slot_position); +} + +/** + * + * @out]: remote_node_table This is the remote node table from which to clear + * the available remote node bit. + * @remote_node_index: This is the remote node index which is to be cleared + * from the table. + * + * This method clears the remote node index from the table of available remote + * nodes. none + */ +static void sci_remote_node_table_clear_node_index( + struct sci_remote_node_table *remote_node_table, + u32 remote_node_index) +{ + u32 dword_location; + u32 dword_remainder; + u32 slot_position; + u32 slot_normalized; + + BUG_ON( + (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD) + <= (remote_node_index / SCU_STP_REMOTE_NODE_COUNT) + ); + + dword_location = remote_node_index / SCIC_SDS_REMOTE_NODES_PER_DWORD; + dword_remainder = remote_node_index % SCIC_SDS_REMOTE_NODES_PER_DWORD; + slot_normalized = (dword_remainder / SCU_STP_REMOTE_NODE_COUNT) * sizeof(u32); + slot_position = remote_node_index % SCU_STP_REMOTE_NODE_COUNT; + + remote_node_table->available_remote_nodes[dword_location] &= + ~(1 << (slot_normalized + slot_position)); +} + +/** + * + * @out]: remote_node_table The remote node table from which the slot will be + * cleared. + * @group_index: The index for the slot that is to be cleared. + * + * This method clears the entire table slot at the specified slot index. none + */ +static void sci_remote_node_table_clear_group( + struct sci_remote_node_table *remote_node_table, + u32 group_index) +{ + u32 dword_location; + u32 dword_remainder; + u32 dword_value; + + BUG_ON( + (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD) + <= (group_index / SCU_STP_REMOTE_NODE_COUNT) + ); + + dword_location = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD; + dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD; + + dword_value = remote_node_table->available_remote_nodes[dword_location]; + dword_value &= ~(SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4)); + remote_node_table->available_remote_nodes[dword_location] = dword_value; +} + +/** + * + * @remote_node_table: + * + * THis method sets an entire remote node group in the remote node table. 
+ */
+static void sci_remote_node_table_set_group(
+ struct sci_remote_node_table *remote_node_table,
+ u32 group_index)
+{
+ u32 dword_location;
+ u32 dword_remainder;
+ u32 dword_value;
+
+ BUG_ON(
+ (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
+ <= (group_index / SCU_STP_REMOTE_NODE_COUNT)
+ );
+
+ dword_location = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+ dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+
+ dword_value = remote_node_table->available_remote_nodes[dword_location];
+ dword_value |= (SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
+ remote_node_table->available_remote_nodes[dword_location] = dword_value;
+}
+
+/**
+ *
+ * @remote_node_table: This is the remote node table from which the group
+ * value is to be returned.
+ * @group_index: This is the group index to use to find the group value.
+ *
+ * This method will return the group value for the specified group index, i.e.
+ * the bit values at the specified remote node group index.
+ */
+static u8 sci_remote_node_table_get_group_value(
+ struct sci_remote_node_table *remote_node_table,
+ u32 group_index)
+{
+ u32 dword_location;
+ u32 dword_remainder;
+ u32 dword_value;
+
+ dword_location = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+ dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+
+ dword_value = remote_node_table->available_remote_nodes[dword_location];
+ dword_value &= (SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
+ dword_value = dword_value >> (dword_remainder * 4);
+
+ return (u8)dword_value;
+}
+
+/**
+ *
+ * @remote_node_table: The remote node table which is to be initialized.
+ * @remote_node_entries: The number of entries to put in the table.
+ *
+ * This method will initialize the remote node table for use.
none + */ +void sci_remote_node_table_initialize( + struct sci_remote_node_table *remote_node_table, + u32 remote_node_entries) +{ + u32 index; + + /* + * Initialize the raw data we could improve the speed by only initializing + * those entries that we are actually going to be used */ + memset( + remote_node_table->available_remote_nodes, + 0x00, + sizeof(remote_node_table->available_remote_nodes) + ); + + memset( + remote_node_table->remote_node_groups, + 0x00, + sizeof(remote_node_table->remote_node_groups) + ); + + /* Initialize the available remote node sets */ + remote_node_table->available_nodes_array_size = (u16) + (remote_node_entries / SCIC_SDS_REMOTE_NODES_PER_DWORD) + + ((remote_node_entries % SCIC_SDS_REMOTE_NODES_PER_DWORD) != 0); + + + /* Initialize each full DWORD to a FULL SET of remote nodes */ + for (index = 0; index < remote_node_entries; index++) { + sci_remote_node_table_set_node_index(remote_node_table, index); + } + + remote_node_table->group_array_size = (u16) + (remote_node_entries / (SCU_STP_REMOTE_NODE_COUNT * 32)) + + ((remote_node_entries % (SCU_STP_REMOTE_NODE_COUNT * 32)) != 0); + + for (index = 0; index < (remote_node_entries / SCU_STP_REMOTE_NODE_COUNT); index++) { + /* + * These are all guaranteed to be full slot values so fill them in the + * available sets of 3 remote nodes */ + sci_remote_node_table_set_group_index(remote_node_table, 2, index); + } + + /* Now fill in any remainders that we may find */ + if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 2) { + sci_remote_node_table_set_group_index(remote_node_table, 1, index); + } else if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 1) { + sci_remote_node_table_set_group_index(remote_node_table, 0, index); + } +} + +/** + * + * @out]: remote_node_table The remote node table from which to allocate a + * remote node. + * @table_index: The group index that is to be used for the search. + * + * This method will allocate a single RNi from the remote node table. The + * table index will determine from which remote node group table to search. + * This search may fail and another group node table can be specified. The + * function is designed to allow a serach of the available single remote node + * group up to the triple remote node group. If an entry is found in the + * specified table the remote node is removed and the remote node groups are + * updated. The RNi value or an invalid remote node context if an RNi can not + * be found. 
+ */ +static u16 sci_remote_node_table_allocate_single_remote_node( + struct sci_remote_node_table *remote_node_table, + u32 group_table_index) +{ + u8 index; + u8 group_value; + u32 group_index; + u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; + + group_index = sci_remote_node_table_get_group_index( + remote_node_table, group_table_index); + + /* We could not find an available slot in the table selector 0 */ + if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) { + group_value = sci_remote_node_table_get_group_value( + remote_node_table, group_index); + + for (index = 0; index < SCU_STP_REMOTE_NODE_COUNT; index++) { + if (((1 << index) & group_value) != 0) { + /* We have selected a bit now clear it */ + remote_node_index = (u16)(group_index * SCU_STP_REMOTE_NODE_COUNT + + index); + + sci_remote_node_table_clear_group_index( + remote_node_table, group_table_index, group_index + ); + + sci_remote_node_table_clear_node_index( + remote_node_table, remote_node_index + ); + + if (group_table_index > 0) { + sci_remote_node_table_set_group_index( + remote_node_table, group_table_index - 1, group_index + ); + } + + break; + } + } + } + + return remote_node_index; +} + +/** + * + * @remote_node_table: This is the remote node table from which to allocate the + * remote node entries. + * @group_table_index: THis is the group table index which must equal two (2) + * for this operation. + * + * This method will allocate three consecutive remote node context entries. If + * there are no remaining triple entries the function will return a failure. + * The remote node index that represents three consecutive remote node entries + * or an invalid remote node context if none can be found. + */ +static u16 sci_remote_node_table_allocate_triple_remote_node( + struct sci_remote_node_table *remote_node_table, + u32 group_table_index) +{ + u32 group_index; + u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; + + group_index = sci_remote_node_table_get_group_index( + remote_node_table, group_table_index); + + if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) { + remote_node_index = (u16)group_index * SCU_STP_REMOTE_NODE_COUNT; + + sci_remote_node_table_clear_group_index( + remote_node_table, group_table_index, group_index + ); + + sci_remote_node_table_clear_group( + remote_node_table, group_index + ); + } + + return remote_node_index; +} + +/** + * + * @remote_node_table: This is the remote node table from which the remote node + * allocation is to take place. + * @remote_node_count: This is ther remote node count which is one of + * SCU_SSP_REMOTE_NODE_COUNT(1) or SCU_STP_REMOTE_NODE_COUNT(3). + * + * This method will allocate a remote node that mataches the remote node count + * specified by the caller. Valid values for remote node count is + * SCU_SSP_REMOTE_NODE_COUNT(1) or SCU_STP_REMOTE_NODE_COUNT(3). u16 This is + * the remote node index that is returned or an invalid remote node context. 
+ */ +u16 sci_remote_node_table_allocate_remote_node( + struct sci_remote_node_table *remote_node_table, + u32 remote_node_count) +{ + u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; + + if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) { + remote_node_index = + sci_remote_node_table_allocate_single_remote_node( + remote_node_table, 0); + + if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) { + remote_node_index = + sci_remote_node_table_allocate_single_remote_node( + remote_node_table, 1); + } + + if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) { + remote_node_index = + sci_remote_node_table_allocate_single_remote_node( + remote_node_table, 2); + } + } else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) { + remote_node_index = + sci_remote_node_table_allocate_triple_remote_node( + remote_node_table, 2); + } + + return remote_node_index; +} + +/** + * + * @remote_node_table: + * + * This method will free a single remote node index back to the remote node + * table. This routine will update the remote node groups + */ +static void sci_remote_node_table_release_single_remote_node( + struct sci_remote_node_table *remote_node_table, + u16 remote_node_index) +{ + u32 group_index; + u8 group_value; + + group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT; + + group_value = sci_remote_node_table_get_group_value(remote_node_table, group_index); + + /* + * Assert that we are not trying to add an entry to a slot that is already + * full. */ + BUG_ON(group_value == SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE); + + if (group_value == 0x00) { + /* + * There are no entries in this slot so it must be added to the single + * slot table. */ + sci_remote_node_table_set_group_index(remote_node_table, 0, group_index); + } else if ((group_value & (group_value - 1)) == 0) { + /* + * There is only one entry in this slot so it must be moved from the + * single slot table to the dual slot table */ + sci_remote_node_table_clear_group_index(remote_node_table, 0, group_index); + sci_remote_node_table_set_group_index(remote_node_table, 1, group_index); + } else { + /* + * There are two entries in the slot so it must be moved from the dual + * slot table to the tripple slot table. */ + sci_remote_node_table_clear_group_index(remote_node_table, 1, group_index); + sci_remote_node_table_set_group_index(remote_node_table, 2, group_index); + } + + sci_remote_node_table_set_node_index(remote_node_table, remote_node_index); +} + +/** + * + * @remote_node_table: This is the remote node table to which the remote node + * index is to be freed. + * + * This method will release a group of three consecutive remote nodes back to + * the free remote nodes. + */ +static void sci_remote_node_table_release_triple_remote_node( + struct sci_remote_node_table *remote_node_table, + u16 remote_node_index) +{ + u32 group_index; + + group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT; + + sci_remote_node_table_set_group_index( + remote_node_table, 2, group_index + ); + + sci_remote_node_table_set_group(remote_node_table, group_index); +} + +/** + * + * @remote_node_table: The remote node table to which the remote node index is + * to be freed. + * @remote_node_count: This is the count of consecutive remote nodes that are + * to be freed. + * + * This method will release the remote node index back into the remote node + * table free pool. 
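The release path above relies on the three group tables meaning "groups with exactly one, two or three free nodes" (tables 0, 1 and 2 respectively), so releasing a single node promotes its group one table upward. The (group_value & (group_value - 1)) == 0 test is the usual "at most one bit set" trick; combined with the preceding empty check, it means "exactly one free node". A standalone illustration, with local names only:

/* Standalone illustration of the bit trick in
 * sci_remote_node_table_release_single_remote_node(); the group_value is the
 * group's availability nibble before the released node's bit is set back. */
#include <stdint.h>
#include <stdio.h>

static const char *classify(uint8_t group_value)
{
	if (group_value == 0x00)
		return "group had no free nodes; add it to group table 0";
	if ((group_value & (group_value - 1)) == 0)
		return "group had one free node; move it from table 0 to table 1";
	return "group had two free nodes; move it from table 1 to table 2";
}

int main(void)
{
	printf("0x00: %s\n", classify(0x00));
	printf("0x02: %s\n", classify(0x02));
	printf("0x05: %s\n", classify(0x05));
	return 0;
}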
+ */ +void sci_remote_node_table_release_remote_node_index( + struct sci_remote_node_table *remote_node_table, + u32 remote_node_count, + u16 remote_node_index) +{ + if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) { + sci_remote_node_table_release_single_remote_node( + remote_node_table, remote_node_index); + } else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) { + sci_remote_node_table_release_triple_remote_node( + remote_node_table, remote_node_index); + } +} + diff --git a/drivers/scsi/isci/remote_node_table.h b/drivers/scsi/isci/remote_node_table.h new file mode 100644 index 00000000000..721ab982d2a --- /dev/null +++ b/drivers/scsi/isci/remote_node_table.h @@ -0,0 +1,188 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _SCIC_SDS_REMOTE_NODE_TABLE_H_ +#define _SCIC_SDS_REMOTE_NODE_TABLE_H_ + +#include "isci.h" + +/** + * + * + * Remote node sets are sets of remote node index in the remtoe node table The + * SCU hardware requires that STP remote node entries take three consecutive + * remote node index so the table is arranged in sets of three. The bits are + * used as 0111 0111 to make a byte and the bits define the set of three remote + * nodes to use as a sequence. + */ +#define SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE 2 + +/** + * + * + * Since the remote node table is organized as DWORDS take the remote node sets + * in bytes and represent them in DWORDs. The lowest ordered bits are the ones + * used in case full DWORD is not being used. i.e. 0000 0000 0000 0000 0111 + * 0111 0111 0111 // if only a single WORD is in use in the DWORD. + */ +#define SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD \ + (sizeof(u32) * SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE) +/** + * + * + * This is a count of the numeber of remote nodes that can be represented in a + * byte + */ +#define SCIC_SDS_REMOTE_NODES_PER_BYTE \ + (SCU_STP_REMOTE_NODE_COUNT * SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE) + +/** + * + * + * This is a count of the number of remote nodes that can be represented in a + * DWROD + */ +#define SCIC_SDS_REMOTE_NODES_PER_DWORD \ + (sizeof(u32) * SCIC_SDS_REMOTE_NODES_PER_BYTE) + +/** + * + * + * This is the number of bits in a remote node group + */ +#define SCIC_SDS_REMOTE_NODES_BITS_PER_GROUP 4 + +#define SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX (0xFFFFFFFF) +#define SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE (0x07) +#define SCIC_SDS_REMOTE_NODE_TABLE_EMPTY_SLOT_VALUE (0x00) + +/** + * + * + * Expander attached sata remote node count + */ +#define SCU_STP_REMOTE_NODE_COUNT 3 + +/** + * + * + * Expander or direct attached ssp remote node count + */ +#define SCU_SSP_REMOTE_NODE_COUNT 1 + +/** + * + * + * Direct attached STP remote node count + */ +#define SCU_SATA_REMOTE_NODE_COUNT 1 + +/** + * struct sci_remote_node_table - + * + * + */ +struct sci_remote_node_table { + /** + * This field contains the array size in dwords + */ + u16 available_nodes_array_size; + + /** + * This field contains the array size of the + */ + u16 group_array_size; + + /** + * This field is the array of available remote node entries in bits. + * Because of the way STP remote node data is allocated on the SCU hardware + * the remote nodes must occupy three consecutive remote node context + * entries. For ease of allocation and de-allocation we have broken the + * sets of three into a single nibble. When the STP RNi is allocated all + * of the bits in the nibble are cleared. This math results in a table size + * of MAX_REMOTE_NODES / CONSECUTIVE RNi ENTRIES for STP / 2 entries per byte. + */ + u32 available_remote_nodes[ + (SCI_MAX_REMOTE_DEVICES / SCIC_SDS_REMOTE_NODES_PER_DWORD) + + ((SCI_MAX_REMOTE_DEVICES % SCIC_SDS_REMOTE_NODES_PER_DWORD) != 0)]; + + /** + * This field is the nibble selector for the above table. There are three + * possible selectors each for fast lookup when trying to find one, two or + * three remote node entries. 
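The macros above encode one three-node group per 4-bit nibble, giving 8 groups and 24 representable remote node indexes per dword. The standalone snippet below (local names only, not the driver's) shows a fully free dword, 0x77777777, and how the full-slot value 0x7 isolates a single group's nibble, in the same way sci_remote_node_table_get_group_value() does.

/* Standalone sketch of the nibble encoding described above. */
#include <stdint.h>
#include <stdio.h>

#define FULL_SLOT_VALUE  0x07u	/* SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE */
#define GROUPS_PER_DWORD 8	/* SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD */

static uint8_t get_group_value(uint32_t dword_value, uint32_t group_index)
{
	uint32_t shift = (group_index % GROUPS_PER_DWORD) * 4;

	return (uint8_t)((dword_value >> shift) & FULL_SLOT_VALUE);
}

int main(void)
{
	uint32_t dword = 0x77777777u;	/* every 3-node group fully available */

	/* Allocate one node out of group 2: clear bit 0 of its nibble. */
	dword &= ~(1u << (2 * 4));

	printf("group 1 value = 0x%x\n", get_group_value(dword, 1));	/* 0x7 */
	printf("group 2 value = 0x%x\n", get_group_value(dword, 2));	/* 0x6 */
	return 0;
}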
+ */ + u32 remote_node_groups[ + SCU_STP_REMOTE_NODE_COUNT][ + (SCI_MAX_REMOTE_DEVICES / (32 * SCU_STP_REMOTE_NODE_COUNT)) + + ((SCI_MAX_REMOTE_DEVICES % (32 * SCU_STP_REMOTE_NODE_COUNT)) != 0)]; + +}; + +/* --------------------------------------------------------------------------- */ + +void sci_remote_node_table_initialize( + struct sci_remote_node_table *remote_node_table, + u32 remote_node_entries); + +u16 sci_remote_node_table_allocate_remote_node( + struct sci_remote_node_table *remote_node_table, + u32 remote_node_count); + +void sci_remote_node_table_release_remote_node_index( + struct sci_remote_node_table *remote_node_table, + u32 remote_node_count, + u16 remote_node_index); + +#endif /* _SCIC_SDS_REMOTE_NODE_TABLE_H_ */ diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c new file mode 100644 index 00000000000..a46e07ac789 --- /dev/null +++ b/drivers/scsi/isci/request.c @@ -0,0 +1,3391 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "isci.h" +#include "task.h" +#include "request.h" +#include "scu_completion_codes.h" +#include "scu_event_codes.h" +#include "sas.h" + +static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq, + int idx) +{ + if (idx == 0) + return &ireq->tc->sgl_pair_ab; + else if (idx == 1) + return &ireq->tc->sgl_pair_cd; + else if (idx < 0) + return NULL; + else + return &ireq->sg_table[idx - 2]; +} + +static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost, + struct isci_request *ireq, u32 idx) +{ + u32 offset; + + if (idx == 0) { + offset = (void *) &ireq->tc->sgl_pair_ab - + (void *) &ihost->task_context_table[0]; + return ihost->task_context_dma + offset; + } else if (idx == 1) { + offset = (void *) &ireq->tc->sgl_pair_cd - + (void *) &ihost->task_context_table[0]; + return ihost->task_context_dma + offset; + } + + return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]); +} + +static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg) +{ + e->length = sg_dma_len(sg); + e->address_upper = upper_32_bits(sg_dma_address(sg)); + e->address_lower = lower_32_bits(sg_dma_address(sg)); + e->address_modifier = 0; +} + +static void sci_request_build_sgl(struct isci_request *ireq) +{ + struct isci_host *ihost = ireq->isci_host; + struct sas_task *task = isci_request_access_task(ireq); + struct scatterlist *sg = NULL; + dma_addr_t dma_addr; + u32 sg_idx = 0; + struct scu_sgl_element_pair *scu_sg = NULL; + struct scu_sgl_element_pair *prev_sg = NULL; + + if (task->num_scatter > 0) { + sg = task->scatter; + + while (sg) { + scu_sg = to_sgl_element_pair(ireq, sg_idx); + init_sgl_element(&scu_sg->A, sg); + sg = sg_next(sg); + if (sg) { + init_sgl_element(&scu_sg->B, sg); + sg = sg_next(sg); + } else + memset(&scu_sg->B, 0, sizeof(scu_sg->B)); + + if (prev_sg) { + dma_addr = to_sgl_element_pair_dma(ihost, + ireq, + sg_idx); + + prev_sg->next_pair_upper = + upper_32_bits(dma_addr); + prev_sg->next_pair_lower = + lower_32_bits(dma_addr); + } + + prev_sg = scu_sg; + sg_idx++; + } + } else { /* handle when no sg */ + scu_sg = to_sgl_element_pair(ireq, sg_idx); + + dma_addr = dma_map_single(&ihost->pdev->dev, + task->scatter, + task->total_xfer_len, + task->data_dir); + + ireq->zero_scatter_daddr = dma_addr; + + scu_sg->A.length = task->total_xfer_len; + scu_sg->A.address_upper = upper_32_bits(dma_addr); + scu_sg->A.address_lower = lower_32_bits(dma_addr); + } + + if (scu_sg) { + scu_sg->next_pair_upper = 0; + scu_sg->next_pair_lower = 0; + } +} + +static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq) +{ + struct ssp_cmd_iu *cmd_iu; + struct sas_task *task = isci_request_access_task(ireq); + + cmd_iu = &ireq->ssp.cmd; + + memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8); + cmd_iu->add_cdb_len = 0; + cmd_iu->_r_a = 0; + cmd_iu->_r_b = 0; + cmd_iu->en_fburst = 0; /* unsupported */ + cmd_iu->task_prio = task->ssp_task.task_prio; + cmd_iu->task_attr = task->ssp_task.task_attr; + cmd_iu->_r_c = 
0; + + sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb, + sizeof(task->ssp_task.cdb) / sizeof(u32)); +} + +static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq) +{ + struct ssp_task_iu *task_iu; + struct sas_task *task = isci_request_access_task(ireq); + struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq); + + task_iu = &ireq->ssp.tmf; + + memset(task_iu, 0, sizeof(struct ssp_task_iu)); + + memcpy(task_iu->LUN, task->ssp_task.LUN, 8); + + task_iu->task_func = isci_tmf->tmf_code; + task_iu->task_tag = + (ireq->ttype == tmf_task) ? + isci_tmf->io_tag : + SCI_CONTROLLER_INVALID_IO_TAG; +} + +/** + * This method is will fill in the SCU Task Context for any type of SSP request. + * @sci_req: + * @task_context: + * + */ +static void scu_ssp_reqeust_construct_task_context( + struct isci_request *ireq, + struct scu_task_context *task_context) +{ + dma_addr_t dma_addr; + struct isci_remote_device *idev; + struct isci_port *iport; + + idev = ireq->target_device; + iport = idev->owning_port; + + /* Fill in the TC with the its required data */ + task_context->abort = 0; + task_context->priority = 0; + task_context->initiator_request = 1; + task_context->connection_rate = idev->connection_rate; + task_context->protocol_engine_index = ISCI_PEG; + task_context->logical_port_index = iport->physical_port_index; + task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP; + task_context->valid = SCU_TASK_CONTEXT_VALID; + task_context->context_type = SCU_TASK_CONTEXT_TYPE; + + task_context->remote_node_index = idev->rnc.remote_node_index; + task_context->command_code = 0; + + task_context->link_layer_control = 0; + task_context->do_not_dma_ssp_good_response = 1; + task_context->strict_ordering = 0; + task_context->control_frame = 0; + task_context->timeout_enable = 0; + task_context->block_guard_enable = 0; + + task_context->address_modifier = 0; + + /* task_context->type.ssp.tag = ireq->io_tag; */ + task_context->task_phase = 0x01; + + ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | + (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | + (iport->physical_port_index << + SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | + ISCI_TAG_TCI(ireq->io_tag)); + + /* + * Copy the physical address for the command buffer to the + * SCU Task Context + */ + dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd); + + task_context->command_iu_upper = upper_32_bits(dma_addr); + task_context->command_iu_lower = lower_32_bits(dma_addr); + + /* + * Copy the physical address for the response buffer to the + * SCU Task Context + */ + dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp); + + task_context->response_iu_upper = upper_32_bits(dma_addr); + task_context->response_iu_lower = lower_32_bits(dma_addr); +} + +/** + * This method is will fill in the SCU Task Context for a SSP IO request. 
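One subtlety in sci_io_request_build_ssp_command_iu() above is the CDB copy through sci_swab32_cpy(). That helper is defined elsewhere in the driver; the standalone sketch below only illustrates what it is assumed to do based on its name and use here, namely a dword-wise copy that byte-swaps each 32-bit word, and it assumes a little-endian host (matching the driver's x86 dependency).

/* Standalone sketch of the assumed behavior of sci_swab32_cpy(): copy the CDB
 * into 32-bit words with each word byte-swapped.  Illustration only. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void swab32_cpy(uint32_t *dest, const uint8_t *src, size_t word_cnt)
{
	size_t i;

	for (i = 0; i < word_cnt; i++) {
		uint32_t w;

		memcpy(&w, src + 4 * i, 4);
		dest[i] = ((w & 0x000000ffu) << 24) |
			  ((w & 0x0000ff00u) << 8)  |
			  ((w & 0x00ff0000u) >> 8)  |
			  ((w & 0xff000000u) >> 24);
	}
}

int main(void)
{
	/* A READ(10)-style CDB fragment: opcode 0x28 in byte 0. */
	uint8_t cdb[8] = { 0x28, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x08 };
	uint32_t out[2];

	swab32_cpy(out, cdb, 2);
	printf("dword0 = 0x%08x\n", out[0]);	/* 0x28000000 on a little-endian host */
	printf("dword1 = 0x%08x\n", out[1]);	/* 0x10000008 on a little-endian host */
	return 0;
}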
+ * @sci_req: + * + */ +static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq, + enum dma_data_direction dir, + u32 len) +{ + struct scu_task_context *task_context = ireq->tc; + + scu_ssp_reqeust_construct_task_context(ireq, task_context); + + task_context->ssp_command_iu_length = + sizeof(struct ssp_cmd_iu) / sizeof(u32); + task_context->type.ssp.frame_type = SSP_COMMAND; + + switch (dir) { + case DMA_FROM_DEVICE: + case DMA_NONE: + default: + task_context->task_type = SCU_TASK_TYPE_IOREAD; + break; + case DMA_TO_DEVICE: + task_context->task_type = SCU_TASK_TYPE_IOWRITE; + break; + } + + task_context->transfer_length_bytes = len; + + if (task_context->transfer_length_bytes > 0) + sci_request_build_sgl(ireq); +} + +/** + * This method will fill in the SCU Task Context for a SSP Task request. The + * following important settings are utilized: -# priority == + * SCU_TASK_PRIORITY_HIGH. This ensures that the task request is issued + * ahead of other task destined for the same Remote Node. -# task_type == + * SCU_TASK_TYPE_IOREAD. This simply indicates that a normal request type + * (i.e. non-raw frame) is being utilized to perform task management. -# + * control_frame == 1. This ensures that the proper endianess is set so + * that the bytes are transmitted in the right order for a task frame. + * @sci_req: This parameter specifies the task request object being + * constructed. + * + */ +static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq) +{ + struct scu_task_context *task_context = ireq->tc; + + scu_ssp_reqeust_construct_task_context(ireq, task_context); + + task_context->control_frame = 1; + task_context->priority = SCU_TASK_PRIORITY_HIGH; + task_context->task_type = SCU_TASK_TYPE_RAW_FRAME; + task_context->transfer_length_bytes = 0; + task_context->type.ssp.frame_type = SSP_TASK; + task_context->ssp_command_iu_length = + sizeof(struct ssp_task_iu) / sizeof(u32); +} + +/** + * This method is will fill in the SCU Task Context for any type of SATA + * request. This is called from the various SATA constructors. + * @sci_req: The general IO request object which is to be used in + * constructing the SCU task context. + * @task_context: The buffer pointer for the SCU task context which is being + * constructed. + * + * The general io request construction is complete. The buffer assignment for + * the command buffer is complete. none Revisit task context construction to + * determine what is common for SSP/SMP/STP task context structures. 
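The IU length fields programmed just above (task_context->ssp_command_iu_length for both the command IU and the task IU) are expressed in 32-bit words via sizeof(...) / sizeof(u32). A small, self-contained sketch of that idiom follows; the IU layout is invented, and the _Static_assert is only a suggestion for catching a layout that is not a whole number of dwords, not something the patch itself does.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

/* Hypothetical IU layout, a stand-in for ssp_cmd_iu / ssp_task_iu. */
struct example_cmd_iu {
        u32 lun[2];
        u32 flags;
        u32 cdb[4];
};

/* Catch at build time any layout change that is not dword-aligned. */
_Static_assert(sizeof(struct example_cmd_iu) % sizeof(u32) == 0,
               "IU size must be a multiple of 4 bytes");

int main(void)
{
        /* Same idiom as task_context->ssp_command_iu_length above. */
        unsigned int iu_len_dwords = sizeof(struct example_cmd_iu) / sizeof(u32);

        printf("command IU length = %u dwords\n", iu_len_dwords);
        return 0;
}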
+ */ +static void scu_sata_reqeust_construct_task_context( + struct isci_request *ireq, + struct scu_task_context *task_context) +{ + dma_addr_t dma_addr; + struct isci_remote_device *idev; + struct isci_port *iport; + + idev = ireq->target_device; + iport = idev->owning_port; + + /* Fill in the TC with the its required data */ + task_context->abort = 0; + task_context->priority = SCU_TASK_PRIORITY_NORMAL; + task_context->initiator_request = 1; + task_context->connection_rate = idev->connection_rate; + task_context->protocol_engine_index = ISCI_PEG; + task_context->logical_port_index = iport->physical_port_index; + task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP; + task_context->valid = SCU_TASK_CONTEXT_VALID; + task_context->context_type = SCU_TASK_CONTEXT_TYPE; + + task_context->remote_node_index = idev->rnc.remote_node_index; + task_context->command_code = 0; + + task_context->link_layer_control = 0; + task_context->do_not_dma_ssp_good_response = 1; + task_context->strict_ordering = 0; + task_context->control_frame = 0; + task_context->timeout_enable = 0; + task_context->block_guard_enable = 0; + + task_context->address_modifier = 0; + task_context->task_phase = 0x01; + + task_context->ssp_command_iu_length = + (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32); + + /* Set the first word of the H2D REG FIS */ + task_context->type.words[0] = *(u32 *)&ireq->stp.cmd; + + ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | + (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | + (iport->physical_port_index << + SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | + ISCI_TAG_TCI(ireq->io_tag)); + /* + * Copy the physical address for the command buffer to the SCU Task + * Context. We must offset the command buffer by 4 bytes because the + * first 4 bytes are transfered in the body of the TC. + */ + dma_addr = sci_io_request_get_dma_addr(ireq, + ((char *) &ireq->stp.cmd) + + sizeof(u32)); + + task_context->command_iu_upper = upper_32_bits(dma_addr); + task_context->command_iu_lower = lower_32_bits(dma_addr); + + /* SATA Requests do not have a response buffer */ + task_context->response_iu_upper = 0; + task_context->response_iu_lower = 0; +} + +static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq) +{ + struct scu_task_context *task_context = ireq->tc; + + scu_sata_reqeust_construct_task_context(ireq, task_context); + + task_context->control_frame = 0; + task_context->priority = SCU_TASK_PRIORITY_NORMAL; + task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME; + task_context->type.stp.fis_type = FIS_REGH2D; + task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32); +} + +static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq, + bool copy_rx_frame) +{ + struct isci_stp_request *stp_req = &ireq->stp.req; + + scu_stp_raw_request_construct_task_context(ireq); + + stp_req->status = 0; + stp_req->sgl.offset = 0; + stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A; + + if (copy_rx_frame) { + sci_request_build_sgl(ireq); + stp_req->sgl.index = 0; + } else { + /* The user does not want the data copied to the SGL buffer location */ + stp_req->sgl.index = -1; + } + + return SCI_SUCCESS; +} + +/** + * + * @sci_req: This parameter specifies the request to be constructed as an + * optimized request. + * @optimized_task_type: This parameter specifies whether the request is to be + * an UDMA request or a NCQ request. - A value of 0 indicates UDMA. - A + * value of 1 indicates NCQ. 
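scu_sata_reqeust_construct_task_context() above embeds the first dword of the host-to-device register FIS directly in the task context (type.words[0]) and points command_iu_{upper,lower} at the FIS plus sizeof(u32), since those four bytes travel in the TC body rather than by DMA; the IU length is correspondingly (sizeof(fis) - 4) / 4 dwords. The fragment below models only that bookkeeping; the 20-byte FIS stand-in, the field names and the fixed bus address are inventions of the example, not the driver's definitions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Invented 20-byte register-FIS-like layout (the real host_to_dev_fis may differ). */
struct example_h2d_fis {
        uint8_t bytes[20];
};

struct example_tc_fields {
        uint32_t first_word;            /* travels inside the task context */
        uint64_t command_iu_addr;       /* DMA address of the remaining bytes */
        uint32_t command_iu_dwords;     /* length of the DMA'd part, in dwords */
};

static void program_fis(struct example_tc_fields *tc,
                        const struct example_h2d_fis *fis,
                        uint64_t fis_bus_addr)
{
        uint32_t first;

        /* The first dword is carried in the TC body... */
        memcpy(&first, fis->bytes, sizeof(first));
        tc->first_word = first;

        /* ...so the DMA pointer and the programmed length both skip those four bytes. */
        tc->command_iu_addr = fis_bus_addr + sizeof(uint32_t);
        tc->command_iu_dwords =
                (sizeof(*fis) - sizeof(uint32_t)) / sizeof(uint32_t);
}

int main(void)
{
        struct example_h2d_fis fis = { .bytes = { 0x27, 0x80, 0x25 } }; /* sample values */
        struct example_tc_fields tc;

        program_fis(&tc, &fis, 0x10000);
        printf("first word 0x%08x, IU at 0x%llx, %u dwords\n",
               tc.first_word, (unsigned long long)tc.command_iu_addr,
               tc.command_iu_dwords);
        return 0;
}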
+ * + * This method will perform request construction common to all types of STP + * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method + * returns an indication as to whether the construction was successful. + */ +static void sci_stp_optimized_request_construct(struct isci_request *ireq, + u8 optimized_task_type, + u32 len, + enum dma_data_direction dir) +{ + struct scu_task_context *task_context = ireq->tc; + + /* Build the STP task context structure */ + scu_sata_reqeust_construct_task_context(ireq, task_context); + + /* Copy over the SGL elements */ + sci_request_build_sgl(ireq); + + /* Copy over the number of bytes to be transfered */ + task_context->transfer_length_bytes = len; + + if (dir == DMA_TO_DEVICE) { + /* + * The difference between the DMA IN and DMA OUT request task type + * values are consistent with the difference between FPDMA READ + * and FPDMA WRITE values. Add the supplied task type parameter + * to this difference to set the task type properly for this + * DATA OUT (WRITE) case. */ + task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT + - SCU_TASK_TYPE_DMA_IN); + } else { + /* + * For the DATA IN (READ) case, simply save the supplied + * optimized task type. */ + task_context->task_type = optimized_task_type; + } +} + + + +static enum sci_status +sci_io_request_construct_sata(struct isci_request *ireq, + u32 len, + enum dma_data_direction dir, + bool copy) +{ + enum sci_status status = SCI_SUCCESS; + struct sas_task *task = isci_request_access_task(ireq); + + /* check for management protocols */ + if (ireq->ttype == tmf_task) { + struct isci_tmf *tmf = isci_request_access_tmf(ireq); + + if (tmf->tmf_code == isci_tmf_sata_srst_high || + tmf->tmf_code == isci_tmf_sata_srst_low) { + scu_stp_raw_request_construct_task_context(ireq); + return SCI_SUCCESS; + } else { + dev_err(&ireq->owning_controller->pdev->dev, + "%s: Request 0x%p received un-handled SAT " + "management protocol 0x%x.\n", + __func__, ireq, tmf->tmf_code); + + return SCI_FAILURE; + } + } + + if (!sas_protocol_ata(task->task_proto)) { + dev_err(&ireq->owning_controller->pdev->dev, + "%s: Non-ATA protocol in SATA path: 0x%x\n", + __func__, + task->task_proto); + return SCI_FAILURE; + + } + + /* non data */ + if (task->data_dir == DMA_NONE) { + scu_stp_raw_request_construct_task_context(ireq); + return SCI_SUCCESS; + } + + /* NCQ */ + if (task->ata_task.use_ncq) { + sci_stp_optimized_request_construct(ireq, + SCU_TASK_TYPE_FPDMAQ_READ, + len, dir); + return SCI_SUCCESS; + } + + /* DMA */ + if (task->ata_task.dma_xfer) { + sci_stp_optimized_request_construct(ireq, + SCU_TASK_TYPE_DMA_IN, + len, dir); + return SCI_SUCCESS; + } else /* PIO */ + return sci_stp_pio_request_construct(ireq, copy); + + return status; +} + +static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq) +{ + struct sas_task *task = isci_request_access_task(ireq); + + ireq->protocol = SCIC_SSP_PROTOCOL; + + scu_ssp_io_request_construct_task_context(ireq, + task->data_dir, + task->total_xfer_len); + + sci_io_request_build_ssp_command_iu(ireq); + + sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); + + return SCI_SUCCESS; +} + +enum sci_status sci_task_request_construct_ssp( + struct isci_request *ireq) +{ + /* Construct the SSP Task SCU Task Context */ + scu_ssp_task_request_construct_task_context(ireq); + + /* Fill in the SSP Task IU */ + sci_task_request_build_ssp_task_iu(ireq); + + sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); + + return SCI_SUCCESS; +} + +static enum 
sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq) +{ + enum sci_status status; + bool copy = false; + struct sas_task *task = isci_request_access_task(ireq); + + ireq->protocol = SCIC_STP_PROTOCOL; + + copy = (task->data_dir == DMA_NONE) ? false : true; + + status = sci_io_request_construct_sata(ireq, + task->total_xfer_len, + task->data_dir, + copy); + + if (status == SCI_SUCCESS) + sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); + + return status; +} + +enum sci_status sci_task_request_construct_sata(struct isci_request *ireq) +{ + enum sci_status status = SCI_SUCCESS; + + /* check for management protocols */ + if (ireq->ttype == tmf_task) { + struct isci_tmf *tmf = isci_request_access_tmf(ireq); + + if (tmf->tmf_code == isci_tmf_sata_srst_high || + tmf->tmf_code == isci_tmf_sata_srst_low) { + scu_stp_raw_request_construct_task_context(ireq); + } else { + dev_err(&ireq->owning_controller->pdev->dev, + "%s: Request 0x%p received un-handled SAT " + "Protocol 0x%x.\n", + __func__, ireq, tmf->tmf_code); + + return SCI_FAILURE; + } + } + + if (status != SCI_SUCCESS) + return status; + sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); + + return status; +} + +/** + * sci_req_tx_bytes - bytes transferred when reply underruns request + * @sci_req: request that was terminated early + */ +#define SCU_TASK_CONTEXT_SRAM 0x200000 +static u32 sci_req_tx_bytes(struct isci_request *ireq) +{ + struct isci_host *ihost = ireq->owning_controller; + u32 ret_val = 0; + + if (readl(&ihost->smu_registers->address_modifier) == 0) { + void __iomem *scu_reg_base = ihost->scu_registers; + + /* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where + * BAR1 is the scu_registers + * 0x20002C = 0x200000 + 0x2c + * = start of task context SRAM + offset of (type.ssp.data_offset) + * TCi is the io_tag of struct sci_request + */ + ret_val = readl(scu_reg_base + + (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) + + ((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag))); + } + + return ret_val; +} + +enum sci_status sci_request_start(struct isci_request *ireq) +{ + enum sci_base_request_states state; + struct scu_task_context *tc = ireq->tc; + struct isci_host *ihost = ireq->owning_controller; + + state = ireq->sm.current_state_id; + if (state != SCI_REQ_CONSTRUCTED) { + dev_warn(&ihost->pdev->dev, + "%s: SCIC IO Request requested to start while in wrong " + "state %d\n", __func__, state); + return SCI_FAILURE_INVALID_STATE; + } + + tc->task_index = ISCI_TAG_TCI(ireq->io_tag); + + switch (tc->protocol_type) { + case SCU_TASK_CONTEXT_PROTOCOL_SMP: + case SCU_TASK_CONTEXT_PROTOCOL_SSP: + /* SSP/SMP Frame */ + tc->type.ssp.tag = ireq->io_tag; + tc->type.ssp.target_port_transfer_tag = 0xFFFF; + break; + + case SCU_TASK_CONTEXT_PROTOCOL_STP: + /* STP/SATA Frame + * tc->type.stp.ncq_tag = ireq->ncq_tag; + */ + break; + + case SCU_TASK_CONTEXT_PROTOCOL_NONE: + /* / @todo When do we set no protocol type? 
*/ + break; + + default: + /* This should never happen since we build the IO + * requests */ + break; + } + + /* Add to the post_context the io tag value */ + ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag); + + /* Everything is good go ahead and change state */ + sci_change_state(&ireq->sm, SCI_REQ_STARTED); + + return SCI_SUCCESS; +} + +enum sci_status +sci_io_request_terminate(struct isci_request *ireq) +{ + enum sci_base_request_states state; + + state = ireq->sm.current_state_id; + + switch (state) { + case SCI_REQ_CONSTRUCTED: + ireq->scu_status = SCU_TASK_DONE_TASK_ABORT; + ireq->sci_status = SCI_FAILURE_IO_TERMINATED; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + return SCI_SUCCESS; + case SCI_REQ_STARTED: + case SCI_REQ_TASK_WAIT_TC_COMP: + case SCI_REQ_SMP_WAIT_RESP: + case SCI_REQ_SMP_WAIT_TC_COMP: + case SCI_REQ_STP_UDMA_WAIT_TC_COMP: + case SCI_REQ_STP_UDMA_WAIT_D2H: + case SCI_REQ_STP_NON_DATA_WAIT_H2D: + case SCI_REQ_STP_NON_DATA_WAIT_D2H: + case SCI_REQ_STP_PIO_WAIT_H2D: + case SCI_REQ_STP_PIO_WAIT_FRAME: + case SCI_REQ_STP_PIO_DATA_IN: + case SCI_REQ_STP_PIO_DATA_OUT: + case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED: + case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG: + case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: + sci_change_state(&ireq->sm, SCI_REQ_ABORTING); + return SCI_SUCCESS; + case SCI_REQ_TASK_WAIT_TC_RESP: + sci_change_state(&ireq->sm, SCI_REQ_ABORTING); + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + return SCI_SUCCESS; + case SCI_REQ_ABORTING: + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + return SCI_SUCCESS; + case SCI_REQ_COMPLETED: + default: + dev_warn(&ireq->owning_controller->pdev->dev, + "%s: SCIC IO Request requested to abort while in wrong " + "state %d\n", + __func__, + ireq->sm.current_state_id); + break; + } + + return SCI_FAILURE_INVALID_STATE; +} + +enum sci_status sci_request_complete(struct isci_request *ireq) +{ + enum sci_base_request_states state; + struct isci_host *ihost = ireq->owning_controller; + + state = ireq->sm.current_state_id; + if (WARN_ONCE(state != SCI_REQ_COMPLETED, + "isci: request completion from wrong state (%d)\n", state)) + return SCI_FAILURE_INVALID_STATE; + + if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) + sci_controller_release_frame(ihost, + ireq->saved_rx_frame_index); + + /* XXX can we just stop the machine and remove the 'final' state? */ + sci_change_state(&ireq->sm, SCI_REQ_FINAL); + return SCI_SUCCESS; +} + +enum sci_status sci_io_request_event_handler(struct isci_request *ireq, + u32 event_code) +{ + enum sci_base_request_states state; + struct isci_host *ihost = ireq->owning_controller; + + state = ireq->sm.current_state_id; + + if (state != SCI_REQ_STP_PIO_DATA_IN) { + dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %d\n", + __func__, event_code, state); + + return SCI_FAILURE_INVALID_STATE; + } + + switch (scu_get_event_specifier(event_code)) { + case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT: + /* We are waiting for data and the SCU has R_ERR the data frame. + * Go back to waiting for the D2H Register FIS + */ + sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); + return SCI_SUCCESS; + default: + dev_err(&ihost->pdev->dev, + "%s: pio request unexpected event %#x\n", + __func__, event_code); + + /* TODO Should we fail the PIO request when we get an + * unexpected event? + */ + return SCI_FAILURE; + } +} + +/* + * This function copies response data for requests returning response data + * instead of sense data. 
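The comment inside sci_req_tx_bytes() earlier spells out the address arithmetic used to read back the transferred-byte count: task-context SRAM starts at BAR1 + 0x200000, each context occupies sizeof(struct scu_task_context) bytes (256 per the comment), and the field of interest sits at offsetof(struct scu_task_context, type.ssp.data_offset). The snippet below reproduces just that offset calculation with a toy structure; its layout, and therefore the resulting numbers, are made up, and only the offsetof-plus-index pattern matches the driver.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_TC_SRAM_BASE    0x200000u       /* start of TC SRAM, per the comment above */

/* Toy task-context layout; the real scu_task_context is defined elsewhere. */
struct example_task_context {
        uint32_t header[8];
        struct {
                uint32_t tag;
                uint32_t data_offset;   /* bytes transferred so far */
        } ssp;
        uint32_t trailer[54];           /* pad the structure out to 256 bytes */
};

/* Byte offset (from BAR1) of 'data_offset' for task context index tci. */
static size_t tx_bytes_reg_offset(unsigned int tci)
{
        return EXAMPLE_TC_SRAM_BASE
                + offsetof(struct example_task_context, ssp.data_offset)
                + (size_t)tci * sizeof(struct example_task_context);
}

int main(void)
{
        /* Offsets here differ from the real 0x20002C, but the arithmetic has the same shape. */
        printf("TCi 0 -> 0x%zx, TCi 1 -> 0x%zx\n",
               tx_bytes_reg_offset(0), tx_bytes_reg_offset(1));
        return 0;
}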
+ * @sci_req: This parameter specifies the request object for which to copy + * the response data. + */ +static void sci_io_request_copy_response(struct isci_request *ireq) +{ + void *resp_buf; + u32 len; + struct ssp_response_iu *ssp_response; + struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq); + + ssp_response = &ireq->ssp.rsp; + + resp_buf = &isci_tmf->resp.resp_iu; + + len = min_t(u32, + SSP_RESP_IU_MAX_SIZE, + be32_to_cpu(ssp_response->response_data_len)); + + memcpy(resp_buf, ssp_response->resp_data, len); +} + +static enum sci_status +request_started_state_tc_event(struct isci_request *ireq, + u32 completion_code) +{ + struct ssp_response_iu *resp_iu; + u8 datapres; + + /* TODO: Any SDMA return code of other than 0 is bad decode 0x003C0000 + * to determine SDMA status + */ + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; + break; + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): { + /* There are times when the SCU hardware will return an early + * response because the io request specified more data than is + * returned by the target device (mode pages, inquiry data, + * etc.). We must check the response stats to see if this is + * truly a failed request or a good request that just got + * completed early. + */ + struct ssp_response_iu *resp = &ireq->ssp.rsp; + ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); + + sci_swab32_cpy(&ireq->ssp.rsp, + &ireq->ssp.rsp, + word_cnt); + + if (resp->status == 0) { + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY; + } else { + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; + } + break; + } + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): { + ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); + + sci_swab32_cpy(&ireq->ssp.rsp, + &ireq->ssp.rsp, + word_cnt); + + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; + break; + } + + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR): + /* TODO With TASK_DONE_RESP_LEN_ERR is the response frame + * guaranteed to be received before this completion status is + * posted? + */ + resp_iu = &ireq->ssp.rsp; + datapres = resp_iu->datapres; + + if (datapres == 1 || datapres == 2) { + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; + } else { + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; + } + break; + /* only stp device gets suspended. 
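The CDB copy earlier and the response-IU handling above both go through sci_swab32_cpy(), a copy that byte-swaps every 32-bit word to convert between the big-endian SSP frame layout and host order. The helper itself is not defined in this hunk; a minimal equivalent, shown purely for illustration, looks like this:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Copy 'words' 32-bit words from src to dest, byte-swapping each word.
 * dest and src may be the same buffer, as in the in-place response swap above. */
static void swab32_cpy(uint32_t *dest, const uint32_t *src, size_t words)
{
        size_t i;

        for (i = 0; i < words; i++) {
                uint32_t v = src[i];

                dest[i] = (v >> 24) | ((v >> 8) & 0x0000ff00u) |
                          ((v << 8) & 0x00ff0000u) | (v << 24);
        }
}

int main(void)
{
        uint32_t buf[2] = { 0x11223344u, 0xa1b2c3d4u };

        swab32_cpy(buf, buf, 2);        /* in place */
        printf("0x%08x 0x%08x\n", buf[0], buf[1]);      /* 0x44332211 0xd4c3b2a1 */
        return 0;
}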
*/ + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR): + if (ireq->protocol == SCIC_STP_PROTOCOL) { + ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> + SCU_COMPLETION_TL_STATUS_SHIFT; + ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; + } else { + ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> + SCU_COMPLETION_TL_STATUS_SHIFT; + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; + } + break; + + /* both stp/ssp device gets suspended */ + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED): + ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> + SCU_COMPLETION_TL_STATUS_SHIFT; + ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; + break; + + /* neither ssp nor stp gets suspended. */ + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND): + default: + ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> + SCU_COMPLETION_TL_STATUS_SHIFT; + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; + break; + } + + /* + * TODO: This is probably wrong for ACK/NAK timeout conditions + */ + + /* In all cases we will treat this as the completion of the IO req. 
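Every arm of the switch above keys on SCU_GET_COMPLETION_TL_STATUS(completion_code) and then records the code shifted down by SCU_COMPLETION_TL_STATUS_SHIFT in ireq->scu_status, i.e. the transport-layer status is one bit-field inside the 32-bit completion word (the real macro definitions are not in this hunk). The shift and field width below are invented; only the extract / compare / normalize pattern is meant to match.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout of a completion word: status field at bits 16..23. */
#define EX_TL_STATUS_SHIFT      16
#define EX_TL_STATUS_MASK       (0xffu << EX_TL_STATUS_SHIFT)

#define EX_GET_TL_STATUS(c)     ((c) & EX_TL_STATUS_MASK)       /* still shifted up */
#define EX_MAKE_STATUS(s)       ((uint32_t)(s) << EX_TL_STATUS_SHIFT)
#define EX_NORMALIZE(c)         (EX_GET_TL_STATUS(c) >> EX_TL_STATUS_SHIFT)

enum { EX_TASK_DONE_GOOD = 0x00, EX_TASK_DONE_CRC_ERR = 0x14 };

int main(void)
{
        uint32_t completion_code = EX_MAKE_STATUS(EX_TASK_DONE_CRC_ERR) | 0x3c;

        /* Compare in shifted form, as the switch cases above do... */
        if (EX_GET_TL_STATUS(completion_code) == EX_MAKE_STATUS(EX_TASK_DONE_CRC_ERR))
                printf("CRC error completion\n");

        /* ...and store the small normalized value, as ireq->scu_status does. */
        printf("scu_status = 0x%x\n", EX_NORMALIZE(completion_code));
        return 0;
}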
*/ + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + return SCI_SUCCESS; +} + +static enum sci_status +request_aborting_state_tc_event(struct isci_request *ireq, + u32 completion_code) +{ + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT): + case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT): + ireq->scu_status = SCU_TASK_DONE_TASK_ABORT; + ireq->sci_status = SCI_FAILURE_IO_TERMINATED; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + break; + + default: + /* Unless we get some strange error wait for the task abort to complete + * TODO: Should there be a state change for this completion? + */ + break; + } + + return SCI_SUCCESS; +} + +static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq, + u32 completion_code) +{ + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; + sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP); + break; + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): + /* Currently, the decision is to simply allow the task request + * to timeout if the task IU wasn't received successfully. + * There is a potential for receiving multiple task responses if + * we decide to send the task IU again. + */ + dev_warn(&ireq->owning_controller->pdev->dev, + "%s: TaskRequest:0x%p CompletionCode:%x - " + "ACK/NAK timeout\n", __func__, ireq, + completion_code); + + sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP); + break; + default: + /* + * All other completion status cause the IO to be complete. + * If a NAK was received, then it is up to the user to retry + * the request. + */ + ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + break; + } + + return SCI_SUCCESS; +} + +static enum sci_status +smp_request_await_response_tc_event(struct isci_request *ireq, + u32 completion_code) +{ + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + /* In the AWAIT RESPONSE state, any TC completion is + * unexpected. but if the TC has success status, we + * complete the IO anyway. + */ + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + break; + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR): + /* These status has been seen in a specific LSI + * expander, which sometimes is not able to send smp + * response within 2 ms. This causes our hardware break + * the connection and set TC completion with one of + * these SMP_XXX_XX_ERR status. For these type of error, + * we ask ihost user to retry the request. + */ + ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR; + ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + break; + default: + /* All other completion status cause the IO to be complete. 
If a NAK + * was received, then it is up to the user to retry the request + */ + ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + break; + } + + return SCI_SUCCESS; +} + +static enum sci_status +smp_request_await_tc_event(struct isci_request *ireq, + u32 completion_code) +{ + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + break; + default: + /* All other completion status cause the IO to be + * complete. If a NAK was received, then it is up to + * the user to retry the request. + */ + ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + break; + } + + return SCI_SUCCESS; +} + +static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req) +{ + struct scu_sgl_element *sgl; + struct scu_sgl_element_pair *sgl_pair; + struct isci_request *ireq = to_ireq(stp_req); + struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl; + + sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index); + if (!sgl_pair) + sgl = NULL; + else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) { + if (sgl_pair->B.address_lower == 0 && + sgl_pair->B.address_upper == 0) { + sgl = NULL; + } else { + pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B; + sgl = &sgl_pair->B; + } + } else { + if (sgl_pair->next_pair_lower == 0 && + sgl_pair->next_pair_upper == 0) { + sgl = NULL; + } else { + pio_sgl->index++; + pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A; + sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index); + sgl = &sgl_pair->A; + } + } + + return sgl; +} + +static enum sci_status +stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq, + u32 completion_code) +{ + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; + sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H); + break; + + default: + /* All other completion status cause the IO to be + * complete. If a NAK was received, then it is up to + * the user to retry the request. + */ + ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + break; + } + + return SCI_SUCCESS; +} + +#define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */ + +/* transmit DATA_FIS from (current sgl + offset) for input + * parameter length. 
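pio_sgl_next() above advances the PIO cursor one SGL element at a time: within a pair it steps from A to B (unless B is empty), and once a pair is exhausted it follows the next_pair link to the following pair's A element, returning NULL at the end of the chain. A detached model of that walk is sketched here; the structures, the index-based 'next' field and the cursor type are all inventions of the example.

#include <stdint.h>
#include <stdio.h>

struct ex_sgl_element { uint32_t addr_lo, addr_hi, length; };

struct ex_sgl_pair {
        struct ex_sgl_element A, B;
        int next;               /* index of the next pair, -1 terminates (stand-in for a bus address) */
};

struct ex_cursor { int pair; int use_b; };

/* Return the element after the cursor position, advancing the cursor, or NULL at the end. */
static struct ex_sgl_element *ex_sgl_next(struct ex_sgl_pair *pairs,
                                          struct ex_cursor *cur)
{
        struct ex_sgl_pair *p = &pairs[cur->pair];

        if (!cur->use_b) {
                /* Currently on A: move to B unless B is empty. */
                if (p->B.length == 0 && p->B.addr_lo == 0 && p->B.addr_hi == 0)
                        return NULL;
                cur->use_b = 1;
                return &p->B;
        }

        /* Currently on B: follow the chain to the next pair's A. */
        if (p->next < 0)
                return NULL;
        cur->pair = p->next;
        cur->use_b = 0;
        return &pairs[cur->pair].A;
}

int main(void)
{
        struct ex_sgl_pair pairs[2] = {
                { .A = { 0x1000, 0, 512 }, .B = { 0x2000, 0, 512 }, .next = 1 },
                { .A = { 0x3000, 0, 256 }, .B = { 0 }, .next = -1 },
        };
        /* The caller starts on pair 0, element A, and asks for each following element. */
        struct ex_cursor cur = { 0, 0 };
        struct ex_sgl_element *e;

        while ((e = ex_sgl_next(pairs, &cur)) != NULL)
                printf("element at 0x%x, %u bytes\n", e->addr_lo, e->length);
        return 0;
}

The real code keeps this cursor in stp_req->sgl (a pair index plus an A/B selector) rather than in a separate structure.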
current sgl and offset is alreay stored in the IO request + */ +static enum sci_status sci_stp_request_pio_data_out_trasmit_data_frame( + struct isci_request *ireq, + u32 length) +{ + struct isci_stp_request *stp_req = &ireq->stp.req; + struct scu_task_context *task_context = ireq->tc; + struct scu_sgl_element_pair *sgl_pair; + struct scu_sgl_element *current_sgl; + + /* Recycle the TC and reconstruct it for sending out DATA FIS containing + * for the data from current_sgl+offset for the input length + */ + sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index); + if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) + current_sgl = &sgl_pair->A; + else + current_sgl = &sgl_pair->B; + + /* update the TC */ + task_context->command_iu_upper = current_sgl->address_upper; + task_context->command_iu_lower = current_sgl->address_lower; + task_context->transfer_length_bytes = length; + task_context->type.stp.fis_type = FIS_DATA; + + /* send the new TC out. */ + return sci_controller_continue_io(ireq); +} + +static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq) +{ + struct isci_stp_request *stp_req = &ireq->stp.req; + struct scu_sgl_element_pair *sgl_pair; + struct scu_sgl_element *sgl; + enum sci_status status; + u32 offset; + u32 len = 0; + + offset = stp_req->sgl.offset; + sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index); + if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__)) + return SCI_FAILURE; + + if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) { + sgl = &sgl_pair->A; + len = sgl_pair->A.length - offset; + } else { + sgl = &sgl_pair->B; + len = sgl_pair->B.length - offset; + } + + if (stp_req->pio_len == 0) + return SCI_SUCCESS; + + if (stp_req->pio_len >= len) { + status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, len); + if (status != SCI_SUCCESS) + return status; + stp_req->pio_len -= len; + + /* update the current sgl, offset and save for future */ + sgl = pio_sgl_next(stp_req); + offset = 0; + } else if (stp_req->pio_len < len) { + sci_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len); + + /* Sgl offset will be adjusted and saved for future */ + offset += stp_req->pio_len; + sgl->address_lower += stp_req->pio_len; + stp_req->pio_len = 0; + } + + stp_req->sgl.offset = offset; + + return status; +} + +/** + * + * @stp_request: The request that is used for the SGL processing. + * @data_buffer: The buffer of data to be copied. + * @length: The length of the data transfer. + * + * Copy the data from the buffer for the length specified to the IO reqeust SGL + * specified data region. 
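sci_stp_request_pio_data_out_transmit_data() above sends min(remaining PIO length, bytes left in the current SGL element) per DATA frame, then either moves to the next element with the offset reset to zero or, when the request ends inside an element, advances the saved offset and element address. The loop below compresses that bookkeeping into one pass (the driver emits one frame per task-context completion instead); the segment array and the ex_send() stub are inventions of the example.

#include <stdint.h>
#include <stdio.h>

struct ex_segment { uint32_t len; };

/* Pretend to transmit 'len' bytes from segment 'idx' starting at 'offset'. */
static void ex_send(int idx, uint32_t offset, uint32_t len)
{
        printf("DATA frame: segment %d, offset %u, %u bytes\n", idx, offset, len);
}

int main(void)
{
        struct ex_segment segs[] = { { 1024 }, { 3000 } };
        uint32_t remaining = 3500;      /* stand-in for stp_req->pio_len */
        uint32_t offset = 0;            /* stand-in for stp_req->sgl.offset */
        int idx = 0;

        while (remaining > 0) {
                uint32_t avail = segs[idx].len - offset;

                if (remaining >= avail) {
                        /* Consume the rest of this element, then move to the next one. */
                        ex_send(idx, offset, avail);
                        remaining -= avail;
                        idx++;
                        offset = 0;
                } else {
                        /* The request ends inside this element: remember where. */
                        ex_send(idx, offset, remaining);
                        offset += remaining;
                        remaining = 0;
                }
        }
        return 0;
}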
enum sci_status + */ +static enum sci_status +sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req, + u8 *data_buf, u32 len) +{ + struct isci_request *ireq; + u8 *src_addr; + int copy_len; + struct sas_task *task; + struct scatterlist *sg; + void *kaddr; + int total_len = len; + + ireq = to_ireq(stp_req); + task = isci_request_access_task(ireq); + src_addr = data_buf; + + if (task->num_scatter > 0) { + sg = task->scatter; + + while (total_len > 0) { + struct page *page = sg_page(sg); + + copy_len = min_t(int, total_len, sg_dma_len(sg)); + kaddr = kmap_atomic(page, KM_IRQ0); + memcpy(kaddr + sg->offset, src_addr, copy_len); + kunmap_atomic(kaddr, KM_IRQ0); + total_len -= copy_len; + src_addr += copy_len; + sg = sg_next(sg); + } + } else { + BUG_ON(task->total_xfer_len < total_len); + memcpy(task->scatter, src_addr, total_len); + } + + return SCI_SUCCESS; +} + +/** + * + * @sci_req: The PIO DATA IN request that is to receive the data. + * @data_buffer: The buffer to copy from. + * + * Copy the data buffer to the io request data region. enum sci_status + */ +static enum sci_status sci_stp_request_pio_data_in_copy_data( + struct isci_stp_request *stp_req, + u8 *data_buffer) +{ + enum sci_status status; + + /* + * If there is less than 1K remaining in the transfer request + * copy just the data for the transfer */ + if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) { + status = sci_stp_request_pio_data_in_copy_data_buffer( + stp_req, data_buffer, stp_req->pio_len); + + if (status == SCI_SUCCESS) + stp_req->pio_len = 0; + } else { + /* We are transfering the whole frame so copy */ + status = sci_stp_request_pio_data_in_copy_data_buffer( + stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE); + + if (status == SCI_SUCCESS) + stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE; + } + + return status; +} + +static enum sci_status +stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq, + u32 completion_code) +{ + enum sci_status status = SCI_SUCCESS; + + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; + sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); + break; + + default: + /* All other completion status cause the IO to be + * complete. If a NAK was received, then it is up to + * the user to retry the request. + */ + ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + break; + } + + return status; +} + +static enum sci_status +pio_data_out_tx_done_tc_event(struct isci_request *ireq, + u32 completion_code) +{ + enum sci_status status = SCI_SUCCESS; + bool all_frames_transferred = false; + struct isci_stp_request *stp_req = &ireq->stp.req; + + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + /* Transmit data */ + if (stp_req->pio_len != 0) { + status = sci_stp_request_pio_data_out_transmit_data(ireq); + if (status == SCI_SUCCESS) { + if (stp_req->pio_len == 0) + all_frames_transferred = true; + } + } else if (stp_req->pio_len == 0) { + /* + * this will happen if the all data is written at the + * first time after the pio setup fis is received + */ + all_frames_transferred = true; + } + + /* all data transferred. 
*/ + if (all_frames_transferred) { + /* + * Change the state to SCI_REQ_STP_PIO_DATA_IN + * and wait for PIO_SETUP fis / or D2H REg fis. */ + sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); + } + break; + + default: + /* + * All other completion status cause the IO to be complete. + * If a NAK was received, then it is up to the user to retry + * the request. + */ + ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + break; + } + + return status; +} + +static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq, + u32 frame_index) +{ + struct isci_host *ihost = ireq->owning_controller; + struct dev_to_host_fis *frame_header; + enum sci_status status; + u32 *frame_buffer; + + status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, + frame_index, + (void **)&frame_header); + + if ((status == SCI_SUCCESS) && + (frame_header->fis_type == FIS_REGD2H)) { + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, + frame_index, + (void **)&frame_buffer); + + sci_controller_copy_sata_response(&ireq->stp.rsp, + frame_header, + frame_buffer); + } + + sci_controller_release_frame(ihost, frame_index); + + return status; +} + +enum sci_status +sci_io_request_frame_handler(struct isci_request *ireq, + u32 frame_index) +{ + struct isci_host *ihost = ireq->owning_controller; + struct isci_stp_request *stp_req = &ireq->stp.req; + enum sci_base_request_states state; + enum sci_status status; + ssize_t word_cnt; + + state = ireq->sm.current_state_id; + switch (state) { + case SCI_REQ_STARTED: { + struct ssp_frame_hdr ssp_hdr; + void *frame_header; + + sci_unsolicited_frame_control_get_header(&ihost->uf_control, + frame_index, + &frame_header); + + word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32); + sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt); + + if (ssp_hdr.frame_type == SSP_RESPONSE) { + struct ssp_response_iu *resp_iu; + ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); + + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, + frame_index, + (void **)&resp_iu); + + sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt); + + resp_iu = &ireq->ssp.rsp; + + if (resp_iu->datapres == 0x01 || + resp_iu->datapres == 0x02) { + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; + } else { + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; + } + } else { + /* not a response frame, why did it get forwarded? */ + dev_err(&ihost->pdev->dev, + "%s: SCIC IO Request 0x%p received unexpected " + "frame %d type 0x%02x\n", __func__, ireq, + frame_index, ssp_hdr.frame_type); + } + + /* + * In any case we are done with this frame buffer return it to + * the controller + */ + sci_controller_release_frame(ihost, frame_index); + + return SCI_SUCCESS; + } + + case SCI_REQ_TASK_WAIT_TC_RESP: + sci_io_request_copy_response(ireq); + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + sci_controller_release_frame(ihost, frame_index); + return SCI_SUCCESS; + + case SCI_REQ_SMP_WAIT_RESP: { + struct smp_resp *rsp_hdr = &ireq->smp.rsp; + void *frame_header; + + sci_unsolicited_frame_control_get_header(&ihost->uf_control, + frame_index, + &frame_header); + + /* byte swap the header. 
*/ + word_cnt = SMP_RESP_HDR_SZ / sizeof(u32); + sci_swab32_cpy(rsp_hdr, frame_header, word_cnt); + + if (rsp_hdr->frame_type == SMP_RESPONSE) { + void *smp_resp; + + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, + frame_index, + &smp_resp); + + word_cnt = (sizeof(struct smp_resp) - SMP_RESP_HDR_SZ) / + sizeof(u32); + + sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ, + smp_resp, word_cnt); + + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; + sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP); + } else { + /* + * This was not a response frame why did it get + * forwarded? + */ + dev_err(&ihost->pdev->dev, + "%s: SCIC SMP Request 0x%p received unexpected " + "frame %d type 0x%02x\n", + __func__, + ireq, + frame_index, + rsp_hdr->frame_type); + + ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR; + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + } + + sci_controller_release_frame(ihost, frame_index); + + return SCI_SUCCESS; + } + + case SCI_REQ_STP_UDMA_WAIT_TC_COMP: + return sci_stp_request_udma_general_frame_handler(ireq, + frame_index); + + case SCI_REQ_STP_UDMA_WAIT_D2H: + /* Use the general frame handler to copy the resposne data */ + status = sci_stp_request_udma_general_frame_handler(ireq, frame_index); + + if (status != SCI_SUCCESS) + return status; + + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + return SCI_SUCCESS; + + case SCI_REQ_STP_NON_DATA_WAIT_D2H: { + struct dev_to_host_fis *frame_header; + u32 *frame_buffer; + + status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, + frame_index, + (void **)&frame_header); + + if (status != SCI_SUCCESS) { + dev_err(&ihost->pdev->dev, + "%s: SCIC IO Request 0x%p could not get frame " + "header for frame index %d, status %x\n", + __func__, + stp_req, + frame_index, + status); + + return status; + } + + switch (frame_header->fis_type) { + case FIS_REGD2H: + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, + frame_index, + (void **)&frame_buffer); + + sci_controller_copy_sata_response(&ireq->stp.rsp, + frame_header, + frame_buffer); + + /* The command has completed with error */ + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; + break; + + default: + dev_warn(&ihost->pdev->dev, + "%s: IO Request:0x%p Frame Id:%d protocol " + "violation occurred\n", __func__, stp_req, + frame_index); + + ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS; + ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION; + break; + } + + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + + /* Frame has been decoded return it to the controller */ + sci_controller_release_frame(ihost, frame_index); + + return status; + } + + case SCI_REQ_STP_PIO_WAIT_FRAME: { + struct sas_task *task = isci_request_access_task(ireq); + struct dev_to_host_fis *frame_header; + u32 *frame_buffer; + + status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, + frame_index, + (void **)&frame_header); + + if (status != SCI_SUCCESS) { + dev_err(&ihost->pdev->dev, + "%s: SCIC IO Request 0x%p could not get frame " + "header for frame index %d, status %x\n", + __func__, stp_req, frame_index, status); + return status; + } + + switch (frame_header->fis_type) { + case FIS_PIO_SETUP: + /* Get from the frame buffer the PIO Setup Data */ + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, + frame_index, 
+ (void **)&frame_buffer); + + /* Get the data from the PIO Setup The SCU Hardware + * returns first word in the frame_header and the rest + * of the data is in the frame buffer so we need to + * back up one dword + */ + + /* transfer_count: first 16bits in the 4th dword */ + stp_req->pio_len = frame_buffer[3] & 0xffff; + + /* status: 4th byte in the 3rd dword */ + stp_req->status = (frame_buffer[2] >> 24) & 0xff; + + sci_controller_copy_sata_response(&ireq->stp.rsp, + frame_header, + frame_buffer); + + ireq->stp.rsp.status = stp_req->status; + + /* The next state is dependent on whether the + * request was PIO Data-in or Data out + */ + if (task->data_dir == DMA_FROM_DEVICE) { + sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN); + } else if (task->data_dir == DMA_TO_DEVICE) { + /* Transmit data */ + status = sci_stp_request_pio_data_out_transmit_data(ireq); + if (status != SCI_SUCCESS) + break; + sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT); + } + break; + + case FIS_SETDEVBITS: + sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); + break; + + case FIS_REGD2H: + if (frame_header->status & ATA_BUSY) { + /* + * Now why is the drive sending a D2H Register + * FIS when it is still busy? Do nothing since + * we are still in the right state. + */ + dev_dbg(&ihost->pdev->dev, + "%s: SCIC PIO Request 0x%p received " + "D2H Register FIS with BSY status " + "0x%x\n", + __func__, + stp_req, + frame_header->status); + break; + } + + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, + frame_index, + (void **)&frame_buffer); + + sci_controller_copy_sata_response(&ireq->stp.req, + frame_header, + frame_buffer); + + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + break; + + default: + /* FIXME: what do we do here? 
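The FIS_PIO_SETUP handling above recovers transfer_count from the low 16 bits of the 4th dword of the frame buffer and the status byte from the top byte of the 3rd dword, dword 0 of the FIS having been returned in the frame header per the comment. That is plain shift-and-mask work on raw dwords, as in this stand-alone fragment with made-up sample values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Frame-buffer dwords as the hardware would hand them back (values invented). */
        uint32_t frame_buffer[4] = { 0x0, 0x0, 0x58000000u, 0x00002000u };

        /* transfer_count: low 16 bits of the 4th dword (frame_buffer[3]). */
        uint32_t pio_len = frame_buffer[3] & 0xffffu;

        /* status: top byte of the 3rd dword (frame_buffer[2]). */
        uint8_t status = (frame_buffer[2] >> 24) & 0xffu;

        printf("transfer_count = %u bytes, status = 0x%02x\n", pio_len, status);
        return 0;
}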
*/ + break; + } + + /* Frame is decoded return it to the controller */ + sci_controller_release_frame(ihost, frame_index); + + return status; + } + + case SCI_REQ_STP_PIO_DATA_IN: { + struct dev_to_host_fis *frame_header; + struct sata_fis_data *frame_buffer; + + status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, + frame_index, + (void **)&frame_header); + + if (status != SCI_SUCCESS) { + dev_err(&ihost->pdev->dev, + "%s: SCIC IO Request 0x%p could not get frame " + "header for frame index %d, status %x\n", + __func__, + stp_req, + frame_index, + status); + return status; + } + + if (frame_header->fis_type != FIS_DATA) { + dev_err(&ihost->pdev->dev, + "%s: SCIC PIO Request 0x%p received frame %d " + "with fis type 0x%02x when expecting a data " + "fis.\n", + __func__, + stp_req, + frame_index, + frame_header->fis_type); + + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + + /* Frame is decoded return it to the controller */ + sci_controller_release_frame(ihost, frame_index); + return status; + } + + if (stp_req->sgl.index < 0) { + ireq->saved_rx_frame_index = frame_index; + stp_req->pio_len = 0; + } else { + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, + frame_index, + (void **)&frame_buffer); + + status = sci_stp_request_pio_data_in_copy_data(stp_req, + (u8 *)frame_buffer); + + /* Frame is decoded return it to the controller */ + sci_controller_release_frame(ihost, frame_index); + } + + /* Check for the end of the transfer, are there more + * bytes remaining for this data transfer + */ + if (status != SCI_SUCCESS || stp_req->pio_len != 0) + return status; + + if ((stp_req->status & ATA_BUSY) == 0) { + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + } else { + sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); + } + return status; + } + + case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: { + struct dev_to_host_fis *frame_header; + u32 *frame_buffer; + + status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, + frame_index, + (void **)&frame_header); + if (status != SCI_SUCCESS) { + dev_err(&ihost->pdev->dev, + "%s: SCIC IO Request 0x%p could not get frame " + "header for frame index %d, status %x\n", + __func__, + stp_req, + frame_index, + status); + return status; + } + + switch (frame_header->fis_type) { + case FIS_REGD2H: + sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, + frame_index, + (void **)&frame_buffer); + + sci_controller_copy_sata_response(&ireq->stp.rsp, + frame_header, + frame_buffer); + + /* The command has completed with error */ + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; + break; + + default: + dev_warn(&ihost->pdev->dev, + "%s: IO Request:0x%p Frame Id:%d protocol " + "violation occurred\n", + __func__, + stp_req, + frame_index); + + ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS; + ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION; + break; + } + + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + + /* Frame has been decoded return it to the controller */ + sci_controller_release_frame(ihost, frame_index); + + return status; + } + case SCI_REQ_ABORTING: + /* + * TODO: Is it even possible to get an unsolicited frame in the + * aborting state? 
+ */ + sci_controller_release_frame(ihost, frame_index); + return SCI_SUCCESS; + + default: + dev_warn(&ihost->pdev->dev, + "%s: SCIC IO Request given unexpected frame %x while " + "in state %d\n", + __func__, + frame_index, + state); + + sci_controller_release_frame(ihost, frame_index); + return SCI_FAILURE_INVALID_STATE; + } +} + +static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq, + u32 completion_code) +{ + enum sci_status status = SCI_SUCCESS; + + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + break; + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): + /* We must check ther response buffer to see if the D2H + * Register FIS was received before we got the TC + * completion. + */ + if (ireq->stp.rsp.fis_type == FIS_REGD2H) { + sci_remote_device_suspend(ireq->target_device, + SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); + + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + } else { + /* If we have an error completion status for the + * TC then we can expect a D2H register FIS from + * the device so we must change state to wait + * for it + */ + sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H); + } + break; + + /* TODO Check to see if any of these completion status need to + * wait for the device to host register fis. + */ + /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR + * - this comes only for B0 + */ + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR): + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR): + sci_remote_device_suspend(ireq->target_device, + SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); + /* Fall through to the default case */ + default: + /* All other completion status cause the IO to be complete. */ + ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + break; + } + + return status; +} + +static enum sci_status +stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq, + u32 completion_code) +{ + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; + sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG); + break; + + default: + /* + * All other completion status cause the IO to be complete. + * If a NAK was received, then it is up to the user to retry + * the request. 
+ */ + ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + break; + } + + return SCI_SUCCESS; +} + +static enum sci_status +stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq, + u32 completion_code) +{ + switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { + case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; + sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H); + break; + + default: + /* All other completion status cause the IO to be complete. If + * a NAK was received, then it is up to the user to retry the + * request. + */ + ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + break; + } + + return SCI_SUCCESS; +} + +enum sci_status +sci_io_request_tc_completion(struct isci_request *ireq, + u32 completion_code) +{ + enum sci_base_request_states state; + struct isci_host *ihost = ireq->owning_controller; + + state = ireq->sm.current_state_id; + + switch (state) { + case SCI_REQ_STARTED: + return request_started_state_tc_event(ireq, completion_code); + + case SCI_REQ_TASK_WAIT_TC_COMP: + return ssp_task_request_await_tc_event(ireq, + completion_code); + + case SCI_REQ_SMP_WAIT_RESP: + return smp_request_await_response_tc_event(ireq, + completion_code); + + case SCI_REQ_SMP_WAIT_TC_COMP: + return smp_request_await_tc_event(ireq, completion_code); + + case SCI_REQ_STP_UDMA_WAIT_TC_COMP: + return stp_request_udma_await_tc_event(ireq, + completion_code); + + case SCI_REQ_STP_NON_DATA_WAIT_H2D: + return stp_request_non_data_await_h2d_tc_event(ireq, + completion_code); + + case SCI_REQ_STP_PIO_WAIT_H2D: + return stp_request_pio_await_h2d_completion_tc_event(ireq, + completion_code); + + case SCI_REQ_STP_PIO_DATA_OUT: + return pio_data_out_tx_done_tc_event(ireq, completion_code); + + case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED: + return stp_request_soft_reset_await_h2d_asserted_tc_event(ireq, + completion_code); + + case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG: + return stp_request_soft_reset_await_h2d_diagnostic_tc_event(ireq, + completion_code); + + case SCI_REQ_ABORTING: + return request_aborting_state_tc_event(ireq, + completion_code); + + default: + dev_warn(&ihost->pdev->dev, + "%s: SCIC IO Request given task completion " + "notification %x while in wrong state %d\n", + __func__, + completion_code, + state); + return SCI_FAILURE_INVALID_STATE; + } +} + +/** + * isci_request_process_response_iu() - This function sets the status and + * response iu, in the task struct, from the request object for the upper + * layer driver. + * @sas_task: This parameter is the task struct from the upper layer driver. + * @resp_iu: This parameter points to the response iu of the completed request. + * @dev: This parameter specifies the linux device struct. + * + * none. 
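sci_io_request_tc_completion() above routes a task-context completion to a per-state handler through one large switch, warning and returning SCI_FAILURE_INVALID_STATE for states that should never see one. Purely as a note on the shape of that dispatch, and not something this patch does, the same routing can be written as a handler table indexed by the state enum:

#include <stdint.h>
#include <stdio.h>

enum ex_req_state { EX_STARTED, EX_ABORTING, EX_COMPLETED, EX_STATE_COUNT };

typedef int (*ex_tc_handler)(uint32_t completion_code);

static int ex_started_tc(uint32_t code)  { printf("started: %#x\n", code); return 0; }
static int ex_aborting_tc(uint32_t code) { printf("aborting: %#x\n", code); return 0; }

/* One handler per state; NULL means a TC completion is unexpected there. */
static const ex_tc_handler ex_handlers[EX_STATE_COUNT] = {
        [EX_STARTED]   = ex_started_tc,
        [EX_ABORTING]  = ex_aborting_tc,
        [EX_COMPLETED] = NULL,
};

static int ex_tc_completion(enum ex_req_state state, uint32_t code)
{
        if (state >= EX_STATE_COUNT || !ex_handlers[state]) {
                fprintf(stderr, "TC completion %#x in wrong state %d\n", code, state);
                return -1;
        }
        return ex_handlers[state](code);
}

int main(void)
{
        ex_tc_completion(EX_STARTED, 0x10000);
        ex_tc_completion(EX_COMPLETED, 0x10000);        /* flagged as unexpected */
        return 0;
}

The switch form used in the patch reads more like the request state diagram; the table form only trades that for a smaller dispatch routine.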
+ */ +static void isci_request_process_response_iu( + struct sas_task *task, + struct ssp_response_iu *resp_iu, + struct device *dev) +{ + dev_dbg(dev, + "%s: resp_iu = %p " + "resp_iu->status = 0x%x,\nresp_iu->datapres = %d " + "resp_iu->response_data_len = %x, " + "resp_iu->sense_data_len = %x\nrepsonse data: ", + __func__, + resp_iu, + resp_iu->status, + resp_iu->datapres, + resp_iu->response_data_len, + resp_iu->sense_data_len); + + task->task_status.stat = resp_iu->status; + + /* libsas updates the task status fields based on the response iu. */ + sas_ssp_task_response(dev, task, resp_iu); +} + +/** + * isci_request_set_open_reject_status() - This function prepares the I/O + * completion for OPEN_REJECT conditions. + * @request: This parameter is the completed isci_request object. + * @response_ptr: This parameter specifies the service response for the I/O. + * @status_ptr: This parameter specifies the exec status for the I/O. + * @complete_to_host_ptr: This parameter specifies the action to be taken by + * the LLDD with respect to completing this request or forcing an abort + * condition on the I/O. + * @open_rej_reason: This parameter specifies the encoded reason for the + * abandon-class reject. + * + * none. + */ +static void isci_request_set_open_reject_status( + struct isci_request *request, + struct sas_task *task, + enum service_response *response_ptr, + enum exec_status *status_ptr, + enum isci_completion_selection *complete_to_host_ptr, + enum sas_open_rej_reason open_rej_reason) +{ + /* Task in the target is done. */ + set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); + *response_ptr = SAS_TASK_UNDELIVERED; + *status_ptr = SAS_OPEN_REJECT; + *complete_to_host_ptr = isci_perform_normal_io_completion; + task->task_status.open_rej_reason = open_rej_reason; +} + +/** + * isci_request_handle_controller_specific_errors() - This function decodes + * controller-specific I/O completion error conditions. + * @request: This parameter is the completed isci_request object. + * @response_ptr: This parameter specifies the service response for the I/O. + * @status_ptr: This parameter specifies the exec status for the I/O. + * @complete_to_host_ptr: This parameter specifies the action to be taken by + * the LLDD with respect to completing this request or forcing an abort + * condition on the I/O. + * + * none. + */ +static void isci_request_handle_controller_specific_errors( + struct isci_remote_device *idev, + struct isci_request *request, + struct sas_task *task, + enum service_response *response_ptr, + enum exec_status *status_ptr, + enum isci_completion_selection *complete_to_host_ptr) +{ + unsigned int cstatus; + + cstatus = request->scu_status; + + dev_dbg(&request->isci_host->pdev->dev, + "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR " + "- controller status = 0x%x\n", + __func__, request, cstatus); + + /* Decode the controller-specific errors; most + * important is to recognize those conditions in which + * the target may still have a task outstanding that + * must be aborted. + * + * Note that there are SCU completion codes being + * named in the decode below for which SCIC has already + * done work to handle them in a way other than as + * a controller-specific completion code; these are left + * in the decode below for completeness sake. 
+ */ + switch (cstatus) { + case SCU_TASK_DONE_DMASETUP_DIRERR: + /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */ + case SCU_TASK_DONE_XFERCNT_ERR: + /* Also SCU_TASK_DONE_SMP_UFI_ERR: */ + if (task->task_proto == SAS_PROTOCOL_SMP) { + /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */ + *response_ptr = SAS_TASK_COMPLETE; + + /* See if the device has been/is being stopped. Note + * that we ignore the quiesce state, since we are + * concerned about the actual device state. + */ + if (!idev) + *status_ptr = SAS_DEVICE_UNKNOWN; + else + *status_ptr = SAS_ABORTED_TASK; + + set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); + + *complete_to_host_ptr = + isci_perform_normal_io_completion; + } else { + /* Task in the target is not done. */ + *response_ptr = SAS_TASK_UNDELIVERED; + + if (!idev) + *status_ptr = SAS_DEVICE_UNKNOWN; + else + *status_ptr = SAM_STAT_TASK_ABORTED; + + clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); + + *complete_to_host_ptr = + isci_perform_error_io_completion; + } + + break; + + case SCU_TASK_DONE_CRC_ERR: + case SCU_TASK_DONE_NAK_CMD_ERR: + case SCU_TASK_DONE_EXCESS_DATA: + case SCU_TASK_DONE_UNEXP_FIS: + /* Also SCU_TASK_DONE_UNEXP_RESP: */ + case SCU_TASK_DONE_VIIT_ENTRY_NV: /* TODO - conditions? */ + case SCU_TASK_DONE_IIT_ENTRY_NV: /* TODO - conditions? */ + case SCU_TASK_DONE_RNCNV_OUTBOUND: /* TODO - conditions? */ + /* These are conditions in which the target + * has completed the task, so that no cleanup + * is necessary. + */ + *response_ptr = SAS_TASK_COMPLETE; + + /* See if the device has been/is being stopped. Note + * that we ignore the quiesce state, since we are + * concerned about the actual device state. + */ + if (!idev) + *status_ptr = SAS_DEVICE_UNKNOWN; + else + *status_ptr = SAS_ABORTED_TASK; + + set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); + + *complete_to_host_ptr = isci_perform_normal_io_completion; + break; + + + /* Note that the only open reject completion codes seen here will be + * abandon-class codes; all others are automatically retried in the SCU. + */ + case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: + + isci_request_set_open_reject_status( + request, task, response_ptr, status_ptr, + complete_to_host_ptr, SAS_OREJ_WRONG_DEST); + break; + + case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: + + /* Note - the return of AB0 will change when + * libsas implements detection of zone violations. 
+ */ + isci_request_set_open_reject_status( + request, task, response_ptr, status_ptr, + complete_to_host_ptr, SAS_OREJ_RESV_AB0); + break; + + case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: + + isci_request_set_open_reject_status( + request, task, response_ptr, status_ptr, + complete_to_host_ptr, SAS_OREJ_RESV_AB1); + break; + + case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: + + isci_request_set_open_reject_status( + request, task, response_ptr, status_ptr, + complete_to_host_ptr, SAS_OREJ_RESV_AB2); + break; + + case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: + + isci_request_set_open_reject_status( + request, task, response_ptr, status_ptr, + complete_to_host_ptr, SAS_OREJ_RESV_AB3); + break; + + case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: + + isci_request_set_open_reject_status( + request, task, response_ptr, status_ptr, + complete_to_host_ptr, SAS_OREJ_BAD_DEST); + break; + + case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: + + isci_request_set_open_reject_status( + request, task, response_ptr, status_ptr, + complete_to_host_ptr, SAS_OREJ_STP_NORES); + break; + + case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: + + isci_request_set_open_reject_status( + request, task, response_ptr, status_ptr, + complete_to_host_ptr, SAS_OREJ_EPROTO); + break; + + case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: + + isci_request_set_open_reject_status( + request, task, response_ptr, status_ptr, + complete_to_host_ptr, SAS_OREJ_CONN_RATE); + break; + + case SCU_TASK_DONE_LL_R_ERR: + /* Also SCU_TASK_DONE_ACK_NAK_TO: */ + case SCU_TASK_DONE_LL_PERR: + case SCU_TASK_DONE_LL_SY_TERM: + /* Also SCU_TASK_DONE_NAK_ERR:*/ + case SCU_TASK_DONE_LL_LF_TERM: + /* Also SCU_TASK_DONE_DATA_LEN_ERR: */ + case SCU_TASK_DONE_LL_ABORT_ERR: + case SCU_TASK_DONE_SEQ_INV_TYPE: + /* Also SCU_TASK_DONE_UNEXP_XR: */ + case SCU_TASK_DONE_XR_IU_LEN_ERR: + case SCU_TASK_DONE_INV_FIS_LEN: + /* Also SCU_TASK_DONE_XR_WD_LEN: */ + case SCU_TASK_DONE_SDMA_ERR: + case SCU_TASK_DONE_OFFSET_ERR: + case SCU_TASK_DONE_MAX_PLD_ERR: + case SCU_TASK_DONE_LF_ERR: + case SCU_TASK_DONE_SMP_RESP_TO_ERR: /* Escalate to dev reset? */ + case SCU_TASK_DONE_SMP_LL_RX_ERR: + case SCU_TASK_DONE_UNEXP_DATA: + case SCU_TASK_DONE_UNEXP_SDBFIS: + case SCU_TASK_DONE_REG_ERR: + case SCU_TASK_DONE_SDB_ERR: + case SCU_TASK_DONE_TASK_ABORT: + default: + /* Task in the target is not done. */ + *response_ptr = SAS_TASK_UNDELIVERED; + *status_ptr = SAM_STAT_TASK_ABORTED; + + if (task->task_proto == SAS_PROTOCOL_SMP) { + set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); + + *complete_to_host_ptr = isci_perform_normal_io_completion; + } else { + clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); + + *complete_to_host_ptr = isci_perform_error_io_completion; + } + break; + } +} + +/** + * isci_task_save_for_upper_layer_completion() - This function saves the + * request for later completion to the upper layer driver. + * @host: This parameter is a pointer to the host on which the the request + * should be queued (either as an error or success). + * @request: This parameter is the completed request. + * @response: This parameter is the response code for the completed task. + * @status: This parameter is the status code for the completed task. + * + * none. 
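+ * @task_notification_selection: This parameter selects how the request is
+ * handed back: completed normally to libsas, queued for the error handler,
+ * or signalled to an explicit abort path without notifying libsas.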
+ */ +static void isci_task_save_for_upper_layer_completion( + struct isci_host *host, + struct isci_request *request, + enum service_response response, + enum exec_status status, + enum isci_completion_selection task_notification_selection) +{ + struct sas_task *task = isci_request_access_task(request); + + task_notification_selection + = isci_task_set_completion_status(task, response, status, + task_notification_selection); + + /* Tasks aborted specifically by a call to the lldd_abort_task + * function should not be completed to the host in the regular path. + */ + switch (task_notification_selection) { + + case isci_perform_normal_io_completion: + + /* Normal notification (task_done) */ + dev_dbg(&host->pdev->dev, + "%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n", + __func__, + task, + task->task_status.resp, response, + task->task_status.stat, status); + /* Add to the completed list. */ + list_add(&request->completed_node, + &host->requests_to_complete); + + /* Take the request off the device's pending request list. */ + list_del_init(&request->dev_node); + break; + + case isci_perform_aborted_io_completion: + /* No notification to libsas because this request is + * already in the abort path. + */ + dev_dbg(&host->pdev->dev, + "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n", + __func__, + task, + task->task_status.resp, response, + task->task_status.stat, status); + + /* Wake up whatever process was waiting for this + * request to complete. + */ + WARN_ON(request->io_request_completion == NULL); + + if (request->io_request_completion != NULL) { + + /* Signal whoever is waiting that this + * request is complete. + */ + complete(request->io_request_completion); + } + break; + + case isci_perform_error_io_completion: + /* Use sas_task_abort */ + dev_dbg(&host->pdev->dev, + "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n", + __func__, + task, + task->task_status.resp, response, + task->task_status.stat, status); + /* Add to the aborted list. */ + list_add(&request->completed_node, + &host->requests_to_errorback); + break; + + default: + dev_dbg(&host->pdev->dev, + "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n", + __func__, + task, + task->task_status.resp, response, + task->task_status.stat, status); + + /* Add to the error to libsas list. */ + list_add(&request->completed_node, + &host->requests_to_errorback); + break; + } +} + +static void isci_request_process_stp_response(struct sas_task *task, + void *response_buffer) +{ + struct dev_to_host_fis *d2h_reg_fis = response_buffer; + struct task_status_struct *ts = &task->task_status; + struct ata_task_resp *resp = (void *)&ts->buf[0]; + + resp->frame_len = le16_to_cpu(*(__le16 *)(response_buffer + 6)); + memcpy(&resp->ending_fis[0], response_buffer + 16, 24); + ts->buf_valid_size = sizeof(*resp); + + /** + * If the device fault bit is set in the status register, then + * set the sense data and return. 
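+ * A set ATA_DF (device fault) bit is reported as SAS_PROTO_RESPONSE so the
+ * copied D2H FIS can be examined by the upper layers; otherwise the command
+ * is reported as SAM_STAT_GOOD.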
+ */ + if (d2h_reg_fis->status & ATA_DF) + ts->stat = SAS_PROTO_RESPONSE; + else + ts->stat = SAM_STAT_GOOD; + + ts->resp = SAS_TASK_COMPLETE; +} + +static void isci_request_io_request_complete(struct isci_host *ihost, + struct isci_request *request, + enum sci_io_status completion_status) +{ + struct sas_task *task = isci_request_access_task(request); + struct ssp_response_iu *resp_iu; + void *resp_buf; + unsigned long task_flags; + struct isci_remote_device *idev = isci_lookup_device(task->dev); + enum service_response response = SAS_TASK_UNDELIVERED; + enum exec_status status = SAS_ABORTED_TASK; + enum isci_request_status request_status; + enum isci_completion_selection complete_to_host + = isci_perform_normal_io_completion; + + dev_dbg(&ihost->pdev->dev, + "%s: request = %p, task = %p,\n" + "task->data_dir = %d completion_status = 0x%x\n", + __func__, + request, + task, + task->data_dir, + completion_status); + + spin_lock(&request->state_lock); + request_status = request->status; + + /* Decode the request status. Note that if the request has been + * aborted by a task management function, we don't care + * what the status is. + */ + switch (request_status) { + + case aborted: + /* "aborted" indicates that the request was aborted by a task + * management function, since once a task management request is + * perfomed by the device, the request only completes because + * of the subsequent driver terminate. + * + * Aborted also means an external thread is explicitly managing + * this request, so that we do not complete it up the stack. + * + * The target is still there (since the TMF was successful). + */ + set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); + response = SAS_TASK_COMPLETE; + + /* See if the device has been/is being stopped. Note + * that we ignore the quiesce state, since we are + * concerned about the actual device state. + */ + if (!idev) + status = SAS_DEVICE_UNKNOWN; + else + status = SAS_ABORTED_TASK; + + complete_to_host = isci_perform_aborted_io_completion; + /* This was an aborted request. */ + + spin_unlock(&request->state_lock); + break; + + case aborting: + /* aborting means that the task management function tried and + * failed to abort the request. We need to note the request + * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the + * target as down. + * + * Aborting also means an external thread is explicitly managing + * this request, so that we do not complete it up the stack. + */ + set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); + response = SAS_TASK_UNDELIVERED; + + if (!idev) + /* The device has been /is being stopped. Note that + * we ignore the quiesce state, since we are + * concerned about the actual device state. + */ + status = SAS_DEVICE_UNKNOWN; + else + status = SAS_PHY_DOWN; + + complete_to_host = isci_perform_aborted_io_completion; + + /* This was an aborted request. */ + + spin_unlock(&request->state_lock); + break; + + case terminating: + + /* This was an terminated request. This happens when + * the I/O is being terminated because of an action on + * the device (reset, tear down, etc.), and the I/O needs + * to be completed up the stack. + */ + set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); + response = SAS_TASK_UNDELIVERED; + + /* See if the device has been/is being stopped. Note + * that we ignore the quiesce state, since we are + * concerned about the actual device state. 
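+ * A NULL idev here means the remote device has already been released, so
+ * the status is reported as SAS_DEVICE_UNKNOWN rather than SAS_ABORTED_TASK.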
+ */ + if (!idev) + status = SAS_DEVICE_UNKNOWN; + else + status = SAS_ABORTED_TASK; + + complete_to_host = isci_perform_aborted_io_completion; + + /* This was a terminated request. */ + + spin_unlock(&request->state_lock); + break; + + case dead: + /* This was a terminated request that timed-out during the + * termination process. There is no task to complete to + * libsas. + */ + complete_to_host = isci_perform_normal_io_completion; + spin_unlock(&request->state_lock); + break; + + default: + + /* The request is done from an SCU HW perspective. */ + request->status = completed; + + spin_unlock(&request->state_lock); + + /* This is an active request being completed from the core. */ + switch (completion_status) { + + case SCI_IO_FAILURE_RESPONSE_VALID: + dev_dbg(&ihost->pdev->dev, + "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n", + __func__, + request, + task); + + if (sas_protocol_ata(task->task_proto)) { + resp_buf = &request->stp.rsp; + isci_request_process_stp_response(task, + resp_buf); + } else if (SAS_PROTOCOL_SSP == task->task_proto) { + + /* crack the iu response buffer. */ + resp_iu = &request->ssp.rsp; + isci_request_process_response_iu(task, resp_iu, + &ihost->pdev->dev); + + } else if (SAS_PROTOCOL_SMP == task->task_proto) { + + dev_err(&ihost->pdev->dev, + "%s: SCI_IO_FAILURE_RESPONSE_VALID: " + "SAS_PROTOCOL_SMP protocol\n", + __func__); + + } else + dev_err(&ihost->pdev->dev, + "%s: unknown protocol\n", __func__); + + /* use the task status set in the task struct by the + * isci_request_process_response_iu call. + */ + set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); + response = task->task_status.resp; + status = task->task_status.stat; + break; + + case SCI_IO_SUCCESS: + case SCI_IO_SUCCESS_IO_DONE_EARLY: + + response = SAS_TASK_COMPLETE; + status = SAM_STAT_GOOD; + set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); + + if (task->task_proto == SAS_PROTOCOL_SMP) { + void *rsp = &request->smp.rsp; + + dev_dbg(&ihost->pdev->dev, + "%s: SMP protocol completion\n", + __func__); + + sg_copy_from_buffer( + &task->smp_task.smp_resp, 1, + rsp, sizeof(struct smp_resp)); + } else if (completion_status + == SCI_IO_SUCCESS_IO_DONE_EARLY) { + + /* This was an SSP / STP / SATA transfer. + * There is a possibility that less data than + * the maximum was transferred. + */ + u32 transferred_length = sci_req_tx_bytes(request); + + task->task_status.residual + = task->total_xfer_len - transferred_length; + + /* If there were residual bytes, call this an + * underrun. + */ + if (task->task_status.residual != 0) + status = SAS_DATA_UNDERRUN; + + dev_dbg(&ihost->pdev->dev, + "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n", + __func__, + status); + + } else + dev_dbg(&ihost->pdev->dev, + "%s: SCI_IO_SUCCESS\n", + __func__); + + break; + + case SCI_IO_FAILURE_TERMINATED: + dev_dbg(&ihost->pdev->dev, + "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n", + __func__, + request, + task); + + /* The request was terminated explicitly. No handling + * is needed in the SCSI error handler path. + */ + set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); + response = SAS_TASK_UNDELIVERED; + + /* See if the device has been/is being stopped. Note + * that we ignore the quiesce state, since we are + * concerned about the actual device state. 
+ */ + if (!idev) + status = SAS_DEVICE_UNKNOWN; + else + status = SAS_ABORTED_TASK; + + complete_to_host = isci_perform_normal_io_completion; + break; + + case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR: + + isci_request_handle_controller_specific_errors( + idev, request, task, &response, &status, + &complete_to_host); + + break; + + case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED: + /* This is a special case, in that the I/O completion + * is telling us that the device needs a reset. + * In order for the device reset condition to be + * noticed, the I/O has to be handled in the error + * handler. Set the reset flag and cause the + * SCSI error thread to be scheduled. + */ + spin_lock_irqsave(&task->task_state_lock, task_flags); + task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; + spin_unlock_irqrestore(&task->task_state_lock, task_flags); + + /* Fail the I/O. */ + response = SAS_TASK_UNDELIVERED; + status = SAM_STAT_TASK_ABORTED; + + complete_to_host = isci_perform_error_io_completion; + clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); + break; + + case SCI_FAILURE_RETRY_REQUIRED: + + /* Fail the I/O so it can be retried. */ + response = SAS_TASK_UNDELIVERED; + if (!idev) + status = SAS_DEVICE_UNKNOWN; + else + status = SAS_ABORTED_TASK; + + complete_to_host = isci_perform_normal_io_completion; + set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); + break; + + + default: + /* Catch any otherwise unhandled error codes here. */ + dev_dbg(&ihost->pdev->dev, + "%s: invalid completion code: 0x%x - " + "isci_request = %p\n", + __func__, completion_status, request); + + response = SAS_TASK_UNDELIVERED; + + /* See if the device has been/is being stopped. Note + * that we ignore the quiesce state, since we are + * concerned about the actual device state. + */ + if (!idev) + status = SAS_DEVICE_UNKNOWN; + else + status = SAS_ABORTED_TASK; + + if (SAS_PROTOCOL_SMP == task->task_proto) { + set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); + complete_to_host = isci_perform_normal_io_completion; + } else { + clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); + complete_to_host = isci_perform_error_io_completion; + } + break; + } + break; + } + + switch (task->task_proto) { + case SAS_PROTOCOL_SSP: + if (task->data_dir == DMA_NONE) + break; + if (task->num_scatter == 0) + /* 0 indicates a single dma address */ + dma_unmap_single(&ihost->pdev->dev, + request->zero_scatter_daddr, + task->total_xfer_len, task->data_dir); + else /* unmap the sgl dma addresses */ + dma_unmap_sg(&ihost->pdev->dev, task->scatter, + request->num_sg_entries, task->data_dir); + break; + case SAS_PROTOCOL_SMP: { + struct scatterlist *sg = &task->smp_task.smp_req; + struct smp_req *smp_req; + void *kaddr; + + dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE); + + /* need to swab it back in case the command buffer is re-used */ + kaddr = kmap_atomic(sg_page(sg), KM_IRQ0); + smp_req = kaddr + sg->offset; + sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32)); + kunmap_atomic(kaddr, KM_IRQ0); + break; + } + default: + break; + } + + /* Put the completed request on the correct list */ + isci_task_save_for_upper_layer_completion(ihost, request, response, + status, complete_to_host + ); + + /* complete the io request to the core. */ + sci_controller_complete_io(ihost, request->target_device, request); + isci_put_device(idev); + + /* set terminated handle so it cannot be completed or + * terminated again, and to cause any calls into abort + * task to recognize the already completed case. 
+ */ + set_bit(IREQ_TERMINATED, &request->flags); +} + +static void sci_request_started_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); + struct domain_device *dev = ireq->target_device->domain_dev; + struct sas_task *task; + + /* XXX as hch said always creating an internal sas_task for tmf + * requests would simplify the driver + */ + task = ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL; + + /* all unaccelerated request types (non ssp or ncq) handled with + * substates + */ + if (!task && dev->dev_type == SAS_END_DEV) { + sci_change_state(sm, SCI_REQ_TASK_WAIT_TC_COMP); + } else if (!task && + (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high || + isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) { + sci_change_state(sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED); + } else if (task && task->task_proto == SAS_PROTOCOL_SMP) { + sci_change_state(sm, SCI_REQ_SMP_WAIT_RESP); + } else if (task && sas_protocol_ata(task->task_proto) && + !task->ata_task.use_ncq) { + u32 state; + + if (task->data_dir == DMA_NONE) + state = SCI_REQ_STP_NON_DATA_WAIT_H2D; + else if (task->ata_task.dma_xfer) + state = SCI_REQ_STP_UDMA_WAIT_TC_COMP; + else /* PIO */ + state = SCI_REQ_STP_PIO_WAIT_H2D; + + sci_change_state(sm, state); + } +} + +static void sci_request_completed_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); + struct isci_host *ihost = ireq->owning_controller; + + /* Tell the SCI_USER that the IO request is complete */ + if (!test_bit(IREQ_TMF, &ireq->flags)) + isci_request_io_request_complete(ihost, ireq, + ireq->sci_status); + else + isci_task_request_complete(ihost, ireq, ireq->sci_status); +} + +static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm) +{ + struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); + + /* Setting the abort bit in the Task Context is required by the silicon. 
*/ + ireq->tc->abort = 1; +} + +static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm) +{ + struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); + + ireq->target_device->working_request = ireq; +} + +static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm) +{ + struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); + + ireq->target_device->working_request = ireq; +} + +static void sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm) +{ + struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); + + ireq->target_device->working_request = ireq; +} + +static void sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm) +{ + struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); + struct scu_task_context *tc = ireq->tc; + struct host_to_dev_fis *h2d_fis; + enum sci_status status; + + /* Clear the SRST bit */ + h2d_fis = &ireq->stp.cmd; + h2d_fis->control = 0; + + /* Clear the TC control bit */ + tc->control_frame = 0; + + status = sci_controller_continue_io(ireq); + WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n"); +} + +static const struct sci_base_state sci_request_state_table[] = { + [SCI_REQ_INIT] = { }, + [SCI_REQ_CONSTRUCTED] = { }, + [SCI_REQ_STARTED] = { + .enter_state = sci_request_started_state_enter, + }, + [SCI_REQ_STP_NON_DATA_WAIT_H2D] = { + .enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter, + }, + [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { }, + [SCI_REQ_STP_PIO_WAIT_H2D] = { + .enter_state = sci_stp_request_started_pio_await_h2d_completion_enter, + }, + [SCI_REQ_STP_PIO_WAIT_FRAME] = { }, + [SCI_REQ_STP_PIO_DATA_IN] = { }, + [SCI_REQ_STP_PIO_DATA_OUT] = { }, + [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { }, + [SCI_REQ_STP_UDMA_WAIT_D2H] = { }, + [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = { + .enter_state = sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter, + }, + [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = { + .enter_state = sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter, + }, + [SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { }, + [SCI_REQ_TASK_WAIT_TC_COMP] = { }, + [SCI_REQ_TASK_WAIT_TC_RESP] = { }, + [SCI_REQ_SMP_WAIT_RESP] = { }, + [SCI_REQ_SMP_WAIT_TC_COMP] = { }, + [SCI_REQ_COMPLETED] = { + .enter_state = sci_request_completed_state_enter, + }, + [SCI_REQ_ABORTING] = { + .enter_state = sci_request_aborting_state_enter, + }, + [SCI_REQ_FINAL] = { }, +}; + +static void +sci_general_request_construct(struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq) +{ + sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT); + + ireq->target_device = idev; + ireq->protocol = SCIC_NO_PROTOCOL; + ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX; + + ireq->sci_status = SCI_SUCCESS; + ireq->scu_status = 0; + ireq->post_context = 0xFFFFFFFF; +} + +static enum sci_status +sci_io_request_construct(struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq) +{ + struct domain_device *dev = idev->domain_dev; + enum sci_status status = SCI_SUCCESS; + + /* Build the common part of the request */ + sci_general_request_construct(ihost, idev, ireq); + + if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) + return SCI_FAILURE_INVALID_REMOTE_DEVICE; + + if (dev->dev_type == SAS_END_DEV) + /* pass */; + else 
if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) + memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd)); + else if (dev_is_expander(dev)) + /* pass */; + else + return SCI_FAILURE_UNSUPPORTED_PROTOCOL; + + memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab)); + + return status; +} + +enum sci_status sci_task_request_construct(struct isci_host *ihost, + struct isci_remote_device *idev, + u16 io_tag, struct isci_request *ireq) +{ + struct domain_device *dev = idev->domain_dev; + enum sci_status status = SCI_SUCCESS; + + /* Build the common part of the request */ + sci_general_request_construct(ihost, idev, ireq); + + if (dev->dev_type == SAS_END_DEV || + dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { + set_bit(IREQ_TMF, &ireq->flags); + memset(ireq->tc, 0, sizeof(struct scu_task_context)); + } else + status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; + + return status; +} + +static enum sci_status isci_request_ssp_request_construct( + struct isci_request *request) +{ + enum sci_status status; + + dev_dbg(&request->isci_host->pdev->dev, + "%s: request = %p\n", + __func__, + request); + status = sci_io_request_construct_basic_ssp(request); + return status; +} + +static enum sci_status isci_request_stp_request_construct(struct isci_request *ireq) +{ + struct sas_task *task = isci_request_access_task(ireq); + struct host_to_dev_fis *fis = &ireq->stp.cmd; + struct ata_queued_cmd *qc = task->uldd_task; + enum sci_status status; + + dev_dbg(&ireq->isci_host->pdev->dev, + "%s: ireq = %p\n", + __func__, + ireq); + + memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); + if (!task->ata_task.device_control_reg_update) + fis->flags |= 0x80; + fis->flags &= 0xF0; + + status = sci_io_request_construct_basic_sata(ireq); + + if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE || + qc->tf.command == ATA_CMD_FPDMA_READ)) { + fis->sector_count = qc->tag << 3; + ireq->tc->type.stp.ncq_tag = qc->tag; + } + + return status; +} + +static enum sci_status +sci_io_request_construct_smp(struct device *dev, + struct isci_request *ireq, + struct sas_task *task) +{ + struct scatterlist *sg = &task->smp_task.smp_req; + struct isci_remote_device *idev; + struct scu_task_context *task_context; + struct isci_port *iport; + struct smp_req *smp_req; + void *kaddr; + u8 req_len; + u32 cmd; + + kaddr = kmap_atomic(sg_page(sg), KM_IRQ0); + smp_req = kaddr + sg->offset; + /* + * Look at the SMP requests' header fields; for certain SAS 1.x SMP + * functions under SAS 2.0, a zero request length really indicates + * a non-zero default length. + */ + if (smp_req->req_len == 0) { + switch (smp_req->func) { + case SMP_DISCOVER: + case SMP_REPORT_PHY_ERR_LOG: + case SMP_REPORT_PHY_SATA: + case SMP_REPORT_ROUTE_INFO: + smp_req->req_len = 2; + break; + case SMP_CONF_ROUTE_INFO: + case SMP_PHY_CONTROL: + case SMP_PHY_TEST_FUNCTION: + smp_req->req_len = 9; + break; + /* Default - zero is a valid default for 2.0. */ + } + } + req_len = smp_req->req_len; + sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32)); + cmd = *(u32 *) smp_req; + kunmap_atomic(kaddr, KM_IRQ0); + + if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE)) + return SCI_FAILURE; + + ireq->protocol = SCIC_SMP_PROTOCOL; + + /* byte swap the smp request. 
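+ * (The swab32 copy itself is done above via sci_swab32_cpy(), while the
+ * request page is still kmapped and before it is DMA mapped.)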
+ */
+
+ task_context = ireq->tc;
+
+ idev = ireq->target_device;
+ iport = idev->owning_port;
+
+ /*
+ * Fill in the TC with its required data
+ * 00h
+ */
+ task_context->priority = 0;
+ task_context->initiator_request = 1;
+ task_context->connection_rate = idev->connection_rate;
+ task_context->protocol_engine_index = ISCI_PEG;
+ task_context->logical_port_index = iport->physical_port_index;
+ task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
+ task_context->abort = 0;
+ task_context->valid = SCU_TASK_CONTEXT_VALID;
+ task_context->context_type = SCU_TASK_CONTEXT_TYPE;
+
+ /* 04h */
+ task_context->remote_node_index = idev->rnc.remote_node_index;
+ task_context->command_code = 0;
+ task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;
+
+ /* 08h */
+ task_context->link_layer_control = 0;
+ task_context->do_not_dma_ssp_good_response = 1;
+ task_context->strict_ordering = 0;
+ task_context->control_frame = 1;
+ task_context->timeout_enable = 0;
+ task_context->block_guard_enable = 0;
+
+ /* 0ch */
+ task_context->address_modifier = 0;
+
+ /* 10h */
+ task_context->ssp_command_iu_length = req_len;
+
+ /* 14h */
+ task_context->transfer_length_bytes = 0;
+
+ /*
+ * 18h ~ 30h, protocol specific
+ * since the command IU has been built by the framework at this point, we
+ * just copy the first DWord from the command IU to this location. */
+ memcpy(&task_context->type.smp, &cmd, sizeof(u32));
+
+ /*
+ * 40h
+ * "For SMP you could program it to zero. We would prefer that way
+ * so that done code will be consistent." - Venki
+ */
+ task_context->task_phase = 0;
+
+ ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+ (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+ (iport->physical_port_index <<
+ SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+ ISCI_TAG_TCI(ireq->io_tag));
+ /*
+ * Copy the physical address for the command buffer to the SCU Task
+ * Context; the command buffer should not contain the command header.
+ */
+ task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg));
+ task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32));
+
+ /* SMP response comes as UF, so no need to set response IU address. */
+ task_context->response_iu_upper = 0;
+ task_context->response_iu_lower = 0;
+
+ sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
+
+ return SCI_SUCCESS;
+}
+
+/*
+ * isci_smp_request_build() - This function builds the smp request.
+ * @ireq: This parameter points to the isci_request allocated in the
+ * request construct function.
+ *
+ * SCI_SUCCESS on successful completion, or specific failure code.
+ */
+static enum sci_status isci_smp_request_build(struct isci_request *ireq)
+{
+ struct sas_task *task = isci_request_access_task(ireq);
+ struct device *dev = &ireq->isci_host->pdev->dev;
+ enum sci_status status = SCI_FAILURE;
+
+ status = sci_io_request_construct_smp(dev, ireq, task);
+ if (status != SCI_SUCCESS)
+ dev_dbg(&ireq->isci_host->pdev->dev,
+ "%s: failed with status = %d\n",
+ __func__,
+ status);
+
+ return status;
+}
+
+/**
+ * isci_io_request_build() - This function builds the io request object.
+ * @ihost: This parameter specifies the ISCI host object
+ * @request: This parameter points to the isci_request object allocated in the
+ * request construct function.
+ * @idev: This parameter is the handle for the sci core's remote device
+ * object that is the destination for this request.
+ *
+ * SCI_SUCCESS on successful completion, or specific failure code.
+ */ +static enum sci_status isci_io_request_build(struct isci_host *ihost, + struct isci_request *request, + struct isci_remote_device *idev) +{ + enum sci_status status = SCI_SUCCESS; + struct sas_task *task = isci_request_access_task(request); + + dev_dbg(&ihost->pdev->dev, + "%s: idev = 0x%p; request = %p, " + "num_scatter = %d\n", + __func__, + idev, + request, + task->num_scatter); + + /* map the sgl addresses, if present. + * libata does the mapping for sata devices + * before we get the request. + */ + if (task->num_scatter && + !sas_protocol_ata(task->task_proto) && + !(SAS_PROTOCOL_SMP & task->task_proto)) { + + request->num_sg_entries = dma_map_sg( + &ihost->pdev->dev, + task->scatter, + task->num_scatter, + task->data_dir + ); + + if (request->num_sg_entries == 0) + return SCI_FAILURE_INSUFFICIENT_RESOURCES; + } + + status = sci_io_request_construct(ihost, idev, request); + + if (status != SCI_SUCCESS) { + dev_dbg(&ihost->pdev->dev, + "%s: failed request construct\n", + __func__); + return SCI_FAILURE; + } + + switch (task->task_proto) { + case SAS_PROTOCOL_SMP: + status = isci_smp_request_build(request); + break; + case SAS_PROTOCOL_SSP: + status = isci_request_ssp_request_construct(request); + break; + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: + status = isci_request_stp_request_construct(request); + break; + default: + dev_dbg(&ihost->pdev->dev, + "%s: unknown protocol\n", __func__); + return SCI_FAILURE; + } + + return SCI_SUCCESS; +} + +static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag) +{ + struct isci_request *ireq; + + ireq = ihost->reqs[ISCI_TAG_TCI(tag)]; + ireq->io_tag = tag; + ireq->io_request_completion = NULL; + ireq->flags = 0; + ireq->num_sg_entries = 0; + INIT_LIST_HEAD(&ireq->completed_node); + INIT_LIST_HEAD(&ireq->dev_node); + isci_request_change_state(ireq, allocated); + + return ireq; +} + +static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost, + struct sas_task *task, + u16 tag) +{ + struct isci_request *ireq; + + ireq = isci_request_from_tag(ihost, tag); + ireq->ttype_ptr.io_task_ptr = task; + ireq->ttype = io_task; + task->lldd_task = ireq; + + return ireq; +} + +struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost, + struct isci_tmf *isci_tmf, + u16 tag) +{ + struct isci_request *ireq; + + ireq = isci_request_from_tag(ihost, tag); + ireq->ttype_ptr.tmf_task_ptr = isci_tmf; + ireq->ttype = tmf_task; + + return ireq; +} + +int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev, + struct sas_task *task, u16 tag) +{ + enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; + struct isci_request *ireq; + unsigned long flags; + int ret = 0; + + /* do common allocation and init of request object. */ + ireq = isci_io_request_from_tag(ihost, task, tag); + + status = isci_io_request_build(ihost, ireq, idev); + if (status != SCI_SUCCESS) { + dev_dbg(&ihost->pdev->dev, + "%s: request_construct failed - status = 0x%x\n", + __func__, + status); + return status; + } + + spin_lock_irqsave(&ihost->scic_lock, flags); + + if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) { + + if (isci_task_is_ncq_recovery(task)) { + + /* The device is in an NCQ recovery state. Issue the + * request on the task side. Note that it will + * complete on the I/O request side because the + * request was built that way (ie. + * ireq->is_task_management_request is false). 
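+ * (isci_task_is_ncq_recovery() matches the READ LOG EXT command for the
+ * SATA NCQ error log, the request libata issues to recover from an NCQ
+ * error.)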
+ */ + status = sci_controller_start_task(ihost, + idev, + ireq); + } else { + status = SCI_FAILURE; + } + } else { + /* send the request, let the core assign the IO TAG. */ + status = sci_controller_start_io(ihost, idev, + ireq); + } + + if (status != SCI_SUCCESS && + status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { + dev_dbg(&ihost->pdev->dev, + "%s: failed request start (0x%x)\n", + __func__, status); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + return status; + } + + /* Either I/O started OK, or the core has signaled that + * the device needs a target reset. + * + * In either case, hold onto the I/O for later. + * + * Update it's status and add it to the list in the + * remote device object. + */ + list_add(&ireq->dev_node, &idev->reqs_in_process); + + if (status == SCI_SUCCESS) { + isci_request_change_state(ireq, started); + } else { + /* The request did not really start in the + * hardware, so clear the request handle + * here so no terminations will be done. + */ + set_bit(IREQ_TERMINATED, &ireq->flags); + isci_request_change_state(ireq, completed); + } + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + if (status == + SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { + /* Signal libsas that we need the SCSI error + * handler thread to work on this I/O and that + * we want a device reset. + */ + spin_lock_irqsave(&task->task_state_lock, flags); + task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; + spin_unlock_irqrestore(&task->task_state_lock, flags); + + /* Cause this task to be scheduled in the SCSI error + * handler thread. + */ + isci_execpath_callback(ihost, task, + sas_task_abort); + + /* Change the status, since we are holding + * the I/O until it is managed by the SCSI + * error handler. + */ + status = SCI_SUCCESS; + } + + return ret; +} diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h new file mode 100644 index 00000000000..7a1d5a9778e --- /dev/null +++ b/drivers/scsi/isci/request.h @@ -0,0 +1,448 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _ISCI_REQUEST_H_ +#define _ISCI_REQUEST_H_ + +#include "isci.h" +#include "host.h" +#include "scu_task_context.h" + +/** + * struct isci_request_status - This enum defines the possible states of an I/O + * request. + * + * + */ +enum isci_request_status { + unallocated = 0x00, + allocated = 0x01, + started = 0x02, + completed = 0x03, + aborting = 0x04, + aborted = 0x05, + terminating = 0x06, + dead = 0x07 +}; + +enum task_type { + io_task = 0, + tmf_task = 1 +}; + +enum sci_request_protocol { + SCIC_NO_PROTOCOL, + SCIC_SMP_PROTOCOL, + SCIC_SSP_PROTOCOL, + SCIC_STP_PROTOCOL +}; /* XXX remove me, use sas_task.{dev|task_proto} instead */; + +/** + * isci_stp_request - extra request infrastructure to handle pio/atapi protocol + * @pio_len - number of bytes requested at PIO setup + * @status - pio setup ending status value to tell us if we need + * to wait for another fis or if the transfer is complete. Upon + * receipt of a d2h fis this will be the status field of that fis. + * @sgl - track pio transfer progress as we iterate through the sgl + * @device_cdb_len - atapi device advertises it's transfer constraints at setup + */ +struct isci_stp_request { + u32 pio_len; + u8 status; + + struct isci_stp_pio_sgl { + int index; + u8 set; + u32 offset; + } sgl; + u32 device_cdb_len; +}; + +struct isci_request { + enum isci_request_status status; + #define IREQ_COMPLETE_IN_TARGET 0 + #define IREQ_TERMINATED 1 + #define IREQ_TMF 2 + #define IREQ_ACTIVE 3 + unsigned long flags; + /* XXX kill ttype and ttype_ptr, allocate full sas_task */ + enum task_type ttype; + union ttype_ptr_union { + struct sas_task *io_task_ptr; /* When ttype==io_task */ + struct isci_tmf *tmf_task_ptr; /* When ttype==tmf_task */ + } ttype_ptr; + struct isci_host *isci_host; + /* For use in the requests_to_{complete|abort} lists: */ + struct list_head completed_node; + /* For use in the reqs_in_process list: */ + struct list_head dev_node; + spinlock_t state_lock; + dma_addr_t request_daddr; + dma_addr_t zero_scatter_daddr; + unsigned int num_sg_entries; + /* Note: "io_request_completion" is completed in two different ways + * depending on whether this is a TMF or regular request. 
+ * - TMF requests are completed in the thread that started them; + * - regular requests are completed in the request completion callback + * function. + * This difference in operation allows the aborter of a TMF request + * to be sure that once the TMF request completes, the I/O that the + * TMF was aborting is guaranteed to have completed. + * + * XXX kill io_request_completion + */ + struct completion *io_request_completion; + struct sci_base_state_machine sm; + struct isci_host *owning_controller; + struct isci_remote_device *target_device; + u16 io_tag; + enum sci_request_protocol protocol; + u32 scu_status; /* hardware result */ + u32 sci_status; /* upper layer disposition */ + u32 post_context; + struct scu_task_context *tc; + /* could be larger with sg chaining */ + #define SCU_SGL_SIZE ((SCI_MAX_SCATTER_GATHER_ELEMENTS + 1) / 2) + struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32))); + /* This field is a pointer to the stored rx frame data. It is used in + * STP internal requests and SMP response frames. If this field is + * non-NULL the saved frame must be released on IO request completion. + */ + u32 saved_rx_frame_index; + + union { + struct { + union { + struct ssp_cmd_iu cmd; + struct ssp_task_iu tmf; + }; + union { + struct ssp_response_iu rsp; + u8 rsp_buf[SSP_RESP_IU_MAX_SIZE]; + }; + } ssp; + struct { + struct smp_resp rsp; + } smp; + struct { + struct isci_stp_request req; + struct host_to_dev_fis cmd; + struct dev_to_host_fis rsp; + } stp; + }; +}; + +static inline struct isci_request *to_ireq(struct isci_stp_request *stp_req) +{ + struct isci_request *ireq; + + ireq = container_of(stp_req, typeof(*ireq), stp.req); + return ireq; +} + +/** + * enum sci_base_request_states - This enumeration depicts all the states for + * the common request state machine. + * + * + */ +enum sci_base_request_states { + /* + * Simply the initial state for the base request state machine. + */ + SCI_REQ_INIT, + + /* + * This state indicates that the request has been constructed. + * This state is entered from the INITIAL state. + */ + SCI_REQ_CONSTRUCTED, + + /* + * This state indicates that the request has been started. This state + * is entered from the CONSTRUCTED state. + */ + SCI_REQ_STARTED, + + SCI_REQ_STP_UDMA_WAIT_TC_COMP, + SCI_REQ_STP_UDMA_WAIT_D2H, + + SCI_REQ_STP_NON_DATA_WAIT_H2D, + SCI_REQ_STP_NON_DATA_WAIT_D2H, + + SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED, + SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG, + SCI_REQ_STP_SOFT_RESET_WAIT_D2H, + + /* + * While in this state the IO request object is waiting for the TC + * completion notification for the H2D Register FIS + */ + SCI_REQ_STP_PIO_WAIT_H2D, + + /* + * While in this state the IO request object is waiting for either a + * PIO Setup FIS or a D2H register FIS. The type of frame received is + * based on the result of the prior frame and line conditions. + */ + SCI_REQ_STP_PIO_WAIT_FRAME, + + /* + * While in this state the IO request object is waiting for a DATA + * frame from the device. + */ + SCI_REQ_STP_PIO_DATA_IN, + + /* + * While in this state the IO request object is waiting to transmit + * the next data frame to the device. + */ + SCI_REQ_STP_PIO_DATA_OUT, + + /* + * The AWAIT_TC_COMPLETION sub-state indicates that the started raw + * task management request is waiting for the transmission of the + * initial frame (i.e. command, task, etc.). 
+ */ + SCI_REQ_TASK_WAIT_TC_COMP, + + /* + * This sub-state indicates that the started task management request + * is waiting for the reception of an unsolicited frame + * (i.e. response IU). + */ + SCI_REQ_TASK_WAIT_TC_RESP, + + /* + * This sub-state indicates that the started task management request + * is waiting for the reception of an unsolicited frame + * (i.e. response IU). + */ + SCI_REQ_SMP_WAIT_RESP, + + /* + * The AWAIT_TC_COMPLETION sub-state indicates that the started SMP + * request is waiting for the transmission of the initial frame + * (i.e. command, task, etc.). + */ + SCI_REQ_SMP_WAIT_TC_COMP, + + /* + * This state indicates that the request has completed. + * This state is entered from the STARTED state. This state is entered + * from the ABORTING state. + */ + SCI_REQ_COMPLETED, + + /* + * This state indicates that the request is in the process of being + * terminated/aborted. + * This state is entered from the CONSTRUCTED state. + * This state is entered from the STARTED state. + */ + SCI_REQ_ABORTING, + + /* + * Simply the final state for the base request state machine. + */ + SCI_REQ_FINAL, +}; + +enum sci_status sci_request_start(struct isci_request *ireq); +enum sci_status sci_io_request_terminate(struct isci_request *ireq); +enum sci_status +sci_io_request_event_handler(struct isci_request *ireq, + u32 event_code); +enum sci_status +sci_io_request_frame_handler(struct isci_request *ireq, + u32 frame_index); +enum sci_status +sci_task_request_terminate(struct isci_request *ireq); +extern enum sci_status +sci_request_complete(struct isci_request *ireq); +extern enum sci_status +sci_io_request_tc_completion(struct isci_request *ireq, u32 code); + +/* XXX open code in caller */ +static inline dma_addr_t +sci_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr) +{ + + char *requested_addr = (char *)virt_addr; + char *base_addr = (char *)ireq; + + BUG_ON(requested_addr < base_addr); + BUG_ON((requested_addr - base_addr) >= sizeof(*ireq)); + + return ireq->request_daddr + (requested_addr - base_addr); +} + +/** + * isci_request_change_state() - This function sets the status of the request + * object. + * @request: This parameter points to the isci_request object + * @status: This Parameter is the new status of the object + * + */ +static inline enum isci_request_status +isci_request_change_state(struct isci_request *isci_request, + enum isci_request_status status) +{ + enum isci_request_status old_state; + unsigned long flags; + + dev_dbg(&isci_request->isci_host->pdev->dev, + "%s: isci_request = %p, state = 0x%x\n", + __func__, + isci_request, + status); + + BUG_ON(isci_request == NULL); + + spin_lock_irqsave(&isci_request->state_lock, flags); + old_state = isci_request->status; + isci_request->status = status; + spin_unlock_irqrestore(&isci_request->state_lock, flags); + + return old_state; +} + +/** + * isci_request_change_started_to_newstate() - This function sets the status of + * the request object. + * @request: This parameter points to the isci_request object + * @status: This Parameter is the new status of the object + * + * state previous to any change. 
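+ * @completion_ptr: This parameter is saved as the kernel completion structure
+ * signalled when the old request completes.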
+ */ +static inline enum isci_request_status +isci_request_change_started_to_newstate(struct isci_request *isci_request, + struct completion *completion_ptr, + enum isci_request_status newstate) +{ + enum isci_request_status old_state; + unsigned long flags; + + spin_lock_irqsave(&isci_request->state_lock, flags); + + old_state = isci_request->status; + + if (old_state == started || old_state == aborting) { + BUG_ON(isci_request->io_request_completion != NULL); + + isci_request->io_request_completion = completion_ptr; + isci_request->status = newstate; + } + + spin_unlock_irqrestore(&isci_request->state_lock, flags); + + dev_dbg(&isci_request->isci_host->pdev->dev, + "%s: isci_request = %p, old_state = 0x%x\n", + __func__, + isci_request, + old_state); + + return old_state; +} + +/** + * isci_request_change_started_to_aborted() - This function sets the status of + * the request object. + * @request: This parameter points to the isci_request object + * @completion_ptr: This parameter is saved as the kernel completion structure + * signalled when the old request completes. + * + * state previous to any change. + */ +static inline enum isci_request_status +isci_request_change_started_to_aborted(struct isci_request *isci_request, + struct completion *completion_ptr) +{ + return isci_request_change_started_to_newstate(isci_request, + completion_ptr, + aborted); +} + +#define isci_request_access_task(req) ((req)->ttype_ptr.io_task_ptr) + +#define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr) + +struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost, + struct isci_tmf *isci_tmf, + u16 tag); +int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev, + struct sas_task *task, u16 tag); +void isci_terminate_pending_requests(struct isci_host *ihost, + struct isci_remote_device *idev); +enum sci_status +sci_task_request_construct(struct isci_host *ihost, + struct isci_remote_device *idev, + u16 io_tag, + struct isci_request *ireq); +enum sci_status +sci_task_request_construct_ssp(struct isci_request *ireq); +enum sci_status +sci_task_request_construct_sata(struct isci_request *ireq); +void sci_smp_request_copy_response(struct isci_request *ireq); + +static inline int isci_task_is_ncq_recovery(struct sas_task *task) +{ + return (sas_protocol_ata(task->task_proto) && + task->ata_task.fis.command == ATA_CMD_READ_LOG_EXT && + task->ata_task.fis.lbal == ATA_LOG_SATA_NCQ); + +} + +#endif /* !defined(_ISCI_REQUEST_H_) */ diff --git a/drivers/scsi/isci/sas.h b/drivers/scsi/isci/sas.h new file mode 100644 index 00000000000..462b15174d3 --- /dev/null +++ b/drivers/scsi/isci/sas.h @@ -0,0 +1,219 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _SCI_SAS_H_ +#define _SCI_SAS_H_ + +#include <linux/kernel.h> + +/* + * SATA FIS Types These constants depict the various SATA FIS types devined in + * the serial ATA specification. + * XXX: This needs to go into <scsi/sas.h> + */ +#define FIS_REGH2D 0x27 +#define FIS_REGD2H 0x34 +#define FIS_SETDEVBITS 0xA1 +#define FIS_DMA_ACTIVATE 0x39 +#define FIS_DMA_SETUP 0x41 +#define FIS_BIST_ACTIVATE 0x58 +#define FIS_PIO_SETUP 0x5F +#define FIS_DATA 0x46 + +/**************************************************************************/ +#define SSP_RESP_IU_MAX_SIZE 280 + +/* + * contents of the SSP COMMAND INFORMATION UNIT. + * For specific information on each of these individual fields please + * reference the SAS specification SSP transport layer section. + * XXX: This needs to go into <scsi/sas.h> + */ +struct ssp_cmd_iu { + u8 LUN[8]; + u8 add_cdb_len:6; + u8 _r_a:2; + u8 _r_b; + u8 en_fburst:1; + u8 task_prio:4; + u8 task_attr:3; + u8 _r_c; + + u8 cdb[16]; +} __packed; + +/* + * contents of the SSP TASK INFORMATION UNIT. + * For specific information on each of these individual fields please + * reference the SAS specification SSP transport layer section. + * XXX: This needs to go into <scsi/sas.h> + */ +struct ssp_task_iu { + u8 LUN[8]; + u8 _r_a; + u8 task_func; + u8 _r_b[4]; + u16 task_tag; + u8 _r_c[12]; +} __packed; + + +/* + * struct smp_req_phy_id - This structure defines the contents of + * an SMP Request that is comprised of the struct smp_request_header and a + * phy identifier. + * Examples: SMP_REQUEST_DISCOVER, SMP_REQUEST_REPORT_PHY_SATA. 
+ * + * For specific information on each of these individual fields please reference + * the SAS specification. + */ +struct smp_req_phy_id { + u8 _r_a[4]; /* bytes 4-7 */ + + u8 ign_zone_grp:1; /* byte 8 */ + u8 _r_b:7; + + u8 phy_id; /* byte 9 */ + u8 _r_c; /* byte 10 */ + u8 _r_d; /* byte 11 */ +} __packed; + +/* + * struct smp_req_config_route_info - This structure defines the + * contents of an SMP Configure Route Information request. + * + * For specific information on each of these individual fields please reference + * the SAS specification. + */ +struct smp_req_conf_rtinfo { + u16 exp_change_cnt; /* bytes 4-5 */ + u8 exp_rt_idx_hi; /* byte 6 */ + u8 exp_rt_idx; /* byte 7 */ + + u8 _r_a; /* byte 8 */ + u8 phy_id; /* byte 9 */ + u16 _r_b; /* bytes 10-11 */ + + u8 _r_c:7; /* byte 12 */ + u8 dis_rt_entry:1; + u8 _r_d[3]; /* bytes 13-15 */ + + u8 rt_sas_addr[8]; /* bytes 16-23 */ + u8 _r_e[16]; /* bytes 24-39 */ +} __packed; + +/* + * struct smp_req_phycntl - This structure defines the contents of an + * SMP Phy Controller request. + * + * For specific information on each of these individual fields please reference + * the SAS specification. + */ +struct smp_req_phycntl { + u16 exp_change_cnt; /* byte 4-5 */ + + u8 _r_a[3]; /* bytes 6-8 */ + + u8 phy_id; /* byte 9 */ + u8 phy_op; /* byte 10 */ + + u8 upd_pathway:1; /* byte 11 */ + u8 _r_b:7; + + u8 _r_c[12]; /* byte 12-23 */ + + u8 att_dev_name[8]; /* byte 24-31 */ + + u8 _r_d:4; /* byte 32 */ + u8 min_linkrate:4; + + u8 _r_e:4; /* byte 33 */ + u8 max_linkrate:4; + + u8 _r_f[2]; /* byte 34-35 */ + + u8 pathway:4; /* byte 36 */ + u8 _r_g:4; + + u8 _r_h[3]; /* bytes 37-39 */ +} __packed; + +/* + * struct smp_req - This structure simply unionizes the existing request + * structures into a common request type. + * + * XXX: This data structure may need to go to scsi/sas.h + */ +struct smp_req { + u8 type; /* byte 0 */ + u8 func; /* byte 1 */ + u8 alloc_resp_len; /* byte 2 */ + u8 req_len; /* byte 3 */ + u8 req_data[0]; +} __packed; + +#define SMP_RESP_HDR_SZ 4 + +/* + * struct sci_sas_address - This structure depicts how a SAS address is + * represented by SCI. + * XXX convert this to u8 [SAS_ADDR_SIZE] like the rest of libsas + * + */ +struct sci_sas_address { + u32 high; + u32 low; +}; +#endif diff --git a/drivers/scsi/isci/scu_completion_codes.h b/drivers/scsi/isci/scu_completion_codes.h new file mode 100644 index 00000000000..c8b329c695f --- /dev/null +++ b/drivers/scsi/isci/scu_completion_codes.h @@ -0,0 +1,283 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. 
+ * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _SCU_COMPLETION_CODES_HEADER_ +#define _SCU_COMPLETION_CODES_HEADER_ + +/** + * This file contains the constants and macros for the SCU hardware completion + * codes. + * + * + */ + +#define SCU_COMPLETION_TYPE_SHIFT 28 +#define SCU_COMPLETION_TYPE_MASK 0x70000000 + +/** + * SCU_COMPLETION_TYPE() - + * + * This macro constructs an SCU completion type + */ +#define SCU_COMPLETION_TYPE(type) \ + ((u32)(type) << SCU_COMPLETION_TYPE_SHIFT) + +/** + * SCU_COMPLETION_TYPE() - + * + * These macros contain the SCU completion types SCU_COMPLETION_TYPE + */ +#define SCU_COMPLETION_TYPE_TASK SCU_COMPLETION_TYPE(0) +#define SCU_COMPLETION_TYPE_SDMA SCU_COMPLETION_TYPE(1) +#define SCU_COMPLETION_TYPE_UFI SCU_COMPLETION_TYPE(2) +#define SCU_COMPLETION_TYPE_EVENT SCU_COMPLETION_TYPE(3) +#define SCU_COMPLETION_TYPE_NOTIFY SCU_COMPLETION_TYPE(4) + +/** + * + * + * These constants provide the shift and mask values for the various parts of + * an SCU completion code. + */ +#define SCU_COMPLETION_STATUS_MASK 0x0FFC0000 +#define SCU_COMPLETION_TL_STATUS_MASK 0x0FC00000 +#define SCU_COMPLETION_TL_STATUS_SHIFT 22 +#define SCU_COMPLETION_SDMA_STATUS_MASK 0x003C0000 +#define SCU_COMPLETION_PEG_MASK 0x00010000 +#define SCU_COMPLETION_PORT_MASK 0x00007000 +#define SCU_COMPLETION_PE_MASK SCU_COMPLETION_PORT_MASK +#define SCU_COMPLETION_PE_SHIFT 12 +#define SCU_COMPLETION_INDEX_MASK 0x00000FFF + +/** + * SCU_GET_COMPLETION_TYPE() - + * + * This macro returns the SCU completion type. + */ +#define SCU_GET_COMPLETION_TYPE(completion_code) \ + ((completion_code) & SCU_COMPLETION_TYPE_MASK) + +/** + * SCU_GET_COMPLETION_STATUS() - + * + * This macro returns the SCU completion status. + */ +#define SCU_GET_COMPLETION_STATUS(completion_code) \ + ((completion_code) & SCU_COMPLETION_STATUS_MASK) + +/** + * SCU_GET_COMPLETION_TL_STATUS() - + * + * This macro returns the transport layer completion status. 
+ */ +#define SCU_GET_COMPLETION_TL_STATUS(completion_code) \ + ((completion_code) & SCU_COMPLETION_TL_STATUS_MASK) + +/** + * SCU_MAKE_COMPLETION_STATUS() - + * + * This macro takes a completion code and performs the shift and mask + * operations to turn it into a completion code that can be compared to a + * SCU_GET_COMPLETION_TL_STATUS. + */ +#define SCU_MAKE_COMPLETION_STATUS(completion_code) \ + ((u32)(completion_code) << SCU_COMPLETION_TL_STATUS_SHIFT) + +/** + * SCU_NORMALIZE_COMPLETION_STATUS() - + * + * This macro takes a SCU_GET_COMPLETION_TL_STATUS and normalizes it for a + * return code. + */ +#define SCU_NORMALIZE_COMPLETION_STATUS(completion_code) \ + (\ + ((completion_code) & SCU_COMPLETION_TL_STATUS_MASK) \ + >> SCU_COMPLETION_TL_STATUS_SHIFT \ + ) + +/** + * SCU_GET_COMPLETION_SDMA_STATUS() - + * + * This macro returns the SDMA completion status. + */ +#define SCU_GET_COMPLETION_SDMA_STATUS(completion_code) \ + ((completion_code) & SCU_COMPLETION_SDMA_STATUS_MASK) + +/** + * SCU_GET_COMPLETION_PEG() - + * + * This macro returns the Protocol Engine Group from the completion code. + */ +#define SCU_GET_COMPLETION_PEG(completion_code) \ + ((completion_code) & SCU_COMPLETION_PEG_MASK) + +/** + * SCU_GET_COMPLETION_PORT() - + * + * This macro reuturns the logical port index from the completion code. + */ +#define SCU_GET_COMPLETION_PORT(completion_code) \ + ((completion_code) & SCU_COMPLETION_PORT_MASK) + +/** + * SCU_GET_PROTOCOL_ENGINE_INDEX() - + * + * This macro returns the PE index from the completion code. + */ +#define SCU_GET_PROTOCOL_ENGINE_INDEX(completion_code) \ + (((completion_code) & SCU_COMPLETION_PE_MASK) >> SCU_COMPLETION_PE_SHIFT) + +/** + * SCU_GET_COMPLETION_INDEX() - + * + * This macro returns the index of the completion which is either a TCi or an + * RNi depending on the completion type. + */ +#define SCU_GET_COMPLETION_INDEX(completion_code) \ + ((completion_code) & SCU_COMPLETION_INDEX_MASK) + +#define SCU_UNSOLICITED_FRAME_MASK 0x0FFF0000 +#define SCU_UNSOLICITED_FRAME_SHIFT 16 + +/** + * SCU_GET_FRAME_INDEX() - + * + * This macro returns a normalized frame index from an unsolicited frame + * completion. + */ +#define SCU_GET_FRAME_INDEX(completion_code) \ + (\ + ((completion_code) & SCU_UNSOLICITED_FRAME_MASK) \ + >> SCU_UNSOLICITED_FRAME_SHIFT \ + ) + +#define SCU_UNSOLICITED_FRAME_ERROR_MASK 0x00008000 + +/** + * SCU_GET_FRAME_ERROR() - + * + * This macro returns a zero (0) value if there is no frame error otherwise it + * returns non-zero (!0). + */ +#define SCU_GET_FRAME_ERROR(completion_code) \ + ((completion_code) & SCU_UNSOLICITED_FRAME_ERROR_MASK) + +/** + * + * + * These constants represent normalized completion codes which must be shifted + * 18 bits to match it with the hardware completion code. In a 16-bit compiler, + * immediate constants are 16-bit values (the size of an int). If we shift + * those by 18 bits, we completely lose the value. To ensure the value is a + * 32-bit value like we want, each immediate value must be cast to a u32. 
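+ *
+ * Illustratively (with "ent" a hypothetical raw completion entry), that
+ * means a hardware entry can be tested against these codes in either of
+ * two equivalent ways:
+ *
+ *	SCU_GET_COMPLETION_TL_STATUS(ent) ==
+ *		SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD)
+ *
+ *	SCU_NORMALIZE_COMPLETION_STATUS(ent) == SCU_TASK_DONE_GOOD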
+ */ +#define SCU_TASK_DONE_GOOD ((u32)0x00) +#define SCU_TASK_DONE_CRC_ERR ((u32)0x14) +#define SCU_TASK_DONE_CHECK_RESPONSE ((u32)0x14) +#define SCU_TASK_DONE_GEN_RESPONSE ((u32)0x15) +#define SCU_TASK_DONE_NAK_CMD_ERR ((u32)0x16) +#define SCU_TASK_DONE_CMD_LL_R_ERR ((u32)0x16) +#define SCU_TASK_DONE_LL_R_ERR ((u32)0x17) +#define SCU_TASK_DONE_ACK_NAK_TO ((u32)0x17) +#define SCU_TASK_DONE_LL_PERR ((u32)0x18) +#define SCU_TASK_DONE_LL_SY_TERM ((u32)0x19) +#define SCU_TASK_DONE_NAK_ERR ((u32)0x19) +#define SCU_TASK_DONE_LL_LF_TERM ((u32)0x1A) +#define SCU_TASK_DONE_DATA_LEN_ERR ((u32)0x1A) +#define SCU_TASK_DONE_LL_CL_TERM ((u32)0x1B) +#define SCU_TASK_DONE_LL_ABORT_ERR ((u32)0x1B) +#define SCU_TASK_DONE_SEQ_INV_TYPE ((u32)0x1C) +#define SCU_TASK_DONE_UNEXP_XR ((u32)0x1C) +#define SCU_TASK_DONE_INV_FIS_TYPE ((u32)0x1D) +#define SCU_TASK_DONE_XR_IU_LEN_ERR ((u32)0x1D) +#define SCU_TASK_DONE_INV_FIS_LEN ((u32)0x1E) +#define SCU_TASK_DONE_XR_WD_LEN ((u32)0x1E) +#define SCU_TASK_DONE_SDMA_ERR ((u32)0x1F) +#define SCU_TASK_DONE_OFFSET_ERR ((u32)0x20) +#define SCU_TASK_DONE_MAX_PLD_ERR ((u32)0x21) +#define SCU_TASK_DONE_EXCESS_DATA ((u32)0x22) +#define SCU_TASK_DONE_LF_ERR ((u32)0x23) +#define SCU_TASK_DONE_UNEXP_FIS ((u32)0x24) +#define SCU_TASK_DONE_UNEXP_RESP ((u32)0x24) +#define SCU_TASK_DONE_EARLY_RESP ((u32)0x25) +#define SCU_TASK_DONE_SMP_RESP_TO_ERR ((u32)0x26) +#define SCU_TASK_DONE_DMASETUP_DIRERR ((u32)0x27) +#define SCU_TASK_DONE_SMP_UFI_ERR ((u32)0x27) +#define SCU_TASK_DONE_XFERCNT_ERR ((u32)0x28) +#define SCU_TASK_DONE_SMP_FRM_TYPE_ERR ((u32)0x28) +#define SCU_TASK_DONE_SMP_LL_RX_ERR ((u32)0x29) +#define SCU_TASK_DONE_RESP_LEN_ERR ((u32)0x2A) +#define SCU_TASK_DONE_UNEXP_DATA ((u32)0x2B) +#define SCU_TASK_DONE_OPEN_FAIL ((u32)0x2C) +#define SCU_TASK_DONE_UNEXP_SDBFIS ((u32)0x2D) +#define SCU_TASK_DONE_REG_ERR ((u32)0x2E) +#define SCU_TASK_DONE_SDB_ERR ((u32)0x2F) +#define SCU_TASK_DONE_TASK_ABORT ((u32)0x30) +#define SCU_TASK_DONE_CMD_SDMA_ERR ((U32)0x32) +#define SCU_TASK_DONE_CMD_LL_ABORT_ERR ((U32)0x33) +#define SCU_TASK_OPEN_REJECT_WRONG_DESTINATION ((u32)0x34) +#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1 ((u32)0x35) +#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2 ((u32)0x36) +#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3 ((u32)0x37) +#define SCU_TASK_OPEN_REJECT_BAD_DESTINATION ((u32)0x38) +#define SCU_TASK_OPEN_REJECT_ZONE_VIOLATION ((u32)0x39) +#define SCU_TASK_DONE_VIIT_ENTRY_NV ((u32)0x3A) +#define SCU_TASK_DONE_IIT_ENTRY_NV ((u32)0x3B) +#define SCU_TASK_DONE_RNCNV_OUTBOUND ((u32)0x3C) +#define SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY ((u32)0x3D) +#define SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED ((u32)0x3E) +#define SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED ((u32)0x3F) + +#endif /* _SCU_COMPLETION_CODES_HEADER_ */ diff --git a/drivers/scsi/isci/scu_event_codes.h b/drivers/scsi/isci/scu_event_codes.h new file mode 100644 index 00000000000..36a945ad572 --- /dev/null +++ b/drivers/scsi/isci/scu_event_codes.h @@ -0,0 +1,336 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __SCU_EVENT_CODES_HEADER__ +#define __SCU_EVENT_CODES_HEADER__ + +/** + * This file contains the constants and macros for the SCU event codes. + * + * + */ + +#define SCU_EVENT_TYPE_CODE_SHIFT 24 +#define SCU_EVENT_TYPE_CODE_MASK 0x0F000000 + +#define SCU_EVENT_SPECIFIC_CODE_SHIFT 18 +#define SCU_EVENT_SPECIFIC_CODE_MASK 0x00FC0000 + +#define SCU_EVENT_CODE_MASK \ + (SCU_EVENT_TYPE_CODE_MASK | SCU_EVENT_SPECIFIC_CODE_MASK) + +/** + * SCU_EVENT_TYPE() - + * + * This macro constructs an SCU event type from the type value. + */ +#define SCU_EVENT_TYPE(type) \ + ((u32)(type) << SCU_EVENT_TYPE_CODE_SHIFT) + +/** + * SCU_EVENT_SPECIFIC() - + * + * This macro constructs an SCU event specifier from the code value. + */ +#define SCU_EVENT_SPECIFIC(code) \ + ((u32)(code) << SCU_EVENT_SPECIFIC_CODE_SHIFT) + +/** + * SCU_EVENT_MESSAGE() - + * + * This macro constructs a combines an SCU event type and SCU event specifier + * from the type and code values. 
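+ *
+ * Purely illustrative (the names "event" and "isci_handle_link_failure"
+ * are hypothetical): codes built with this macro are what a masked event
+ * entry is compared against, e.g.
+ *
+ *	if (scu_get_event_code(event) == SCU_EVENT_LINK_FAILURE)
+ *		isci_handle_link_failure(event);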
+ */ +#define SCU_EVENT_MESSAGE(type, code) \ + ((type) | SCU_EVENT_SPECIFIC(code)) + +/** + * SCU_EVENT_TYPE() - + * + * SCU_EVENT_TYPES + */ +#define SCU_EVENT_TYPE_SMU_COMMAND_ERROR SCU_EVENT_TYPE(0x08) +#define SCU_EVENT_TYPE_SMU_PCQ_ERROR SCU_EVENT_TYPE(0x09) +#define SCU_EVENT_TYPE_SMU_ERROR SCU_EVENT_TYPE(0x00) +#define SCU_EVENT_TYPE_TRANSPORT_ERROR SCU_EVENT_TYPE(0x01) +#define SCU_EVENT_TYPE_BROADCAST_CHANGE SCU_EVENT_TYPE(0x02) +#define SCU_EVENT_TYPE_OSSP_EVENT SCU_EVENT_TYPE(0x03) +#define SCU_EVENT_TYPE_FATAL_MEMORY_ERROR SCU_EVENT_TYPE(0x0F) +#define SCU_EVENT_TYPE_RNC_SUSPEND_TX SCU_EVENT_TYPE(0x04) +#define SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX SCU_EVENT_TYPE(0x05) +#define SCU_EVENT_TYPE_RNC_OPS_MISC SCU_EVENT_TYPE(0x06) +#define SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT SCU_EVENT_TYPE(0x07) +#define SCU_EVENT_TYPE_ERR_CNT_EVENT SCU_EVENT_TYPE(0x0A) + +/** + * + * + * SCU_EVENT_SPECIFIERS + */ +#define SCU_EVENT_SPECIFIER_DRIVER_SUSPEND 0x20 +#define SCU_EVENT_SPECIFIER_RNC_RELEASE 0x00 + +/** + * + * + * SMU_COMMAND_EVENTS + */ +#define SCU_EVENT_INVALID_CONTEXT_COMMAND \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_COMMAND_ERROR, 0x00) + +/** + * + * + * SMU_PCQ_EVENTS + */ +#define SCU_EVENT_UNCORRECTABLE_PCQ_ERROR \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_PCQ_ERROR, 0x00) + +/** + * + * + * SMU_EVENTS + */ +#define SCU_EVENT_UNCORRECTABLE_REGISTER_WRITE \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x02) +#define SCU_EVENT_UNCORRECTABLE_REGISTER_READ \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x03) +#define SCU_EVENT_PCIE_INTERFACE_ERROR \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x04) +#define SCU_EVENT_FUNCTION_LEVEL_RESET \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x05) + +/** + * + * + * TRANSPORT_LEVEL_ERRORS + */ +#define SCU_EVENT_ACK_NAK_TIMEOUT_ERROR \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_TRANSPORT_ERROR, 0x00) + +/** + * + * + * BROADCAST_CHANGE_EVENTS + */ +#define SCU_EVENT_BROADCAST_CHANGE \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x01) +#define SCU_EVENT_BROADCAST_RESERVED0 \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x02) +#define SCU_EVENT_BROADCAST_RESERVED1 \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x03) +#define SCU_EVENT_BROADCAST_SES \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x04) +#define SCU_EVENT_BROADCAST_EXPANDER \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x05) +#define SCU_EVENT_BROADCAST_AEN \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x06) +#define SCU_EVENT_BROADCAST_RESERVED3 \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x07) +#define SCU_EVENT_BROADCAST_RESERVED4 \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x08) +#define SCU_EVENT_PE_SUSPENDED \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x09) + +/** + * + * + * OSSP_EVENTS + */ +#define SCU_EVENT_PORT_SELECTOR_DETECTED \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x10) +#define SCU_EVENT_SENT_PORT_SELECTION \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x11) +#define SCU_EVENT_HARD_RESET_TRANSMITTED \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x12) +#define SCU_EVENT_HARD_RESET_RECEIVED \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x13) +#define SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x15) +#define SCU_EVENT_LINK_FAILURE \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x16) +#define SCU_EVENT_SATA_SPINUP_HOLD \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x17) +#define SCU_EVENT_SAS_15_SSC \ + 
SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x18) +#define SCU_EVENT_SAS_15 \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x19) +#define SCU_EVENT_SAS_30_SSC \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1A) +#define SCU_EVENT_SAS_30 \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1B) +#define SCU_EVENT_SAS_60_SSC \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1C) +#define SCU_EVENT_SAS_60 \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1D) +#define SCU_EVENT_SATA_15_SSC \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1E) +#define SCU_EVENT_SATA_15 \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1F) +#define SCU_EVENT_SATA_30_SSC \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x20) +#define SCU_EVENT_SATA_30 \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x21) +#define SCU_EVENT_SATA_60_SSC \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x22) +#define SCU_EVENT_SATA_60 \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x23) +#define SCU_EVENT_SAS_PHY_DETECTED \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x24) +#define SCU_EVENT_SATA_PHY_DETECTED \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x25) + +/** + * + * + * FATAL_INTERNAL_MEMORY_ERROR_EVENTS + */ +#define SCU_EVENT_TSC_RNSC_UNCORRECTABLE_ERROR \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR, 0x00) +#define SCU_EVENT_TC_RNC_UNCORRECTABLE_ERROR \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR, 0x01) +#define SCU_EVENT_ZPT_UNCORRECTABLE_ERROR \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR, 0x02) + +/** + * + * + * REMOTE_NODE_SUSPEND_EVENTS + */ +#define SCU_EVENT_TL_RNC_SUSPEND_TX \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX, 0x00) +#define SCU_EVENT_TL_RNC_SUSPEND_TX_RX \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX, 0x00) +#define SCU_EVENT_DRIVER_POST_RNC_SUSPEND_TX \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX, 0x20) +#define SCU_EVENT_DRIVER_POST_RNC_SUSPEND_TX_RX \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX, 0x20) + +/** + * + * + * REMOTE_NODE_MISC_EVENTS + */ +#define SCU_EVENT_POST_RCN_RELEASE \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, SCU_EVENT_SPECIFIER_RNC_RELEASE) +#define SCU_EVENT_POST_IT_NEXUS_LOSS_TIMER_ENABLE \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x01) +#define SCU_EVENT_POST_IT_NEXUS_LOSS_TIMER_DISABLE \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x02) +#define SCU_EVENT_POST_RNC_COMPLETE \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x03) +#define SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x04) + +/** + * + * + * ERROR_COUNT_EVENT + */ +#define SCU_EVENT_RX_CREDIT_BLOCKED_RECEIVED \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x00) +#define SCU_EVENT_TX_DONE_CREDIT_TIMEOUT \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x01) +#define SCU_EVENT_RX_DONE_CREDIT_TIMEOUT \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x02) + +/** + * scu_get_event_type() - + * + * This macro returns the SCU event type from the event code. + */ +#define scu_get_event_type(event_code) \ + ((event_code) & SCU_EVENT_TYPE_CODE_MASK) + +/** + * scu_get_event_specifier() - + * + * This macro returns the SCU event specifier from the event code. + */ +#define scu_get_event_specifier(event_code) \ + ((event_code) & SCU_EVENT_SPECIFIC_CODE_MASK) + +/** + * scu_get_event_code() - + * + * This macro returns the combined SCU event type and SCU event specifier from + * the event code. 
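+ *
+ * Illustrative note ("event" is a hypothetical raw event entry): testing
+ * the combined code is equivalent to testing the type and specifier
+ * separately, i.e.
+ *
+ *	scu_get_event_code(event) == SCU_EVENT_BROADCAST_CHANGE
+ *
+ * is the same check as
+ *
+ *	scu_get_event_type(event) == SCU_EVENT_TYPE_BROADCAST_CHANGE &&
+ *	scu_get_event_specifier(event) == SCU_EVENT_SPECIFIC(0x01)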
+ */ +#define scu_get_event_code(event_code) \ + ((event_code) & SCU_EVENT_CODE_MASK) + + +/** + * + * + * PTS_SCHEDULE_EVENT + */ +#define SCU_EVENT_SMP_RESPONSE_NO_PE \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x00) +#define SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE \ + scu_get_event_specifier(SCU_EVENT_SMP_RESPONSE_NO_PE) + +#define SCU_EVENT_TASK_TIMEOUT \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x01) +#define SCU_EVENT_SPECIFIC_TASK_TIMEOUT \ + scu_get_event_specifier(SCU_EVENT_TASK_TIMEOUT) + +#define SCU_EVENT_IT_NEXUS_TIMEOUT \ + SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x02) +#define SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT \ + scu_get_event_specifier(SCU_EVENT_IT_NEXUS_TIMEOUT) + + +#endif /* __SCU_EVENT_CODES_HEADER__ */ diff --git a/drivers/scsi/isci/scu_remote_node_context.h b/drivers/scsi/isci/scu_remote_node_context.h new file mode 100644 index 00000000000..33745adc826 --- /dev/null +++ b/drivers/scsi/isci/scu_remote_node_context.h @@ -0,0 +1,229 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __SCU_REMOTE_NODE_CONTEXT_HEADER__ +#define __SCU_REMOTE_NODE_CONTEXT_HEADER__ + +/** + * This file contains the structures and constatns used by the SCU hardware to + * describe a remote node context. + * + * + */ + +/** + * struct ssp_remote_node_context - This structure contains the SCU hardware + * definition for an SSP remote node. + * + * + */ +struct ssp_remote_node_context { + /* WORD 0 */ + + /** + * This field is the remote node index assigned for this remote node. All + * remote nodes must have a unique remote node index. The value of the remote + * node index can not exceed the maximum number of remote nodes reported in + * the SCU device context capacity register. + */ + u32 remote_node_index:12; + u32 reserved0_1:4; + + /** + * This field tells the SCU hardware how many simultaneous connections that + * this remote node will support. + */ + u32 remote_node_port_width:4; + + /** + * This field tells the SCU hardware which logical port to associate with this + * remote node. + */ + u32 logical_port_index:3; + u32 reserved0_2:5; + + /** + * This field will enable the I_T nexus loss timer for this remote node. + */ + u32 nexus_loss_timer_enable:1; + + /** + * This field is the for driver debug only and is not used. + */ + u32 check_bit:1; + + /** + * This field must be set to true when the hardware DMAs the remote node + * context to the hardware SRAM. When the remote node is being invalidated + * this field must be set to false. + */ + u32 is_valid:1; + + /** + * This field must be set to true. + */ + u32 is_remote_node_context:1; + + /* WORD 1 - 2 */ + + /** + * This is the low word of the remote device SAS Address + */ + u32 remote_sas_address_lo; + + /** + * This field is the high word of the remote device SAS Address + */ + u32 remote_sas_address_hi; + + /* WORD 3 */ + /** + * This field reprensets the function number assigned to this remote device. + * This value must match the virtual function number that is being used to + * communicate to the device. + */ + u32 function_number:8; + u32 reserved3_1:8; + + /** + * This field provides the driver a way to cheat on the arbitration wait time + * for this remote node. + */ + u32 arbitration_wait_time:16; + + /* WORD 4 */ + /** + * This field tells the SCU hardware how long this device may occupy the + * connection before it must be closed. + */ + u32 connection_occupancy_timeout:16; + + /** + * This field tells the SCU hardware how long to maintain a connection when + * there are no frames being transmitted on the link. + */ + u32 connection_inactivity_timeout:16; + + /* WORD 5 */ + /** + * This field allows the driver to cheat on the arbitration wait time for this + * remote node. + */ + u32 initial_arbitration_wait_time:16; + + /** + * This field is tells the hardware what to program for the connection rate in + * the open address frame. See the SAS spec for valid values. 
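+ * (For reference, the SAS specification encodes 1.5 Gb/s as 0x8,
+ * 3.0 Gb/s as 0x9 and 6.0 Gb/s as 0xA.)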
+ */ + u32 oaf_connection_rate:4; + + /** + * This field tells the SCU hardware what to program for the features in the + * open address frame. See the SAS spec for valid values. + */ + u32 oaf_features:4; + + /** + * This field tells the SCU hardware what to use for the source zone group in + * the open address frame. See the SAS spec for more details on zoning. + */ + u32 oaf_source_zone_group:8; + + /* WORD 6 */ + /** + * This field tells the SCU hardware what to use as the more capibilities in + * the open address frame. See the SAS Spec for details. + */ + u32 oaf_more_compatibility_features; + + /* WORD 7 */ + u32 reserved7; + +}; + +/** + * struct stp_remote_node_context - This structure contains the SCU hardware + * definition for a STP remote node. + * + * STP Targets are not yet supported so this definition is a placeholder until + * we do support them. + */ +struct stp_remote_node_context { + /** + * Placeholder data for the STP remote node. + */ + u32 data[8]; + +}; + +/** + * This union combines the SAS and SATA remote node definitions. + * + * union scu_remote_node_context + */ +union scu_remote_node_context { + /** + * SSP Remote Node + */ + struct ssp_remote_node_context ssp; + + /** + * STP Remote Node + */ + struct stp_remote_node_context stp; + +}; + +#endif /* __SCU_REMOTE_NODE_CONTEXT_HEADER__ */ diff --git a/drivers/scsi/isci/scu_task_context.h b/drivers/scsi/isci/scu_task_context.h new file mode 100644 index 00000000000..7df87d92328 --- /dev/null +++ b/drivers/scsi/isci/scu_task_context.h @@ -0,0 +1,942 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _SCU_TASK_CONTEXT_H_ +#define _SCU_TASK_CONTEXT_H_ + +/** + * This file contains the structures and constants for the SCU hardware task + * context. + * + * + */ + + +/** + * enum scu_ssp_task_type - This enumberation defines the various SSP task + * types the SCU hardware will accept. The definition for the various task + * types the SCU hardware will accept can be found in the DS specification. + * + * + */ +typedef enum { + SCU_TASK_TYPE_IOREAD, /* /< IO READ direction or no direction */ + SCU_TASK_TYPE_IOWRITE, /* /< IO Write direction */ + SCU_TASK_TYPE_SMP_REQUEST, /* /< SMP Request type */ + SCU_TASK_TYPE_RESPONSE, /* /< Driver generated response frame (targt mode) */ + SCU_TASK_TYPE_RAW_FRAME, /* /< Raw frame request type */ + SCU_TASK_TYPE_PRIMITIVE /* /< Request for a primitive to be transmitted */ +} scu_ssp_task_type; + +/** + * enum scu_sata_task_type - This enumeration defines the various SATA task + * types the SCU hardware will accept. The definition for the various task + * types the SCU hardware will accept can be found in the DS specification. + * + * + */ +typedef enum { + SCU_TASK_TYPE_DMA_IN, /* /< Read request */ + SCU_TASK_TYPE_FPDMAQ_READ, /* /< NCQ read request */ + SCU_TASK_TYPE_PACKET_DMA_IN, /* /< Packet read request */ + SCU_TASK_TYPE_SATA_RAW_FRAME, /* /< Raw frame request */ + RESERVED_4, + RESERVED_5, + RESERVED_6, + RESERVED_7, + SCU_TASK_TYPE_DMA_OUT, /* /< Write request */ + SCU_TASK_TYPE_FPDMAQ_WRITE, /* /< NCQ write Request */ + SCU_TASK_TYPE_PACKET_DMA_OUT /* /< Packet write request */ +} scu_sata_task_type; + + +/** + * + * + * SCU_CONTEXT_TYPE + */ +#define SCU_TASK_CONTEXT_TYPE 0 +#define SCU_RNC_CONTEXT_TYPE 1 + +/** + * + * + * SCU_TASK_CONTEXT_VALIDITY + */ +#define SCU_TASK_CONTEXT_INVALID 0 +#define SCU_TASK_CONTEXT_VALID 1 + +/** + * + * + * SCU_COMMAND_CODE + */ +#define SCU_COMMAND_CODE_INITIATOR_NEW_TASK 0 +#define SCU_COMMAND_CODE_ACTIVE_TASK 1 +#define SCU_COMMAND_CODE_PRIMITIVE_SEQ_TASK 2 +#define SCU_COMMAND_CODE_TARGET_RAW_FRAMES 3 + +/** + * + * + * SCU_TASK_PRIORITY + */ +/** + * + * + * This priority is used when there is no priority request for this request. + */ +#define SCU_TASK_PRIORITY_NORMAL 0 + +/** + * + * + * This priority indicates that the task should be scheduled to the head of the + * queue. The task will NOT be executed if the TX is suspended for the remote + * node. + */ +#define SCU_TASK_PRIORITY_HEAD_OF_Q 1 + +/** + * + * + * This priority indicates that the task will be executed before all + * SCU_TASK_PRIORITY_NORMAL and SCU_TASK_PRIORITY_HEAD_OF_Q tasks. The task + * WILL be executed if the TX is suspended for the remote node. 
+ */ +#define SCU_TASK_PRIORITY_HIGH 2 + +/** + * + * + * This task priority is reserved and should not be used. + */ +#define SCU_TASK_PRIORITY_RESERVED 3 + +#define SCU_TASK_INITIATOR_MODE 1 +#define SCU_TASK_TARGET_MODE 0 + +#define SCU_TASK_REGULAR 0 +#define SCU_TASK_ABORTED 1 + +/* direction bit defintion */ +/** + * + * + * SATA_DIRECTION + */ +#define SCU_SATA_WRITE_DATA_DIRECTION 0 +#define SCU_SATA_READ_DATA_DIRECTION 1 + +/** + * + * + * SCU_COMMAND_CONTEXT_MACROS These macros provide the mask and shift + * operations to construct the various SCU commands + */ +#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_SHIFT 21 +#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK 0x00E00000 +#define scu_get_command_request_type(x) \ + ((x) & SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK) + +#define SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_SHIFT 18 +#define SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK 0x001C0000 +#define scu_get_command_request_subtype(x) \ + ((x) & SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK) + +#define SCU_CONTEXT_COMMAND_REQUEST_FULLTYPE_MASK \ + (\ + SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK \ + | SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK \ + ) +#define scu_get_command_request_full_type(x) \ + ((x) & SCU_CONTEXT_COMMAND_REQUEST_FULLTYPE_MASK) + +#define SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT 16 +#define SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_MASK 0x00010000 +#define scu_get_command_protocl_engine_group(x) \ + ((x) & SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_MASK) + +#define SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT 12 +#define SCU_CONTEXT_COMMAND_LOGICAL_PORT_MASK 0x00007000 +#define scu_get_command_reqeust_logical_port(x) \ + ((x) & SCU_CONTEXT_COMMAND_LOGICAL_PORT_MASK) + + +#define MAKE_SCU_CONTEXT_COMMAND_TYPE(type) \ + ((u32)(type) << SCU_CONTEXT_COMMAND_REQUEST_TYPE_SHIFT) + +/** + * MAKE_SCU_CONTEXT_COMMAND_TYPE() - + * + * SCU_COMMAND_TYPES These constants provide the grouping of the different SCU + * command types. + */ +#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC MAKE_SCU_CONTEXT_COMMAND_TYPE(0) +#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC MAKE_SCU_CONTEXT_COMMAND_TYPE(1) +#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC MAKE_SCU_CONTEXT_COMMAND_TYPE(2) +#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC MAKE_SCU_CONTEXT_COMMAND_TYPE(3) +#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC MAKE_SCU_CONTEXT_COMMAND_TYPE(6) + +#define MAKE_SCU_CONTEXT_COMMAND_REQUEST(type, command) \ + ((type) | ((command) << SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_SHIFT)) + +/** + * + * + * SCU_REQUEST_TYPES These constants are the various request types that can be + * posted to the SCU hardware. 
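+ *
+ * As an illustrative sketch only (not the exact driver code; "peg",
+ * "port" and "tci" are hypothetical variables), a post request combines
+ * one of these request types with the engine group, logical port and
+ * index fields described above:
+ *
+ *	u32 post = SCU_CONTEXT_COMMAND_REQUST_POST_TC |
+ *		   (peg << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+ *		   (port << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+ *		   tci;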
+ */ +#define SCU_CONTEXT_COMMAND_REQUST_POST_TC \ + (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC, 0)) + +#define SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT \ + (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC, 1)) + +#define SCU_CONTEXT_COMMAND_REQUST_DUMP_TC \ + (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC, 0)) + +#define SCU_CONTEXT_COMMAND_POST_RNC_32 \ + (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 0)) + +#define SCU_CONTEXT_COMMAND_POST_RNC_96 \ + (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 1)) + +#define SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE \ + (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 2)) + +#define SCU_CONTEXT_COMMAND_DUMP_RNC_32 \ + (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC, 0)) + +#define SCU_CONTEXT_COMMAND_DUMP_RNC_96 \ + (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC, 1)) + +#define SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX \ + (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 0)) + +#define SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX \ + (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 1)) + +#define SCU_CONTEXT_COMMAND_POST_RNC_RESUME \ + (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 2)) + +#define SCU_CONTEXT_IT_NEXUS_LOSS_TIMER_ENABLE \ + (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 3)) + +#define SCU_CONTEXT_IT_NEXUS_LOSS_TIMER_DISABLE \ + (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 4)) + +/** + * + * + * SCU_TASK_CONTEXT_PROTOCOL SCU Task context protocol types this is uesd to + * program the SCU Task context protocol field in word 0x00. + */ +#define SCU_TASK_CONTEXT_PROTOCOL_SMP 0x00 +#define SCU_TASK_CONTEXT_PROTOCOL_SSP 0x01 +#define SCU_TASK_CONTEXT_PROTOCOL_STP 0x02 +#define SCU_TASK_CONTEXT_PROTOCOL_NONE 0x07 + +/** + * struct ssp_task_context - This is the SCU hardware definition for an SSP + * request. + * + * + */ +struct ssp_task_context { + /* OFFSET 0x18 */ + u32 reserved00:24; + u32 frame_type:8; + + /* OFFSET 0x1C */ + u32 reserved01; + + /* OFFSET 0x20 */ + u32 fill_bytes:2; + u32 reserved02:6; + u32 changing_data_pointer:1; + u32 retransmit:1; + u32 retry_data_frame:1; + u32 tlr_control:2; + u32 reserved03:19; + + /* OFFSET 0x24 */ + u32 uiRsvd4; + + /* OFFSET 0x28 */ + u32 target_port_transfer_tag:16; + u32 tag:16; + + /* OFFSET 0x2C */ + u32 data_offset; +}; + +/** + * struct stp_task_context - This is the SCU hardware definition for an STP + * request. + * + * + */ +struct stp_task_context { + /* OFFSET 0x18 */ + u32 fis_type:8; + u32 pm_port:4; + u32 reserved0:3; + u32 control:1; + u32 command:8; + u32 features:8; + + /* OFFSET 0x1C */ + u32 reserved1; + + /* OFFSET 0x20 */ + u32 reserved2; + + /* OFFSET 0x24 */ + u32 reserved3; + + /* OFFSET 0x28 */ + u32 ncq_tag:5; + u32 reserved4:27; + + /* OFFSET 0x2C */ + u32 data_offset; /* TODO: What is this used for? */ +}; + +/** + * struct smp_task_context - This is the SCU hardware definition for an SMP + * request. 
+ * + * + */ +struct smp_task_context { + /* OFFSET 0x18 */ + u32 response_length:8; + u32 function_result:8; + u32 function:8; + u32 frame_type:8; + + /* OFFSET 0x1C */ + u32 smp_response_ufi:12; + u32 reserved1:20; + + /* OFFSET 0x20 */ + u32 reserved2; + + /* OFFSET 0x24 */ + u32 reserved3; + + /* OFFSET 0x28 */ + u32 reserved4; + + /* OFFSET 0x2C */ + u32 reserved5; +}; + +/** + * struct primitive_task_context - This is the SCU hardware definition used + * when the driver wants to send a primitive on the link. + * + * + */ +struct primitive_task_context { + /* OFFSET 0x18 */ + /** + * This field is the control word and it must be 0. + */ + u32 control; /* /< must be set to 0 */ + + /* OFFSET 0x1C */ + /** + * This field specifies the primitive that is to be transmitted. + */ + u32 sequence; + + /* OFFSET 0x20 */ + u32 reserved0; + + /* OFFSET 0x24 */ + u32 reserved1; + + /* OFFSET 0x28 */ + u32 reserved2; + + /* OFFSET 0x2C */ + u32 reserved3; +}; + +/** + * The union of the protocols that can be selected in the SCU task context + * field. + * + * protocol_context + */ +union protocol_context { + struct ssp_task_context ssp; + struct stp_task_context stp; + struct smp_task_context smp; + struct primitive_task_context primitive; + u32 words[6]; +}; + +/** + * struct scu_sgl_element - This structure represents a single SCU defined SGL + * element. SCU SGLs contain a 64 bit address with the maximum data transfer + * being 24 bits in size. The SGL can not cross a 4GB boundary. + * + * struct scu_sgl_element + */ +struct scu_sgl_element { + /** + * This field is the upper 32 bits of the 64 bit physical address. + */ + u32 address_upper; + + /** + * This field is the lower 32 bits of the 64 bit physical address. + */ + u32 address_lower; + + /** + * This field is the number of bytes to transfer. + */ + u32 length:24; + + /** + * This field is the address modifier to be used when a virtual function is + * requesting a data transfer. + */ + u32 address_modifier:8; + +}; + +#define SCU_SGL_ELEMENT_PAIR_A 0 +#define SCU_SGL_ELEMENT_PAIR_B 1 + +/** + * struct scu_sgl_element_pair - This structure is the SCU hardware definition + * of a pair of SGL elements. The SCU hardware always works on SGL pairs. + * They are refered to in the DS specification as SGL A and SGL B. Each SGL + * pair is followed by the address of the next pair. + * + * + */ +struct scu_sgl_element_pair { + /* OFFSET 0x60-0x68 */ + /** + * This field is the SGL element A of the SGL pair. + */ + struct scu_sgl_element A; + + /* OFFSET 0x6C-0x74 */ + /** + * This field is the SGL element B of the SGL pair. + */ + struct scu_sgl_element B; + + /* OFFSET 0x78-0x7C */ + /** + * This field is the upper 32 bits of the 64 bit address to the next SGL + * element pair. + */ + u32 next_pair_upper; + + /** + * This field is the lower 32 bits of the 64 bit address to the next SGL + * element pair. + */ + u32 next_pair_lower; + +}; + +/** + * struct transport_snapshot - This structure is the SCU hardware scratch area + * for the task context. This is set to 0 by the driver but can be read by + * issuing a dump TC request to the SCU. 
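+ * (The read back is presumably done by posting a request of the
+ * SCU_CONTEXT_COMMAND_REQUST_DUMP_TC type defined above; the driver
+ * itself only zeroes these words when it constructs the task context.)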
+ * + * + */ +struct transport_snapshot { + /* OFFSET 0x48 */ + u32 xfer_rdy_write_data_length; + + /* OFFSET 0x4C */ + u32 data_offset; + + /* OFFSET 0x50 */ + u32 data_transfer_size:24; + u32 reserved_50_0:8; + + /* OFFSET 0x54 */ + u32 next_initiator_write_data_offset; + + /* OFFSET 0x58 */ + u32 next_initiator_write_data_xfer_size:24; + u32 reserved_58_0:8; +}; + +/** + * struct scu_task_context - This structure defines the contents of the SCU + * silicon task context. It lays out all of the fields according to the + * expected order and location for the Storage Controller unit. + * + * + */ +struct scu_task_context { + /* OFFSET 0x00 ------ */ + /** + * This field must be encoded to one of the valid SCU task priority values + * - SCU_TASK_PRIORITY_NORMAL + * - SCU_TASK_PRIORITY_HEAD_OF_Q + * - SCU_TASK_PRIORITY_HIGH + */ + u32 priority:2; + + /** + * This field must be set to true if this is an initiator generated request. + * Until target mode is supported all task requests are initiator requests. + */ + u32 initiator_request:1; + + /** + * This field must be set to one of the valid connection rates valid values + * are 0x8, 0x9, and 0xA. + */ + u32 connection_rate:4; + + /** + * This field muse be programed when generating an SMP response since the SMP + * connection remains open until the SMP response is generated. + */ + u32 protocol_engine_index:3; + + /** + * This field must contain the logical port for the task request. + */ + u32 logical_port_index:3; + + /** + * This field must be set to one of the SCU_TASK_CONTEXT_PROTOCOL values + * - SCU_TASK_CONTEXT_PROTOCOL_SMP + * - SCU_TASK_CONTEXT_PROTOCOL_SSP + * - SCU_TASK_CONTEXT_PROTOCOL_STP + * - SCU_TASK_CONTEXT_PROTOCOL_NONE + */ + u32 protocol_type:3; + + /** + * This filed must be set to the TCi allocated for this task + */ + u32 task_index:12; + + /** + * This field is reserved and must be set to 0x00 + */ + u32 reserved_00_0:1; + + /** + * For a normal task request this must be set to 0. If this is an abort of + * this task request it must be set to 1. + */ + u32 abort:1; + + /** + * This field must be set to true for the SCU hardware to process the task. + */ + u32 valid:1; + + /** + * This field must be set to SCU_TASK_CONTEXT_TYPE + */ + u32 context_type:1; + + /* OFFSET 0x04 */ + /** + * This field contains the RNi that is the target of this request. + */ + u32 remote_node_index:12; + + /** + * This field is programmed if this is a mirrored request, which we are not + * using, in which case it is the RNi for the mirrored target. + */ + u32 mirrored_node_index:12; + + /** + * This field is programmed with the direction of the SATA reqeust + * - SCU_SATA_WRITE_DATA_DIRECTION + * - SCU_SATA_READ_DATA_DIRECTION + */ + u32 sata_direction:1; + + /** + * This field is programmsed with one of the following SCU_COMMAND_CODE + * - SCU_COMMAND_CODE_INITIATOR_NEW_TASK + * - SCU_COMMAND_CODE_ACTIVE_TASK + * - SCU_COMMAND_CODE_PRIMITIVE_SEQ_TASK + * - SCU_COMMAND_CODE_TARGET_RAW_FRAMES + */ + u32 command_code:2; + + /** + * This field is set to true if the remote node should be suspended. + * This bit is only valid for SSP & SMP target devices. 
+ */ + u32 suspend_node:1; + + /** + * This field is programmed with one of the following command type codes + * + * For SAS requests use the scu_ssp_task_type + * - SCU_TASK_TYPE_IOREAD + * - SCU_TASK_TYPE_IOWRITE + * - SCU_TASK_TYPE_SMP_REQUEST + * - SCU_TASK_TYPE_RESPONSE + * - SCU_TASK_TYPE_RAW_FRAME + * - SCU_TASK_TYPE_PRIMITIVE + * + * For SATA requests use the scu_sata_task_type + * - SCU_TASK_TYPE_DMA_IN + * - SCU_TASK_TYPE_FPDMAQ_READ + * - SCU_TASK_TYPE_PACKET_DMA_IN + * - SCU_TASK_TYPE_SATA_RAW_FRAME + * - SCU_TASK_TYPE_DMA_OUT + * - SCU_TASK_TYPE_FPDMAQ_WRITE + * - SCU_TASK_TYPE_PACKET_DMA_OUT + */ + u32 task_type:4; + + /* OFFSET 0x08 */ + /** + * This field is reserved and the must be set to 0x00 + */ + u32 link_layer_control:8; /* presently all reserved */ + + /** + * This field is set to true when TLR is to be enabled + */ + u32 ssp_tlr_enable:1; + + /** + * This is field specifies if the SCU DMAs a response frame to host + * memory for good response frames when operating in target mode. + */ + u32 dma_ssp_target_good_response:1; + + /** + * This field indicates if the SCU should DMA the response frame to + * host memory. + */ + u32 do_not_dma_ssp_good_response:1; + + /** + * This field is set to true when strict ordering is to be enabled + */ + u32 strict_ordering:1; + + /** + * This field indicates the type of endianess to be utilized for the + * frame. command, task, and response frames utilized control_frame + * set to 1. + */ + u32 control_frame:1; + + /** + * This field is reserved and the driver should set to 0x00 + */ + u32 tl_control_reserved:3; + + /** + * This field is set to true when the SCU hardware task timeout control is to + * be enabled + */ + u32 timeout_enable:1; + + /** + * This field is reserved and the driver should set it to 0x00 + */ + u32 pts_control_reserved:7; + + /** + * This field should be set to true when block guard is to be enabled + */ + u32 block_guard_enable:1; + + /** + * This field is reserved and the driver should set to 0x00 + */ + u32 sdma_control_reserved:7; + + /* OFFSET 0x0C */ + /** + * This field is the address modifier for this io request it should be + * programmed with the virtual function that is making the request. + */ + u32 address_modifier:16; + + /** + * @todo What we support mirrored SMP response frame? + */ + u32 mirrored_protocol_engine:3; /* mirrored protocol Engine Index */ + + /** + * If this is a mirrored request the logical port index for the mirrored RNi + * must be programmed. + */ + u32 mirrored_logical_port:4; /* mirrored local port index */ + + /** + * This field is reserved and the driver must set it to 0x00 + */ + u32 reserved_0C_0:8; + + /** + * This field must be set to true if the mirrored request processing is to be + * enabled. + */ + u32 mirror_request_enable:1; /* Mirrored request Enable */ + + /* OFFSET 0x10 */ + /** + * This field is the command iu length in dwords + */ + u32 ssp_command_iu_length:8; + + /** + * This is the target TLR enable bit it must be set to 0 when creatning the + * task context. + */ + u32 xfer_ready_tlr_enable:1; + + /** + * This field is reserved and the driver must set it to 0x00 + */ + u32 reserved_10_0:7; + + /** + * This is the maximum burst size that the SCU hardware will send in one + * connection its value is (N x 512) and N must be a multiple of 2. If the + * value is 0x00 then maximum burst size is disabled. + */ + u32 ssp_max_burst_size:16; + + /* OFFSET 0x14 */ + /** + * This filed is set to the number of bytes to be transfered in the request. 
+ */ + u32 transfer_length_bytes:24; /* In terms of bytes */ + + /** + * This field is reserved and the driver should set it to 0x00 + */ + u32 reserved_14_0:8; + + /* OFFSET 0x18-0x2C */ + /** + * This union provides for the protocol specif part of the SCU Task Context. + */ + union protocol_context type; + + /* OFFSET 0x30-0x34 */ + /** + * This field is the upper 32 bits of the 64 bit physical address of the + * command iu buffer + */ + u32 command_iu_upper; + + /** + * This field is the lower 32 bits of the 64 bit physical address of the + * command iu buffer + */ + u32 command_iu_lower; + + /* OFFSET 0x38-0x3C */ + /** + * This field is the upper 32 bits of the 64 bit physical address of the + * response iu buffer + */ + u32 response_iu_upper; + + /** + * This field is the lower 32 bits of the 64 bit physical address of the + * response iu buffer + */ + u32 response_iu_lower; + + /* OFFSET 0x40 */ + /** + * This field is set to the task phase of the SCU hardware. The driver must + * set this to 0x01 + */ + u32 task_phase:8; + + /** + * This field is set to the transport layer task status. The driver must set + * this to 0x00 + */ + u32 task_status:8; + + /** + * This field is used during initiator write TLR + */ + u32 previous_extended_tag:4; + + /** + * This field is set the maximum number of retries for a STP non-data FIS + */ + u32 stp_retry_count:2; + + /** + * This field is reserved and the driver must set it to 0x00 + */ + u32 reserved_40_1:2; + + /** + * This field is used by the SCU TL to determine when to take a snapshot when + * tranmitting read data frames. + * - 0x00 The entire IO + * - 0x01 32k + * - 0x02 64k + * - 0x04 128k + * - 0x08 256k + */ + u32 ssp_tlr_threshold:4; + + /** + * This field is reserved and the driver must set it to 0x00 + */ + u32 reserved_40_2:4; + + /* OFFSET 0x44 */ + u32 write_data_length; /* read only set to 0 */ + + /* OFFSET 0x48-0x58 */ + struct transport_snapshot snapshot; /* read only set to 0 */ + + /* OFFSET 0x5C */ + u32 block_protection_enable:1; + u32 block_size:2; + u32 block_protection_function:2; + u32 reserved_5C_0:9; + u32 active_sgl_element:2; /* read only set to 0 */ + u32 sgl_exhausted:1; /* read only set to 0 */ + u32 payload_data_transfer_error:4; /* read only set to 0 */ + u32 frame_buffer_offset:11; /* read only set to 0 */ + + /* OFFSET 0x60-0x7C */ + /** + * This field is the first SGL element pair found in the TC data structure. + */ + struct scu_sgl_element_pair sgl_pair_ab; + /* OFFSET 0x80-0x9C */ + /** + * This field is the second SGL element pair found in the TC data structure. 
+ */ + struct scu_sgl_element_pair sgl_pair_cd; + + /* OFFSET 0xA0-BC */ + struct scu_sgl_element_pair sgl_snapshot_ac; + + /* OFFSET 0xC0 */ + u32 active_sgl_element_pair; /* read only set to 0 */ + + /* OFFSET 0xC4-0xCC */ + u32 reserved_C4_CC[3]; + + /* OFFSET 0xD0 */ + u32 intermediate_crc_value:16; + u32 initial_crc_seed:16; + + /* OFFSET 0xD4 */ + u32 application_tag_for_verify:16; + u32 application_tag_for_generate:16; + + /* OFFSET 0xD8 */ + u32 reference_tag_seed_for_verify_function; + + /* OFFSET 0xDC */ + u32 reserved_DC; + + /* OFFSET 0xE0 */ + u32 reserved_E0_0:16; + u32 application_tag_mask_for_generate:16; + + /* OFFSET 0xE4 */ + u32 block_protection_control:16; + u32 application_tag_mask_for_verify:16; + + /* OFFSET 0xE8 */ + u32 block_protection_error:8; + u32 reserved_E8_0:24; + + /* OFFSET 0xEC */ + u32 reference_tag_seed_for_verify; + + /* OFFSET 0xF0 */ + u32 intermediate_crc_valid_snapshot:16; + u32 reserved_F0_0:16; + + /* OFFSET 0xF4 */ + u32 reference_tag_seed_for_verify_function_snapshot; + + /* OFFSET 0xF8 */ + u32 snapshot_of_reserved_dword_DC_of_tc; + + /* OFFSET 0xFC */ + u32 reference_tag_seed_for_generate_function_snapshot; + +}; + +#endif /* _SCU_TASK_CONTEXT_H_ */ diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c new file mode 100644 index 00000000000..d6bcdd013dc --- /dev/null +++ b/drivers/scsi/isci/task.c @@ -0,0 +1,1676 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/completion.h> +#include <linux/irqflags.h> +#include "sas.h" +#include <scsi/libsas.h> +#include "remote_device.h" +#include "remote_node_context.h" +#include "isci.h" +#include "request.h" +#include "task.h" +#include "host.h" + +/** +* isci_task_refuse() - complete the request to the upper layer driver in +* the case where an I/O needs to be completed back in the submit path. +* @ihost: host on which the the request was queued +* @task: request to complete +* @response: response code for the completed task. +* @status: status code for the completed task. +* +*/ +static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task, + enum service_response response, + enum exec_status status) + +{ + enum isci_completion_selection disposition; + + disposition = isci_perform_normal_io_completion; + disposition = isci_task_set_completion_status(task, response, status, + disposition); + + /* Tasks aborted specifically by a call to the lldd_abort_task + * function should not be completed to the host in the regular path. + */ + switch (disposition) { + case isci_perform_normal_io_completion: + /* Normal notification (task_done) */ + dev_dbg(&ihost->pdev->dev, + "%s: Normal - task = %p, response=%d, " + "status=%d\n", + __func__, task, response, status); + + task->lldd_task = NULL; + + isci_execpath_callback(ihost, task, task->task_done); + break; + + case isci_perform_aborted_io_completion: + /* + * No notification because this request is already in the + * abort path. + */ + dev_dbg(&ihost->pdev->dev, + "%s: Aborted - task = %p, response=%d, " + "status=%d\n", + __func__, task, response, status); + break; + + case isci_perform_error_io_completion: + /* Use sas_task_abort */ + dev_dbg(&ihost->pdev->dev, + "%s: Error - task = %p, response=%d, " + "status=%d\n", + __func__, task, response, status); + + isci_execpath_callback(ihost, task, sas_task_abort); + break; + + default: + dev_dbg(&ihost->pdev->dev, + "%s: isci task notification default case!", + __func__); + sas_task_abort(task); + break; + } +} + +#define for_each_sas_task(num, task) \ + for (; num > 0; num--,\ + task = list_entry(task->list.next, struct sas_task, list)) + + +static inline int isci_device_io_ready(struct isci_remote_device *idev, + struct sas_task *task) +{ + return idev ? test_bit(IDEV_IO_READY, &idev->flags) || + (test_bit(IDEV_IO_NCQERROR, &idev->flags) && + isci_task_is_ncq_recovery(task)) + : 0; +} +/** + * isci_task_execute_task() - This function is one of the SAS Domain Template + * functions. This function is called by libsas to send a task down to + * hardware. + * @task: This parameter specifies the SAS task to send. + * @num: This parameter specifies the number of tasks to queue. + * @gfp_flags: This parameter specifies the context of this call. + * + * status, zero indicates success. 
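+ *
+ * Return: always zero. Tasks that cannot be started (unknown device, no
+ * free tag, device not ready, or construction failure) are not failed
+ * here; they are completed back to libsas through isci_task_refuse().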
+ */ +int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) +{ + struct isci_host *ihost = dev_to_ihost(task->dev); + struct isci_remote_device *idev; + unsigned long flags; + bool io_ready; + u16 tag; + + dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num); + + for_each_sas_task(num, task) { + enum sci_status status = SCI_FAILURE; + + spin_lock_irqsave(&ihost->scic_lock, flags); + idev = isci_lookup_device(task->dev); + io_ready = isci_device_io_ready(idev, task); + tag = isci_alloc_tag(ihost); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + dev_dbg(&ihost->pdev->dev, + "task: %p, num: %d dev: %p idev: %p:%#lx cmd = %p\n", + task, num, task->dev, idev, idev ? idev->flags : 0, + task->uldd_task); + + if (!idev) { + isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED, + SAS_DEVICE_UNKNOWN); + } else if (!io_ready || tag == SCI_CONTROLLER_INVALID_IO_TAG) { + /* Indicate QUEUE_FULL so that the scsi midlayer + * retries. + */ + isci_task_refuse(ihost, task, SAS_TASK_COMPLETE, + SAS_QUEUE_FULL); + } else { + /* There is a device and it's ready for I/O. */ + spin_lock_irqsave(&task->task_state_lock, flags); + + if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { + /* The I/O was aborted. */ + spin_unlock_irqrestore(&task->task_state_lock, + flags); + + isci_task_refuse(ihost, task, + SAS_TASK_UNDELIVERED, + SAM_STAT_TASK_ABORTED); + } else { + task->task_state_flags |= SAS_TASK_AT_INITIATOR; + spin_unlock_irqrestore(&task->task_state_lock, flags); + + /* build and send the request. */ + status = isci_request_execute(ihost, idev, task, tag); + + if (status != SCI_SUCCESS) { + + spin_lock_irqsave(&task->task_state_lock, flags); + /* Did not really start this command. */ + task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + spin_unlock_irqrestore(&task->task_state_lock, flags); + + /* Indicate QUEUE_FULL so that the scsi + * midlayer retries. if the request + * failed for remote device reasons, + * it gets returned as + * SAS_TASK_UNDELIVERED next time + * through. + */ + isci_task_refuse(ihost, task, + SAS_TASK_COMPLETE, + SAS_QUEUE_FULL); + } + } + } + if (status != SCI_SUCCESS && tag != SCI_CONTROLLER_INVALID_IO_TAG) { + spin_lock_irqsave(&ihost->scic_lock, flags); + /* command never hit the device, so just free + * the tci and skip the sequence increment + */ + isci_tci_free(ihost, ISCI_TAG_TCI(tag)); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + } + isci_put_device(idev); + } + return 0; +} + +static enum sci_status isci_sata_management_task_request_build(struct isci_request *ireq) +{ + struct isci_tmf *isci_tmf; + enum sci_status status; + + if (tmf_task != ireq->ttype) + return SCI_FAILURE; + + isci_tmf = isci_request_access_tmf(ireq); + + switch (isci_tmf->tmf_code) { + + case isci_tmf_sata_srst_high: + case isci_tmf_sata_srst_low: { + struct host_to_dev_fis *fis = &ireq->stp.cmd; + + memset(fis, 0, sizeof(*fis)); + + fis->fis_type = 0x27; + fis->flags &= ~0x80; + fis->flags &= 0xF0; + if (isci_tmf->tmf_code == isci_tmf_sata_srst_high) + fis->control |= ATA_SRST; + else + fis->control &= ~ATA_SRST; + break; + } + /* other management commnd go here... */ + default: + return SCI_FAILURE; + } + + /* core builds the protocol specific request + * based on the h2d fis. 
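+ * (The FIS assembled above is a host-to-device register FIS, type 0x27,
+ * with the SRST bit set for the srst_high phase of the SATA soft reset
+ * sequence and cleared for the srst_low phase.)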
+ */ + status = sci_task_request_construct_sata(ireq); + + return status; +} + +static struct isci_request *isci_task_request_build(struct isci_host *ihost, + struct isci_remote_device *idev, + u16 tag, struct isci_tmf *isci_tmf) +{ + enum sci_status status = SCI_FAILURE; + struct isci_request *ireq = NULL; + struct domain_device *dev; + + dev_dbg(&ihost->pdev->dev, + "%s: isci_tmf = %p\n", __func__, isci_tmf); + + dev = idev->domain_dev; + + /* do common allocation and init of request object. */ + ireq = isci_tmf_request_from_tag(ihost, isci_tmf, tag); + if (!ireq) + return NULL; + + /* let the core do it's construct. */ + status = sci_task_request_construct(ihost, idev, tag, + ireq); + + if (status != SCI_SUCCESS) { + dev_warn(&ihost->pdev->dev, + "%s: sci_task_request_construct failed - " + "status = 0x%x\n", + __func__, + status); + return NULL; + } + + /* XXX convert to get this from task->tproto like other drivers */ + if (dev->dev_type == SAS_END_DEV) { + isci_tmf->proto = SAS_PROTOCOL_SSP; + status = sci_task_request_construct_ssp(ireq); + if (status != SCI_SUCCESS) + return NULL; + } + + if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { + isci_tmf->proto = SAS_PROTOCOL_SATA; + status = isci_sata_management_task_request_build(ireq); + + if (status != SCI_SUCCESS) + return NULL; + } + return ireq; +} + +static int isci_task_execute_tmf(struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_tmf *tmf, unsigned long timeout_ms) +{ + DECLARE_COMPLETION_ONSTACK(completion); + enum sci_task_status status = SCI_TASK_FAILURE; + struct isci_request *ireq; + int ret = TMF_RESP_FUNC_FAILED; + unsigned long flags; + unsigned long timeleft; + u16 tag; + + spin_lock_irqsave(&ihost->scic_lock, flags); + tag = isci_alloc_tag(ihost); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + if (tag == SCI_CONTROLLER_INVALID_IO_TAG) + return ret; + + /* sanity check, return TMF_RESP_FUNC_FAILED + * if the device is not there and ready. + */ + if (!idev || + (!test_bit(IDEV_IO_READY, &idev->flags) && + !test_bit(IDEV_IO_NCQERROR, &idev->flags))) { + dev_dbg(&ihost->pdev->dev, + "%s: idev = %p not ready (%#lx)\n", + __func__, + idev, idev ? idev->flags : 0); + goto err_tci; + } else + dev_dbg(&ihost->pdev->dev, + "%s: idev = %p\n", + __func__, idev); + + /* Assign the pointer to the TMF's completion kernel wait structure. */ + tmf->complete = &completion; + + ireq = isci_task_request_build(ihost, idev, tag, tmf); + if (!ireq) + goto err_tci; + + spin_lock_irqsave(&ihost->scic_lock, flags); + + /* start the TMF io. */ + status = sci_controller_start_task(ihost, idev, ireq); + + if (status != SCI_TASK_SUCCESS) { + dev_dbg(&ihost->pdev->dev, + "%s: start_io failed - status = 0x%x, request = %p\n", + __func__, + status, + ireq); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + goto err_tci; + } + + if (tmf->cb_state_func != NULL) + tmf->cb_state_func(isci_tmf_started, tmf, tmf->cb_data); + + isci_request_change_state(ireq, started); + + /* add the request to the remote device request list. */ + list_add(&ireq->dev_node, &idev->reqs_in_process); + + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + /* Wait for the TMF to complete, or a timeout. 
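+ * The pattern below keeps the on-stack completion safe: if the timed
+ * wait expires, the request is terminated under scic_lock and then
+ * waited for unconditionally, so the completion can never be signalled
+ * after this function has returned.  In outline:
+ *
+ *   timeleft = wait_for_completion_timeout(&completion,
+ *                                          msecs_to_jiffies(timeout_ms));
+ *   if (!timeleft) {
+ *           sci_controller_terminate_request(ihost, idev, ireq);
+ *           wait_for_completion(&completion);
+ *   }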
*/ + timeleft = wait_for_completion_timeout(&completion, + msecs_to_jiffies(timeout_ms)); + + if (timeleft == 0) { + spin_lock_irqsave(&ihost->scic_lock, flags); + + if (tmf->cb_state_func != NULL) + tmf->cb_state_func(isci_tmf_timed_out, tmf, tmf->cb_data); + + sci_controller_terminate_request(ihost, + idev, + ireq); + + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + wait_for_completion(tmf->complete); + } + + isci_print_tmf(tmf); + + if (tmf->status == SCI_SUCCESS) + ret = TMF_RESP_FUNC_COMPLETE; + else if (tmf->status == SCI_FAILURE_IO_RESPONSE_VALID) { + dev_dbg(&ihost->pdev->dev, + "%s: tmf.status == " + "SCI_FAILURE_IO_RESPONSE_VALID\n", + __func__); + ret = TMF_RESP_FUNC_COMPLETE; + } + /* Else - leave the default "failed" status alone. */ + + dev_dbg(&ihost->pdev->dev, + "%s: completed request = %p\n", + __func__, + ireq); + + return ret; + + err_tci: + spin_lock_irqsave(&ihost->scic_lock, flags); + isci_tci_free(ihost, ISCI_TAG_TCI(tag)); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + return ret; +} + +static void isci_task_build_tmf(struct isci_tmf *tmf, + enum isci_tmf_function_codes code, + void (*tmf_sent_cb)(enum isci_tmf_cb_state, + struct isci_tmf *, + void *), + void *cb_data) +{ + memset(tmf, 0, sizeof(*tmf)); + + tmf->tmf_code = code; + tmf->cb_state_func = tmf_sent_cb; + tmf->cb_data = cb_data; +} + +static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf, + enum isci_tmf_function_codes code, + void (*tmf_sent_cb)(enum isci_tmf_cb_state, + struct isci_tmf *, + void *), + struct isci_request *old_request) +{ + isci_task_build_tmf(tmf, code, tmf_sent_cb, old_request); + tmf->io_tag = old_request->io_tag; +} + +/** + * isci_task_validate_request_to_abort() - This function checks the given I/O + * against the "started" state. If the request is still "started", it's + * state is changed to aborted. NOTE: isci_host->scic_lock MUST BE HELD + * BEFORE CALLING THIS FUNCTION. + * @isci_request: This parameter specifies the request object to control. + * @isci_host: This parameter specifies the ISCI host object + * @isci_device: This is the device to which the request is pending. + * @aborted_io_completion: This is a completion structure that will be added to + * the request in case it is changed to aborting; this completion is + * triggered when the request is fully completed. + * + * Either "started" on successful change of the task status to "aborted", or + * "unallocated" if the task cannot be controlled. + */ +static enum isci_request_status isci_task_validate_request_to_abort( + struct isci_request *isci_request, + struct isci_host *isci_host, + struct isci_remote_device *isci_device, + struct completion *aborted_io_completion) +{ + enum isci_request_status old_state = unallocated; + + /* Only abort the task if it's in the + * device's request_in_process list + */ + if (isci_request && !list_empty(&isci_request->dev_node)) { + old_state = isci_request_change_started_to_aborted( + isci_request, aborted_io_completion); + + } + + return old_state; +} + +/** +* isci_request_cleanup_completed_loiterer() - This function will take care of +* the final cleanup on any request which has been explicitly terminated. +* @isci_host: This parameter specifies the ISCI host object +* @isci_device: This is the device to which the request is pending. +* @isci_request: This parameter specifies the terminated request object. +* @task: This parameter is the libsas I/O request. 
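+* A "loiterer" here is a request that was explicitly terminated (see
+* isci_terminate_request_core() below) and therefore bypassed the normal
+* completion path: this helper breaks the sas_task <-> request links,
+* calls task_done() if the task is not already on the abort path, and
+* drops the request from the device's reqs_in_process list.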
+*/ +static void isci_request_cleanup_completed_loiterer( + struct isci_host *isci_host, + struct isci_remote_device *isci_device, + struct isci_request *isci_request, + struct sas_task *task) +{ + unsigned long flags; + + dev_dbg(&isci_host->pdev->dev, + "%s: isci_device=%p, request=%p, task=%p\n", + __func__, isci_device, isci_request, task); + + if (task != NULL) { + + spin_lock_irqsave(&task->task_state_lock, flags); + task->lldd_task = NULL; + + task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET; + + isci_set_task_doneflags(task); + + /* If this task is not in the abort path, call task_done. */ + if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) { + + spin_unlock_irqrestore(&task->task_state_lock, flags); + task->task_done(task); + } else + spin_unlock_irqrestore(&task->task_state_lock, flags); + } + + if (isci_request != NULL) { + spin_lock_irqsave(&isci_host->scic_lock, flags); + list_del_init(&isci_request->dev_node); + spin_unlock_irqrestore(&isci_host->scic_lock, flags); + } +} + +/** + * isci_terminate_request_core() - This function will terminate the given + * request, and wait for it to complete. This function must only be called + * from a thread that can wait. Note that the request is terminated and + * completed (back to the host, if started there). + * @ihost: This SCU. + * @idev: The target. + * @isci_request: The I/O request to be terminated. + * + */ +static void isci_terminate_request_core(struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *isci_request) +{ + enum sci_status status = SCI_SUCCESS; + bool was_terminated = false; + bool needs_cleanup_handling = false; + enum isci_request_status request_status; + unsigned long flags; + unsigned long termination_completed = 1; + struct completion *io_request_completion; + struct sas_task *task; + + dev_dbg(&ihost->pdev->dev, + "%s: device = %p; request = %p\n", + __func__, idev, isci_request); + + spin_lock_irqsave(&ihost->scic_lock, flags); + + io_request_completion = isci_request->io_request_completion; + + task = (isci_request->ttype == io_task) + ? isci_request_access_task(isci_request) + : NULL; + + /* Note that we are not going to control + * the target to abort the request. + */ + set_bit(IREQ_COMPLETE_IN_TARGET, &isci_request->flags); + + /* Make sure the request wasn't just sitting around signalling + * device condition (if the request handle is NULL, then the + * request completed but needed additional handling here). + */ + if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) { + was_terminated = true; + needs_cleanup_handling = true; + status = sci_controller_terminate_request(ihost, + idev, + isci_request); + } + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + /* + * The only time the request to terminate will + * fail is when the io request is completed and + * being aborted. + */ + if (status != SCI_SUCCESS) { + dev_dbg(&ihost->pdev->dev, + "%s: sci_controller_terminate_request" + " returned = 0x%x\n", + __func__, status); + + isci_request->io_request_completion = NULL; + + } else { + if (was_terminated) { + dev_dbg(&ihost->pdev->dev, + "%s: before completion wait (%p/%p)\n", + __func__, isci_request, io_request_completion); + + /* Wait here for the request to complete. */ + #define TERMINATION_TIMEOUT_MSEC 500 + termination_completed + = wait_for_completion_timeout( + io_request_completion, + msecs_to_jiffies(TERMINATION_TIMEOUT_MSEC)); + + if (!termination_completed) { + + /* The request to terminate has timed out. 
*/ + spin_lock_irqsave(&ihost->scic_lock, + flags); + + /* Check for state changes. */ + if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) { + + /* The best we can do is to have the + * request die a silent death if it + * ever really completes. + * + * Set the request state to "dead", + * and clear the task pointer so that + * an actual completion event callback + * doesn't do anything. + */ + isci_request->status = dead; + isci_request->io_request_completion + = NULL; + + if (isci_request->ttype == io_task) { + + /* Break links with the + * sas_task. + */ + isci_request->ttype_ptr.io_task_ptr + = NULL; + } + } else + termination_completed = 1; + + spin_unlock_irqrestore(&ihost->scic_lock, + flags); + + if (!termination_completed) { + + dev_dbg(&ihost->pdev->dev, + "%s: *** Timeout waiting for " + "termination(%p/%p)\n", + __func__, io_request_completion, + isci_request); + + /* The request can no longer be referenced + * safely since it may go away if the + * termination every really does complete. + */ + isci_request = NULL; + } + } + if (termination_completed) + dev_dbg(&ihost->pdev->dev, + "%s: after completion wait (%p/%p)\n", + __func__, isci_request, io_request_completion); + } + + if (termination_completed) { + + isci_request->io_request_completion = NULL; + + /* Peek at the status of the request. This will tell + * us if there was special handling on the request such that it + * needs to be detached and freed here. + */ + spin_lock_irqsave(&isci_request->state_lock, flags); + request_status = isci_request->status; + + if ((isci_request->ttype == io_task) /* TMFs are in their own thread */ + && ((request_status == aborted) + || (request_status == aborting) + || (request_status == terminating) + || (request_status == completed) + || (request_status == dead) + ) + ) { + + /* The completion routine won't free a request in + * the aborted/aborting/etc. states, so we do + * it here. + */ + needs_cleanup_handling = true; + } + spin_unlock_irqrestore(&isci_request->state_lock, flags); + + } + if (needs_cleanup_handling) + isci_request_cleanup_completed_loiterer( + ihost, idev, isci_request, task); + } +} + +/** + * isci_terminate_pending_requests() - This function will change the all of the + * requests on the given device's state to "aborting", will terminate the + * requests, and wait for them to complete. This function must only be + * called from a thread that can wait. Note that the requests are all + * terminated and completed (back to the host, if started there). + * @isci_host: This parameter specifies SCU. + * @idev: This parameter specifies the target. + * + */ +void isci_terminate_pending_requests(struct isci_host *ihost, + struct isci_remote_device *idev) +{ + struct completion request_completion; + enum isci_request_status old_state; + unsigned long flags; + LIST_HEAD(list); + + spin_lock_irqsave(&ihost->scic_lock, flags); + list_splice_init(&idev->reqs_in_process, &list); + + /* assumes that isci_terminate_request_core deletes from the list */ + while (!list_empty(&list)) { + struct isci_request *ireq = list_entry(list.next, typeof(*ireq), dev_node); + + /* Change state to "terminating" if it is currently + * "started". + */ + old_state = isci_request_change_started_to_newstate(ireq, + &request_completion, + terminating); + switch (old_state) { + case started: + case completed: + case aborting: + break; + default: + /* termination in progress, or otherwise dispositioned. 
+ * We know the request was on 'list' so should be safe + * to move it back to reqs_in_process + */ + list_move(&ireq->dev_node, &idev->reqs_in_process); + ireq = NULL; + break; + } + + if (!ireq) + continue; + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + init_completion(&request_completion); + + dev_dbg(&ihost->pdev->dev, + "%s: idev=%p request=%p; task=%p old_state=%d\n", + __func__, idev, ireq, + ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL, + old_state); + + /* If the old_state is started: + * This request was not already being aborted. If it had been, + * then the aborting I/O (ie. the TMF request) would not be in + * the aborting state, and thus would be terminated here. Note + * that since the TMF completion's call to the kernel function + * "complete()" does not happen until the pending I/O request + * terminate fully completes, we do not have to implement a + * special wait here for already aborting requests - the + * termination of the TMF request will force the request + * to finish it's already started terminate. + * + * If old_state == completed: + * This request completed from the SCU hardware perspective + * and now just needs cleaning up in terms of freeing the + * request and potentially calling up to libsas. + * + * If old_state == aborting: + * This request has already gone through a TMF timeout, but may + * not have been terminated; needs cleaning up at least. + */ + isci_terminate_request_core(ihost, idev, ireq); + spin_lock_irqsave(&ihost->scic_lock, flags); + } + spin_unlock_irqrestore(&ihost->scic_lock, flags); +} + +/** + * isci_task_send_lu_reset_sas() - This function is called by of the SAS Domain + * Template functions. + * @lun: This parameter specifies the lun to be reset. + * + * status, zero indicates success. + */ +static int isci_task_send_lu_reset_sas( + struct isci_host *isci_host, + struct isci_remote_device *isci_device, + u8 *lun) +{ + struct isci_tmf tmf; + int ret = TMF_RESP_FUNC_FAILED; + + dev_dbg(&isci_host->pdev->dev, + "%s: isci_host = %p, isci_device = %p\n", + __func__, isci_host, isci_device); + /* Send the LUN reset to the target. By the time the call returns, + * the TMF has fully exected in the target (in which case the return + * value is "TMF_RESP_FUNC_COMPLETE", or the request timed-out (or + * was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED"). + */ + isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset, NULL, NULL); + + #define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */ + ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS); + + if (ret == TMF_RESP_FUNC_COMPLETE) + dev_dbg(&isci_host->pdev->dev, + "%s: %p: TMF_LU_RESET passed\n", + __func__, isci_device); + else + dev_dbg(&isci_host->pdev->dev, + "%s: %p: TMF_LU_RESET failed (%x)\n", + __func__, isci_device, ret); + + return ret; +} + +static int isci_task_send_lu_reset_sata(struct isci_host *ihost, + struct isci_remote_device *idev, u8 *lun) +{ + int ret = TMF_RESP_FUNC_FAILED; + struct isci_tmf tmf; + + /* Send the soft reset to the target */ + #define ISCI_SRST_TIMEOUT_MS 25000 /* 25 second timeout. */ + isci_task_build_tmf(&tmf, isci_tmf_sata_srst_high, NULL, NULL); + + ret = isci_task_execute_tmf(ihost, idev, &tmf, ISCI_SRST_TIMEOUT_MS); + + if (ret != TMF_RESP_FUNC_COMPLETE) { + dev_dbg(&ihost->pdev->dev, + "%s: Assert SRST failed (%p) = %x", + __func__, idev, ret); + + /* Return the failure so that the LUN reset is escalated + * to a target reset. 
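+ *
+ * (SATA/STP targets have no native LUN reset, so the "LU reset" here is
+ * modelled as asserting SRST via the isci_tmf_sata_srst_high TMF; the
+ * escalation on failure ends up in isci_task_I_T_nexus_reset().)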
+ */ + } + return ret; +} + +/** + * isci_task_lu_reset() - This function is one of the SAS Domain Template + * functions. This is one of the Task Management functoins called by libsas, + * to reset the given lun. Note the assumption that while this call is + * executing, no I/O will be sent by the host to the device. + * @lun: This parameter specifies the lun to be reset. + * + * status, zero indicates success. + */ +int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun) +{ + struct isci_host *isci_host = dev_to_ihost(domain_device); + struct isci_remote_device *isci_device; + unsigned long flags; + int ret; + + spin_lock_irqsave(&isci_host->scic_lock, flags); + isci_device = isci_lookup_device(domain_device); + spin_unlock_irqrestore(&isci_host->scic_lock, flags); + + dev_dbg(&isci_host->pdev->dev, + "%s: domain_device=%p, isci_host=%p; isci_device=%p\n", + __func__, domain_device, isci_host, isci_device); + + if (isci_device) + set_bit(IDEV_EH, &isci_device->flags); + + /* If there is a device reset pending on any request in the + * device's list, fail this LUN reset request in order to + * escalate to the device reset. + */ + if (!isci_device || + isci_device_is_reset_pending(isci_host, isci_device)) { + dev_dbg(&isci_host->pdev->dev, + "%s: No dev (%p), or " + "RESET PENDING: domain_device=%p\n", + __func__, isci_device, domain_device); + ret = TMF_RESP_FUNC_FAILED; + goto out; + } + + /* Send the task management part of the reset. */ + if (sas_protocol_ata(domain_device->tproto)) { + ret = isci_task_send_lu_reset_sata(isci_host, isci_device, lun); + } else + ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun); + + /* If the LUN reset worked, all the I/O can now be terminated. */ + if (ret == TMF_RESP_FUNC_COMPLETE) + /* Terminate all I/O now. */ + isci_terminate_pending_requests(isci_host, + isci_device); + + out: + isci_put_device(isci_device); + return ret; +} + + +/* int (*lldd_clear_nexus_port)(struct asd_sas_port *); */ +int isci_task_clear_nexus_port(struct asd_sas_port *port) +{ + return TMF_RESP_FUNC_FAILED; +} + + + +int isci_task_clear_nexus_ha(struct sas_ha_struct *ha) +{ + return TMF_RESP_FUNC_FAILED; +} + +/* Task Management Functions. Must be called from process context. */ + +/** + * isci_abort_task_process_cb() - This is a helper function for the abort task + * TMF command. It manages the request state with respect to the successful + * transmission / completion of the abort task request. + * @cb_state: This parameter specifies when this function was called - after + * the TMF request has been started and after it has timed-out. + * @tmf: This parameter specifies the TMF in progress. + * + * + */ +static void isci_abort_task_process_cb( + enum isci_tmf_cb_state cb_state, + struct isci_tmf *tmf, + void *cb_data) +{ + struct isci_request *old_request; + + old_request = (struct isci_request *)cb_data; + + dev_dbg(&old_request->isci_host->pdev->dev, + "%s: tmf=%p, old_request=%p\n", + __func__, tmf, old_request); + + switch (cb_state) { + + case isci_tmf_started: + /* The TMF has been started. Nothing to do here, since the + * request state was already set to "aborted" by the abort + * task function. 
+ */ + if ((old_request->status != aborted) + && (old_request->status != completed)) + dev_dbg(&old_request->isci_host->pdev->dev, + "%s: Bad request status (%d): tmf=%p, old_request=%p\n", + __func__, old_request->status, tmf, old_request); + break; + + case isci_tmf_timed_out: + + /* Set the task's state to "aborting", since the abort task + * function thread set it to "aborted" (above) in anticipation + * of the task management request working correctly. Since the + * timeout has now fired, the TMF request failed. We set the + * state such that the request completion will indicate the + * device is no longer present. + */ + isci_request_change_state(old_request, aborting); + break; + + default: + dev_dbg(&old_request->isci_host->pdev->dev, + "%s: Bad cb_state (%d): tmf=%p, old_request=%p\n", + __func__, cb_state, tmf, old_request); + break; + } +} + +/** + * isci_task_abort_task() - This function is one of the SAS Domain Template + * functions. This function is called by libsas to abort a specified task. + * @task: This parameter specifies the SAS task to abort. + * + * status, zero indicates success. + */ +int isci_task_abort_task(struct sas_task *task) +{ + struct isci_host *isci_host = dev_to_ihost(task->dev); + DECLARE_COMPLETION_ONSTACK(aborted_io_completion); + struct isci_request *old_request = NULL; + enum isci_request_status old_state; + struct isci_remote_device *isci_device = NULL; + struct isci_tmf tmf; + int ret = TMF_RESP_FUNC_FAILED; + unsigned long flags; + bool any_dev_reset = false; + + /* Get the isci_request reference from the task. Note that + * this check does not depend on the pending request list + * in the device, because tasks driving resets may land here + * after completion in the core. + */ + spin_lock_irqsave(&isci_host->scic_lock, flags); + spin_lock(&task->task_state_lock); + + old_request = task->lldd_task; + + /* If task is already done, the request isn't valid */ + if (!(task->task_state_flags & SAS_TASK_STATE_DONE) && + (task->task_state_flags & SAS_TASK_AT_INITIATOR) && + old_request) + isci_device = isci_lookup_device(task->dev); + + spin_unlock(&task->task_state_lock); + spin_unlock_irqrestore(&isci_host->scic_lock, flags); + + dev_dbg(&isci_host->pdev->dev, + "%s: task = %p\n", __func__, task); + + if (!isci_device || !old_request) + goto out; + + set_bit(IDEV_EH, &isci_device->flags); + + /* This version of the driver will fail abort requests for + * SATA/STP. Failing the abort request this way will cause the + * SCSI error handler thread to escalate to LUN reset + */ + if (sas_protocol_ata(task->task_proto)) { + dev_dbg(&isci_host->pdev->dev, + " task %p is for a STP/SATA device;" + " returning TMF_RESP_FUNC_FAILED\n" + " to cause a LUN reset...\n", task); + goto out; + } + + dev_dbg(&isci_host->pdev->dev, + "%s: old_request == %p\n", __func__, old_request); + + any_dev_reset = isci_device_is_reset_pending(isci_host, isci_device); + + spin_lock_irqsave(&task->task_state_lock, flags); + + any_dev_reset = any_dev_reset || (task->task_state_flags & SAS_TASK_NEED_DEV_RESET); + + /* If the extraction of the request reference from the task + * failed, then the request has been completed (or if there is a + * pending reset then this abort request function must be failed + * in order to escalate to the target reset). + */ + if ((old_request == NULL) || any_dev_reset) { + + /* If the device reset task flag is set, fail the task + * management request. Otherwise, the original request + * has completed. 
+ */ + if (any_dev_reset) { + + /* Turn off the task's DONE to make sure this + * task is escalated to a target reset. + */ + task->task_state_flags &= ~SAS_TASK_STATE_DONE; + + /* Make the reset happen as soon as possible. */ + task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; + + spin_unlock_irqrestore(&task->task_state_lock, flags); + + /* Fail the task management request in order to + * escalate to the target reset. + */ + ret = TMF_RESP_FUNC_FAILED; + + dev_dbg(&isci_host->pdev->dev, + "%s: Failing task abort in order to " + "escalate to target reset because\n" + "SAS_TASK_NEED_DEV_RESET is set for " + "task %p on dev %p\n", + __func__, task, isci_device); + + + } else { + /* The request has already completed and there + * is nothing to do here other than to set the task + * done bit, and indicate that the task abort function + * was sucessful. + */ + isci_set_task_doneflags(task); + + spin_unlock_irqrestore(&task->task_state_lock, flags); + + ret = TMF_RESP_FUNC_COMPLETE; + + dev_dbg(&isci_host->pdev->dev, + "%s: abort task not needed for %p\n", + __func__, task); + } + goto out; + } else { + spin_unlock_irqrestore(&task->task_state_lock, flags); + } + + spin_lock_irqsave(&isci_host->scic_lock, flags); + + /* Check the request status and change to "aborted" if currently + * "starting"; if true then set the I/O kernel completion + * struct that will be triggered when the request completes. + */ + old_state = isci_task_validate_request_to_abort( + old_request, isci_host, isci_device, + &aborted_io_completion); + if ((old_state != started) && + (old_state != completed) && + (old_state != aborting)) { + + spin_unlock_irqrestore(&isci_host->scic_lock, flags); + + /* The request was already being handled by someone else (because + * they got to set the state away from started). + */ + dev_dbg(&isci_host->pdev->dev, + "%s: device = %p; old_request %p already being aborted\n", + __func__, + isci_device, old_request); + ret = TMF_RESP_FUNC_COMPLETE; + goto out; + } + if (task->task_proto == SAS_PROTOCOL_SMP || + test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) { + + spin_unlock_irqrestore(&isci_host->scic_lock, flags); + + dev_dbg(&isci_host->pdev->dev, + "%s: SMP request (%d)" + " or complete_in_target (%d), thus no TMF\n", + __func__, (task->task_proto == SAS_PROTOCOL_SMP), + test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)); + + /* Set the state on the task. */ + isci_task_all_done(task); + + ret = TMF_RESP_FUNC_COMPLETE; + + /* Stopping and SMP devices are not sent a TMF, and are not + * reset, but the outstanding I/O request is terminated below. + */ + } else { + /* Fill in the tmf stucture */ + isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort, + isci_abort_task_process_cb, + old_request); + + spin_unlock_irqrestore(&isci_host->scic_lock, flags); + + #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* half second timeout. */ + ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, + ISCI_ABORT_TASK_TIMEOUT_MS); + + if (ret != TMF_RESP_FUNC_COMPLETE) + dev_dbg(&isci_host->pdev->dev, + "%s: isci_task_send_tmf failed\n", + __func__); + } + if (ret == TMF_RESP_FUNC_COMPLETE) { + set_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags); + + /* Clean up the request on our side, and wait for the aborted + * I/O to complete. 
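+ * At this point the abort TMF has completed at the target, so the
+ * request was marked IREQ_COMPLETE_IN_TARGET above and only the local
+ * task context still needs to be torn down; isci_terminate_request_core()
+ * below blocks until that termination completes (or times out).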
+ */ + isci_terminate_request_core(isci_host, isci_device, old_request); + } + + /* Make sure we do not leave a reference to aborted_io_completion */ + old_request->io_request_completion = NULL; + out: + isci_put_device(isci_device); + return ret; +} + +/** + * isci_task_abort_task_set() - This function is one of the SAS Domain Template + * functions. This is one of the Task Management functoins called by libsas, + * to abort all task for the given lun. + * @d_device: This parameter specifies the domain device associated with this + * request. + * @lun: This parameter specifies the lun associated with this request. + * + * status, zero indicates success. + */ +int isci_task_abort_task_set( + struct domain_device *d_device, + u8 *lun) +{ + return TMF_RESP_FUNC_FAILED; +} + + +/** + * isci_task_clear_aca() - This function is one of the SAS Domain Template + * functions. This is one of the Task Management functoins called by libsas. + * @d_device: This parameter specifies the domain device associated with this + * request. + * @lun: This parameter specifies the lun associated with this request. + * + * status, zero indicates success. + */ +int isci_task_clear_aca( + struct domain_device *d_device, + u8 *lun) +{ + return TMF_RESP_FUNC_FAILED; +} + + + +/** + * isci_task_clear_task_set() - This function is one of the SAS Domain Template + * functions. This is one of the Task Management functoins called by libsas. + * @d_device: This parameter specifies the domain device associated with this + * request. + * @lun: This parameter specifies the lun associated with this request. + * + * status, zero indicates success. + */ +int isci_task_clear_task_set( + struct domain_device *d_device, + u8 *lun) +{ + return TMF_RESP_FUNC_FAILED; +} + + +/** + * isci_task_query_task() - This function is implemented to cause libsas to + * correctly escalate the failed abort to a LUN or target reset (this is + * because sas_scsi_find_task libsas function does not correctly interpret + * all return codes from the abort task call). When TMF_RESP_FUNC_SUCC is + * returned, libsas turns this into a LUN reset; when FUNC_FAILED is + * returned, libsas will turn this into a target reset + * @task: This parameter specifies the sas task being queried. + * @lun: This parameter specifies the lun associated with this request. + * + * status, zero indicates success. + */ +int isci_task_query_task( + struct sas_task *task) +{ + /* See if there is a pending device reset for this device. */ + if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) + return TMF_RESP_FUNC_FAILED; + else + return TMF_RESP_FUNC_SUCC; +} + +/* + * isci_task_request_complete() - This function is called by the sci core when + * an task request completes. + * @ihost: This parameter specifies the ISCI host object + * @ireq: This parameter is the completed isci_request object. + * @completion_status: This parameter specifies the completion status from the + * sci core. + * + * none. 
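+ *
+ * This runs from the core's task-management completion path (as opposed
+ * to the normal I/O completion path): it captures the SSP response IU or
+ * SATA D2H FIS into the isci_tmf, retires the request, and finally
+ * signals tmf->complete, the on-stack completion that
+ * isci_task_execute_tmf() is sleeping on.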
+ */ +void +isci_task_request_complete(struct isci_host *ihost, + struct isci_request *ireq, + enum sci_task_status completion_status) +{ + struct isci_tmf *tmf = isci_request_access_tmf(ireq); + struct completion *tmf_complete; + + dev_dbg(&ihost->pdev->dev, + "%s: request = %p, status=%d\n", + __func__, ireq, completion_status); + + isci_request_change_state(ireq, completed); + + tmf->status = completion_status; + set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags); + + if (tmf->proto == SAS_PROTOCOL_SSP) { + memcpy(&tmf->resp.resp_iu, + &ireq->ssp.rsp, + SSP_RESP_IU_MAX_SIZE); + } else if (tmf->proto == SAS_PROTOCOL_SATA) { + memcpy(&tmf->resp.d2h_fis, + &ireq->stp.rsp, + sizeof(struct dev_to_host_fis)); + } + + /* PRINT_TMF( ((struct isci_tmf *)request->task)); */ + tmf_complete = tmf->complete; + + sci_controller_complete_io(ihost, ireq->target_device, ireq); + /* set the 'terminated' flag handle to make sure it cannot be terminated + * or completed again. + */ + set_bit(IREQ_TERMINATED, &ireq->flags); + + isci_request_change_state(ireq, unallocated); + list_del_init(&ireq->dev_node); + + /* The task management part completes last. */ + complete(tmf_complete); +} + +static void isci_smp_task_timedout(unsigned long _task) +{ + struct sas_task *task = (void *) _task; + unsigned long flags; + + spin_lock_irqsave(&task->task_state_lock, flags); + if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) + task->task_state_flags |= SAS_TASK_STATE_ABORTED; + spin_unlock_irqrestore(&task->task_state_lock, flags); + + complete(&task->completion); +} + +static void isci_smp_task_done(struct sas_task *task) +{ + if (!del_timer(&task->timer)) + return; + complete(&task->completion); +} + +static struct sas_task *isci_alloc_task(void) +{ + struct sas_task *task = kzalloc(sizeof(*task), GFP_KERNEL); + + if (task) { + INIT_LIST_HEAD(&task->list); + spin_lock_init(&task->task_state_lock); + task->task_state_flags = SAS_TASK_STATE_PENDING; + init_timer(&task->timer); + init_completion(&task->completion); + } + + return task; +} + +static void isci_free_task(struct isci_host *ihost, struct sas_task *task) +{ + if (task) { + BUG_ON(!list_empty(&task->list)); + kfree(task); + } +} + +static int isci_smp_execute_task(struct isci_host *ihost, + struct domain_device *dev, void *req, + int req_size, void *resp, int resp_size) +{ + int res, retry; + struct sas_task *task = NULL; + + for (retry = 0; retry < 3; retry++) { + task = isci_alloc_task(); + if (!task) + return -ENOMEM; + + task->dev = dev; + task->task_proto = dev->tproto; + sg_init_one(&task->smp_task.smp_req, req, req_size); + sg_init_one(&task->smp_task.smp_resp, resp, resp_size); + + task->task_done = isci_smp_task_done; + + task->timer.data = (unsigned long) task; + task->timer.function = isci_smp_task_timedout; + task->timer.expires = jiffies + 10*HZ; + add_timer(&task->timer); + + res = isci_task_execute_task(task, 1, GFP_KERNEL); + + if (res) { + del_timer(&task->timer); + dev_dbg(&ihost->pdev->dev, + "%s: executing SMP task failed:%d\n", + __func__, res); + goto ex_err; + } + + wait_for_completion(&task->completion); + res = -ECOMM; + if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { + dev_dbg(&ihost->pdev->dev, + "%s: smp task timed out or aborted\n", + __func__); + isci_task_abort_task(task); + if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { + dev_dbg(&ihost->pdev->dev, + "%s: SMP task aborted and not done\n", + __func__); + goto ex_err; + } + } + if (task->task_status.resp == SAS_TASK_COMPLETE && + task->task_status.stat == 
SAM_STAT_GOOD) { + res = 0; + break; + } + if (task->task_status.resp == SAS_TASK_COMPLETE && + task->task_status.stat == SAS_DATA_UNDERRUN) { + /* no error, but return the number of bytes of + * underrun */ + res = task->task_status.residual; + break; + } + if (task->task_status.resp == SAS_TASK_COMPLETE && + task->task_status.stat == SAS_DATA_OVERRUN) { + res = -EMSGSIZE; + break; + } else { + dev_dbg(&ihost->pdev->dev, + "%s: task to dev %016llx response: 0x%x " + "status 0x%x\n", __func__, + SAS_ADDR(dev->sas_addr), + task->task_status.resp, + task->task_status.stat); + isci_free_task(ihost, task); + task = NULL; + } + } +ex_err: + BUG_ON(retry == 3 && task != NULL); + isci_free_task(ihost, task); + return res; +} + +#define DISCOVER_REQ_SIZE 16 +#define DISCOVER_RESP_SIZE 56 + +int isci_smp_get_phy_attached_dev_type(struct isci_host *ihost, + struct domain_device *dev, + int phy_id, int *adt) +{ + struct smp_resp *disc_resp; + u8 *disc_req; + int res; + + disc_resp = kzalloc(DISCOVER_RESP_SIZE, GFP_KERNEL); + if (!disc_resp) + return -ENOMEM; + + disc_req = kzalloc(DISCOVER_REQ_SIZE, GFP_KERNEL); + if (disc_req) { + disc_req[0] = SMP_REQUEST; + disc_req[1] = SMP_DISCOVER; + disc_req[9] = phy_id; + } else { + kfree(disc_resp); + return -ENOMEM; + } + res = isci_smp_execute_task(ihost, dev, disc_req, DISCOVER_REQ_SIZE, + disc_resp, DISCOVER_RESP_SIZE); + if (!res) { + if (disc_resp->result != SMP_RESP_FUNC_ACC) + res = disc_resp->result; + else + *adt = disc_resp->disc.attached_dev_type; + } + kfree(disc_req); + kfree(disc_resp); + + return res; +} + +static void isci_wait_for_smp_phy_reset(struct isci_remote_device *idev, int phy_num) +{ + struct domain_device *dev = idev->domain_dev; + struct isci_port *iport = idev->isci_port; + struct isci_host *ihost = iport->isci_host; + int res, iteration = 0, attached_device_type; + #define STP_WAIT_MSECS 25000 + unsigned long tmo = msecs_to_jiffies(STP_WAIT_MSECS); + unsigned long deadline = jiffies + tmo; + enum { + SMP_PHYWAIT_PHYDOWN, + SMP_PHYWAIT_PHYUP, + SMP_PHYWAIT_DONE + } phy_state = SMP_PHYWAIT_PHYDOWN; + + /* While there is time, wait for the phy to go away and come back */ + while (time_is_after_jiffies(deadline) && phy_state != SMP_PHYWAIT_DONE) { + int event = atomic_read(&iport->event); + + ++iteration; + + tmo = wait_event_timeout(ihost->eventq, + event != atomic_read(&iport->event) || + !test_bit(IPORT_BCN_BLOCKED, &iport->flags), + tmo); + /* link down, stop polling */ + if (!test_bit(IPORT_BCN_BLOCKED, &iport->flags)) + break; + + dev_dbg(&ihost->pdev->dev, + "%s: iport %p, iteration %d," + " phase %d: time_remaining %lu, bcns = %d\n", + __func__, iport, iteration, phy_state, + tmo, test_bit(IPORT_BCN_PENDING, &iport->flags)); + + res = isci_smp_get_phy_attached_dev_type(ihost, dev, phy_num, + &attached_device_type); + tmo = deadline - jiffies; + + if (res) { + dev_dbg(&ihost->pdev->dev, + "%s: iteration %d, phase %d:" + " SMP error=%d, time_remaining=%lu\n", + __func__, iteration, phy_state, res, tmo); + break; + } + dev_dbg(&ihost->pdev->dev, + "%s: iport %p, iteration %d," + " phase %d: time_remaining %lu, bcns = %d, " + "attdevtype = %x\n", + __func__, iport, iteration, phy_state, + tmo, test_bit(IPORT_BCN_PENDING, &iport->flags), + attached_device_type); + + switch (phy_state) { + case SMP_PHYWAIT_PHYDOWN: + /* Has the device gone away? */ + if (!attached_device_type) + phy_state = SMP_PHYWAIT_PHYUP; + + break; + + case SMP_PHYWAIT_PHYUP: + /* Has the device come back? 
*/ + if (attached_device_type) + phy_state = SMP_PHYWAIT_DONE; + break; + + case SMP_PHYWAIT_DONE: + break; + } + + } + dev_dbg(&ihost->pdev->dev, "%s: done\n", __func__); +} + +static int isci_reset_device(struct isci_host *ihost, + struct isci_remote_device *idev) +{ + struct sas_phy *phy = sas_find_local_phy(idev->domain_dev); + struct isci_port *iport = idev->isci_port; + enum sci_status status; + unsigned long flags; + int rc; + + dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev); + + spin_lock_irqsave(&ihost->scic_lock, flags); + status = sci_remote_device_reset(idev); + if (status != SCI_SUCCESS) { + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + dev_dbg(&ihost->pdev->dev, + "%s: sci_remote_device_reset(%p) returned %d!\n", + __func__, idev, status); + + return TMF_RESP_FUNC_FAILED; + } + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + /* Make sure all pending requests are able to be fully terminated. */ + isci_device_clear_reset_pending(ihost, idev); + + /* If this is a device on an expander, disable BCN processing. */ + if (!scsi_is_sas_phy_local(phy)) + set_bit(IPORT_BCN_BLOCKED, &iport->flags); + + rc = sas_phy_reset(phy, true); + + /* Terminate in-progress I/O now. */ + isci_remote_device_nuke_requests(ihost, idev); + + /* Since all pending TCs have been cleaned, resume the RNC. */ + spin_lock_irqsave(&ihost->scic_lock, flags); + status = sci_remote_device_reset_complete(idev); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + /* If this is a device on an expander, bring the phy back up. */ + if (!scsi_is_sas_phy_local(phy)) { + /* A phy reset will cause the device to go away then reappear. + * Since libsas will take action on incoming BCNs (eg. remove + * a device going through an SMP phy-control driven reset), + * we need to wait until the phy comes back up before letting + * discovery proceed in libsas. 
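+ * The overall ordering for an expander-attached device is therefore:
+ *
+ *   1. set IPORT_BCN_BLOCKED so broadcast change notifications are held
+ *   2. sas_phy_reset() the attached phy
+ *   3. terminate in-flight I/O and resume the remote node context
+ *   4. poll the expander with SMP DISCOVER until the phy reports an
+ *      attached device again (isci_wait_for_smp_phy_reset below)
+ *   5. re-enable BCN processing so libsas can rediscover the device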
+ */ + isci_wait_for_smp_phy_reset(idev, phy->number); + + spin_lock_irqsave(&ihost->scic_lock, flags); + isci_port_bcn_enable(ihost, idev->isci_port); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + } + + if (status != SCI_SUCCESS) { + dev_dbg(&ihost->pdev->dev, + "%s: sci_remote_device_reset_complete(%p) " + "returned %d!\n", __func__, idev, status); + } + + dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev); + + return rc; +} + +int isci_task_I_T_nexus_reset(struct domain_device *dev) +{ + struct isci_host *ihost = dev_to_ihost(dev); + struct isci_remote_device *idev; + unsigned long flags; + int ret; + + spin_lock_irqsave(&ihost->scic_lock, flags); + idev = isci_lookup_device(dev); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + if (!idev || !test_bit(IDEV_EH, &idev->flags)) { + ret = TMF_RESP_FUNC_COMPLETE; + goto out; + } + + ret = isci_reset_device(ihost, idev); + out: + isci_put_device(idev); + return ret; +} + +int isci_bus_reset_handler(struct scsi_cmnd *cmd) +{ + struct domain_device *dev = sdev_to_domain_dev(cmd->device); + struct isci_host *ihost = dev_to_ihost(dev); + struct isci_remote_device *idev; + unsigned long flags; + int ret; + + spin_lock_irqsave(&ihost->scic_lock, flags); + idev = isci_lookup_device(dev); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + if (!idev) { + ret = TMF_RESP_FUNC_COMPLETE; + goto out; + } + + ret = isci_reset_device(ihost, idev); + out: + isci_put_device(idev); + return ret; +} diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h new file mode 100644 index 00000000000..4a7fa90287e --- /dev/null +++ b/drivers/scsi/isci/task.h @@ -0,0 +1,367 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef _ISCI_TASK_H_ +#define _ISCI_TASK_H_ + +#include <scsi/sas_ata.h> +#include "host.h" + +struct isci_request; + +/** + * enum isci_tmf_cb_state - This enum defines the possible states in which the + * TMF callback function is invoked during the TMF execution process. + * + * + */ +enum isci_tmf_cb_state { + + isci_tmf_init_state = 0, + isci_tmf_started, + isci_tmf_timed_out +}; + +/** + * enum isci_tmf_function_codes - This enum defines the possible preparations + * of task management requests. + * + * + */ +enum isci_tmf_function_codes { + + isci_tmf_func_none = 0, + isci_tmf_ssp_task_abort = TMF_ABORT_TASK, + isci_tmf_ssp_lun_reset = TMF_LU_RESET, + isci_tmf_sata_srst_high = TMF_LU_RESET + 0x100, /* Non SCSI */ + isci_tmf_sata_srst_low = TMF_LU_RESET + 0x101 /* Non SCSI */ +}; +/** + * struct isci_tmf - This class represents the task management object which + * acts as an interface to libsas for processing task management requests + * + * + */ +struct isci_tmf { + + struct completion *complete; + enum sas_protocol proto; + union { + struct ssp_response_iu resp_iu; + struct dev_to_host_fis d2h_fis; + u8 rsp_buf[SSP_RESP_IU_MAX_SIZE]; + } resp; + unsigned char lun[8]; + u16 io_tag; + struct isci_remote_device *device; + enum isci_tmf_function_codes tmf_code; + int status; + + /* The optional callback function allows the user process to + * track the TMF transmit / timeout conditions. 
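+ * It is invoked with isci_tmf_started once the TMF has been accepted by
+ * the controller, and with isci_tmf_timed_out if the wait in
+ * isci_task_execute_tmf() expires; cb_data is opaque to the TMF code.
+ * The abort path, for example, builds its TMF as:
+ *
+ *   isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
+ *                                  isci_abort_task_process_cb,
+ *                                  old_request);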
+ */ + void (*cb_state_func)( + enum isci_tmf_cb_state, + struct isci_tmf *, void *); + void *cb_data; + +}; + +static inline void isci_print_tmf(struct isci_tmf *tmf) +{ + if (SAS_PROTOCOL_SATA == tmf->proto) + dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev, + "%s: status = %x\n" + "tmf->resp.d2h_fis.status = %x\n" + "tmf->resp.d2h_fis.error = %x\n", + __func__, + tmf->status, + tmf->resp.d2h_fis.status, + tmf->resp.d2h_fis.error); + else + dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev, + "%s: status = %x\n" + "tmf->resp.resp_iu.data_present = %x\n" + "tmf->resp.resp_iu.status = %x\n" + "tmf->resp.resp_iu.data_length = %x\n" + "tmf->resp.resp_iu.data[0] = %x\n" + "tmf->resp.resp_iu.data[1] = %x\n" + "tmf->resp.resp_iu.data[2] = %x\n" + "tmf->resp.resp_iu.data[3] = %x\n", + __func__, + tmf->status, + tmf->resp.resp_iu.datapres, + tmf->resp.resp_iu.status, + be32_to_cpu(tmf->resp.resp_iu.response_data_len), + tmf->resp.resp_iu.resp_data[0], + tmf->resp.resp_iu.resp_data[1], + tmf->resp.resp_iu.resp_data[2], + tmf->resp.resp_iu.resp_data[3]); +} + + +int isci_task_execute_task( + struct sas_task *task, + int num, + gfp_t gfp_flags); + +int isci_task_abort_task( + struct sas_task *task); + +int isci_task_abort_task_set( + struct domain_device *d_device, + u8 *lun); + +int isci_task_clear_aca( + struct domain_device *d_device, + u8 *lun); + +int isci_task_clear_task_set( + struct domain_device *d_device, + u8 *lun); + +int isci_task_query_task( + struct sas_task *task); + +int isci_task_lu_reset( + struct domain_device *d_device, + u8 *lun); + +int isci_task_clear_nexus_port( + struct asd_sas_port *port); + +int isci_task_clear_nexus_ha( + struct sas_ha_struct *ha); + +int isci_task_I_T_nexus_reset( + struct domain_device *d_device); + +void isci_task_request_complete( + struct isci_host *isci_host, + struct isci_request *request, + enum sci_task_status completion_status); + +u16 isci_task_ssp_request_get_io_tag_to_manage( + struct isci_request *request); + +u8 isci_task_ssp_request_get_function( + struct isci_request *request); + + +void *isci_task_ssp_request_get_response_data_address( + struct isci_request *request); + +u32 isci_task_ssp_request_get_response_data_length( + struct isci_request *request); + +int isci_queuecommand( + struct scsi_cmnd *scsi_cmd, + void (*donefunc)(struct scsi_cmnd *)); + +int isci_bus_reset_handler(struct scsi_cmnd *cmd); + +/** + * enum isci_completion_selection - This enum defines the possible actions to + * take with respect to a given request's notification back to libsas. + * + * + */ +enum isci_completion_selection { + + isci_perform_normal_io_completion, /* Normal notify (task_done) */ + isci_perform_aborted_io_completion, /* No notification. */ + isci_perform_error_io_completion /* Use sas_task_abort */ +}; + +static inline void isci_set_task_doneflags( + struct sas_task *task) +{ + /* Since no futher action will be taken on this task, + * make sure to mark it complete from the lldd perspective. + */ + task->task_state_flags |= SAS_TASK_STATE_DONE; + task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + task->task_state_flags &= ~SAS_TASK_STATE_PENDING; +} +/** + * isci_task_all_done() - This function clears the task bits to indicate the + * LLDD is done with the task. + * + * + */ +static inline void isci_task_all_done( + struct sas_task *task) +{ + unsigned long flags; + + /* Since no futher action will be taken on this task, + * make sure to mark it complete from the lldd perspective. 
+ */ + spin_lock_irqsave(&task->task_state_lock, flags); + isci_set_task_doneflags(task); + spin_unlock_irqrestore(&task->task_state_lock, flags); +} + +/** + * isci_task_set_completion_status() - This function sets the completion status + * for the request. + * @task: This parameter is the completed request. + * @response: This parameter is the response code for the completed task. + * @status: This parameter is the status code for the completed task. + * +* @return The new notification mode for the request. +*/ +static inline enum isci_completion_selection +isci_task_set_completion_status( + struct sas_task *task, + enum service_response response, + enum exec_status status, + enum isci_completion_selection task_notification_selection) +{ + unsigned long flags; + + spin_lock_irqsave(&task->task_state_lock, flags); + + /* If a device reset is being indicated, make sure the I/O + * is in the error path. + */ + if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) { + /* Fail the I/O to make sure it goes into the error path. */ + response = SAS_TASK_UNDELIVERED; + status = SAM_STAT_TASK_ABORTED; + + task_notification_selection = isci_perform_error_io_completion; + } + task->task_status.resp = response; + task->task_status.stat = status; + + switch (task_notification_selection) { + + case isci_perform_error_io_completion: + + if (task->task_proto == SAS_PROTOCOL_SMP) { + /* There is no error escalation in the SMP case. + * Convert to a normal completion to avoid the + * timeout in the discovery path and to let the + * next action take place quickly. + */ + task_notification_selection + = isci_perform_normal_io_completion; + + /* Fall through to the normal case... */ + } else { + /* Use sas_task_abort */ + /* Leave SAS_TASK_STATE_DONE clear + * Leave SAS_TASK_AT_INITIATOR set. + */ + break; + } + + case isci_perform_aborted_io_completion: + /* This path can occur with task-managed requests as well as + * requests terminated because of LUN or device resets. + */ + /* Fall through to the normal case... */ + case isci_perform_normal_io_completion: + /* Normal notification (task_done) */ + isci_set_task_doneflags(task); + break; + default: + WARN_ONCE(1, "unknown task_notification_selection: %d\n", + task_notification_selection); + break; + } + + spin_unlock_irqrestore(&task->task_state_lock, flags); + + return task_notification_selection; + +} +/** +* isci_execpath_callback() - This function is called from the task +* execute path when the task needs to callback libsas about the submit-time +* task failure. The callback occurs either through the task's done function +* or through sas_task_abort. In the case of regular non-discovery SATA/STP I/O +* requests, libsas takes the host lock before calling execute task. Therefore +* in this situation the host lock must be managed before calling the func. +* +* @ihost: This parameter is the controller to which the I/O request was sent. +* @task: This parameter is the I/O request. +* @func: This parameter is the function to call in the correct context. +* @status: This parameter is the status code for the completed task. 
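+*
+* Callers in task.c use it from the submit path via isci_task_refuse(),
+* either to complete the task normally or to push it onto the error path:
+*
+*   isci_execpath_callback(ihost, task, task->task_done);
+*   isci_execpath_callback(ihost, task, sas_task_abort);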
+* +*/ +static inline void isci_execpath_callback(struct isci_host *ihost, + struct sas_task *task, + void (*func)(struct sas_task *)) +{ + struct domain_device *dev = task->dev; + + if (dev_is_sata(dev) && task->uldd_task) { + unsigned long flags; + + /* Since we are still in the submit path, and since + * libsas takes the host lock on behalf of SATA + * devices before I/O starts (in the non-discovery case), + * we need to unlock before we can call the callback function. + */ + raw_local_irq_save(flags); + spin_unlock(dev->sata_dev.ap->lock); + func(task); + spin_lock(dev->sata_dev.ap->lock); + raw_local_irq_restore(flags); + } else + func(task); +} +#endif /* !defined(_SCI_TASK_H_) */ diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c new file mode 100644 index 00000000000..e9e1e2abacb --- /dev/null +++ b/drivers/scsi/isci/unsolicited_frame_control.c @@ -0,0 +1,225 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "host.h" +#include "unsolicited_frame_control.h" +#include "registers.h" + +int sci_unsolicited_frame_control_construct(struct isci_host *ihost) +{ + struct sci_unsolicited_frame_control *uf_control = &ihost->uf_control; + struct sci_unsolicited_frame *uf; + u32 buf_len, header_len, i; + dma_addr_t dma; + size_t size; + void *virt; + + /* + * Prepare all of the memory sizes for the UF headers, UF address + * table, and UF buffers themselves. + */ + buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE; + header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header); + size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(dma_addr_t); + + /* + * The Unsolicited Frame buffers are set at the start of the UF + * memory descriptor entry. The headers and address table will be + * placed after the buffers. + */ + virt = dmam_alloc_coherent(&ihost->pdev->dev, size, &dma, GFP_KERNEL); + if (!virt) + return -ENOMEM; + + /* + * Program the location of the UF header table into the SCU. + * Notes: + * - The address must align on a 64-byte boundary. Guaranteed to be + * on 64-byte boundary already 1KB boundary for unsolicited frames. + * - Program unused header entries to overlap with the last + * unsolicited frame. The silicon will never DMA to these unused + * headers, since we program the UF address table pointers to + * NULL. + */ + uf_control->headers.physical_address = dma + buf_len; + uf_control->headers.array = virt + buf_len; + + /* + * Program the location of the UF address table into the SCU. + * Notes: + * - The address must align on a 64-bit boundary. Guaranteed to be on 64 + * byte boundary already due to above programming headers being on a + * 64-bit boundary and headers are on a 64-bytes in size. + */ + uf_control->address_table.physical_address = dma + buf_len + header_len; + uf_control->address_table.array = virt + buf_len + header_len; + uf_control->get = 0; + + /* + * UF buffer requirements are: + * - The last entry in the UF queue is not NULL. + * - There is a power of 2 number of entries (NULL or not-NULL) + * programmed into the queue. + * - Aligned on a 1KB boundary. */ + + /* + * Program the actual used UF buffers into the UF address table and + * the controller's array of UFs. + */ + for (i = 0; i < SCU_MAX_UNSOLICITED_FRAMES; i++) { + uf = &uf_control->buffers.array[i]; + + uf_control->address_table.array[i] = dma; + + uf->buffer = virt; + uf->header = &uf_control->headers.array[i]; + uf->state = UNSOLICITED_FRAME_EMPTY; + + /* + * Increment the address of the physical and virtual memory + * pointers. Everything is aligned on 1k boundary with an + * increment of 1k. 
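+ * For reference, the single dmam_alloc_coherent() area set up above is
+ * carved into three consecutive regions:
+ *
+ *   [0, buf_len)                     N 1KB unsolicited frame buffers
+ *   [buf_len, buf_len + header_len)  N scu_unsolicited_frame_header slots
+ *   [buf_len + header_len, size)     N-entry UF address table (dma_addr_t)
+ *
+ * with N == SCU_MAX_UNSOLICITED_FRAMES, so the i-th address table slot
+ * written in this loop points at the i-th 1KB buffer.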
+ */ + virt += SCU_UNSOLICITED_FRAME_BUFFER_SIZE; + dma += SCU_UNSOLICITED_FRAME_BUFFER_SIZE; + } + + return 0; +} + +enum sci_status sci_unsolicited_frame_control_get_header(struct sci_unsolicited_frame_control *uf_control, + u32 frame_index, + void **frame_header) +{ + if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) { + /* Skip the first word in the frame since this is a controll word used + * by the hardware. + */ + *frame_header = &uf_control->buffers.array[frame_index].header->data; + + return SCI_SUCCESS; + } + + return SCI_FAILURE_INVALID_PARAMETER_VALUE; +} + +enum sci_status sci_unsolicited_frame_control_get_buffer(struct sci_unsolicited_frame_control *uf_control, + u32 frame_index, + void **frame_buffer) +{ + if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) { + *frame_buffer = uf_control->buffers.array[frame_index].buffer; + + return SCI_SUCCESS; + } + + return SCI_FAILURE_INVALID_PARAMETER_VALUE; +} + +bool sci_unsolicited_frame_control_release_frame(struct sci_unsolicited_frame_control *uf_control, + u32 frame_index) +{ + u32 frame_get; + u32 frame_cycle; + + frame_get = uf_control->get & (SCU_MAX_UNSOLICITED_FRAMES - 1); + frame_cycle = uf_control->get & SCU_MAX_UNSOLICITED_FRAMES; + + /* + * In the event there are NULL entries in the UF table, we need to + * advance the get pointer in order to find out if this frame should + * be released (i.e. update the get pointer) + */ + while (lower_32_bits(uf_control->address_table.array[frame_get]) == 0 && + upper_32_bits(uf_control->address_table.array[frame_get]) == 0 && + frame_get < SCU_MAX_UNSOLICITED_FRAMES) + frame_get++; + + /* + * The table has a NULL entry as it's last element. This is + * illegal. + */ + BUG_ON(frame_get >= SCU_MAX_UNSOLICITED_FRAMES); + if (frame_index >= SCU_MAX_UNSOLICITED_FRAMES) + return false; + + uf_control->buffers.array[frame_index].state = UNSOLICITED_FRAME_RELEASED; + + if (frame_get != frame_index) { + /* + * Frames remain in use until we advance the get pointer + * so there is nothing we can do here + */ + return false; + } + + /* + * The frame index is equal to the current get pointer so we + * can now free up all of the frame entries that + */ + while (uf_control->buffers.array[frame_get].state == UNSOLICITED_FRAME_RELEASED) { + uf_control->buffers.array[frame_get].state = UNSOLICITED_FRAME_EMPTY; + + if (frame_get+1 == SCU_MAX_UNSOLICITED_FRAMES-1) { + frame_cycle ^= SCU_MAX_UNSOLICITED_FRAMES; + frame_get = 0; + } else + frame_get++; + } + + uf_control->get = SCU_UFQGP_GEN_BIT(ENABLE_BIT) | frame_cycle | frame_get; + + return true; +} diff --git a/drivers/scsi/isci/unsolicited_frame_control.h b/drivers/scsi/isci/unsolicited_frame_control.h new file mode 100644 index 00000000000..31cb9506f52 --- /dev/null +++ b/drivers/scsi/isci/unsolicited_frame_control.h @@ -0,0 +1,278 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
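/*
 * [Editor's note] A standalone sketch of the queue arithmetic used by
 * sci_unsolicited_frame_control_release_frame() above: the software "get"
 * value packs a power-of-two index together with a cycle bit one position
 * above the index mask (frame_get = get & (MAX - 1), frame_cycle = get & MAX),
 * and the driver writes the result back to hardware with the queue enable bit
 * ORed in.  The model below only illustrates that index/cycle encoding; the
 * queue depth and names are hypothetical, not the driver's.
 */
#include <stdio.h>

#define UF_QUEUE_DEPTH 32u		/* must be a power of two */

static unsigned int uf_advance(unsigned int get)
{
	unsigned int index = get & (UF_QUEUE_DEPTH - 1);
	unsigned int cycle = get & UF_QUEUE_DEPTH;

	if (++index == UF_QUEUE_DEPTH) {
		index = 0;			/* wrap the index ... */
		cycle ^= UF_QUEUE_DEPTH;	/* ... and toggle the cycle bit */
	}
	return cycle | index;
}

int main(void)
{
	unsigned int get = 0;
	int i;

	for (i = 0; i < 40; i++)
		get = uf_advance(get);
	/* 40 advances on a depth-32 queue: index 8, cycle bit toggled once */
	printf("get=0x%02x index=%u cycle=%u\n",
	       get, get & (UF_QUEUE_DEPTH - 1), !!(get & UF_QUEUE_DEPTH));
	return 0;
}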
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_ +#define _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_ + +#include "isci.h" + +#define SCU_UNSOLICITED_FRAME_HEADER_DATA_DWORDS 15 + +/** + * struct scu_unsolicited_frame_header - + * + * This structure delineates the format of an unsolicited frame header. The + * first DWORD are UF attributes defined by the silicon architecture. The data + * depicts actual header information received on the link. + */ +struct scu_unsolicited_frame_header { + /** + * This field indicates if there is an Initiator Index Table entry with + * which this header is associated. + */ + u32 iit_exists:1; + + /** + * This field simply indicates the protocol type (i.e. SSP, STP, SMP). + */ + u32 protocol_type:3; + + /** + * This field indicates if the frame is an address frame (IAF or OAF) + * or if it is a information unit frame. + */ + u32 is_address_frame:1; + + /** + * This field simply indicates the connection rate at which the frame + * was received. + */ + u32 connection_rate:4; + + u32 reserved:23; + + /** + * This field represents the actual header data received on the link. + */ + u32 data[SCU_UNSOLICITED_FRAME_HEADER_DATA_DWORDS]; + +}; + + + +/** + * enum unsolicited_frame_state - + * + * This enumeration represents the current unsolicited frame state. The + * controller object can not updtate the hardware unsolicited frame put pointer + * unless it has already processed the priror unsolicited frames. + */ +enum unsolicited_frame_state { + /** + * This state is when the frame is empty and not in use. 
It is + * different from the released state in that the hardware could DMA + * data to this frame buffer. + */ + UNSOLICITED_FRAME_EMPTY, + + /** + * This state is set when the frame buffer is in use by by some + * object in the system. + */ + UNSOLICITED_FRAME_IN_USE, + + /** + * This state is set when the frame is returned to the free pool + * but one or more frames prior to this one are still in use. + * Once all of the frame before this one are freed it will go to + * the empty state. + */ + UNSOLICITED_FRAME_RELEASED, + + UNSOLICITED_FRAME_MAX_STATES +}; + +/** + * struct sci_unsolicited_frame - + * + * This is the unsolicited frame data structure it acts as the container for + * the current frame state, frame header and frame buffer. + */ +struct sci_unsolicited_frame { + /** + * This field contains the current frame state + */ + enum unsolicited_frame_state state; + + /** + * This field points to the frame header data. + */ + struct scu_unsolicited_frame_header *header; + + /** + * This field points to the frame buffer data. + */ + void *buffer; + +}; + +/** + * struct sci_uf_header_array - + * + * This structure contains all of the unsolicited frame header information. + */ +struct sci_uf_header_array { + /** + * This field is represents a virtual pointer to the start + * address of the UF address table. The table contains + * 64-bit pointers as required by the hardware. + */ + struct scu_unsolicited_frame_header *array; + + /** + * This field specifies the physical address location for the UF + * buffer array. + */ + dma_addr_t physical_address; + +}; + +/** + * struct sci_uf_buffer_array - + * + * This structure contains all of the unsolicited frame buffer (actual payload) + * information. + */ +struct sci_uf_buffer_array { + /** + * This field is the unsolicited frame data its used to manage + * the data for the unsolicited frame requests. It also represents + * the virtual address location that corresponds to the + * physical_address field. + */ + struct sci_unsolicited_frame array[SCU_MAX_UNSOLICITED_FRAMES]; + + /** + * This field specifies the physical address location for the UF + * buffer array. + */ + dma_addr_t physical_address; +}; + +/** + * struct sci_uf_address_table_array - + * + * This object maintains all of the unsolicited frame address table specific + * data. The address table is a collection of 64-bit pointers that point to + * 1KB buffers into which the silicon will DMA unsolicited frames. + */ +struct sci_uf_address_table_array { + /** + * This field represents a virtual pointer that refers to the + * starting address of the UF address table. + * 64-bit pointers are required by the hardware. + */ + dma_addr_t *array; + + /** + * This field specifies the physical address location for the UF + * address table. + */ + dma_addr_t physical_address; + +}; + +/** + * struct sci_unsolicited_frame_control - + * + * This object contains all of the data necessary to handle unsolicited frames. + */ +struct sci_unsolicited_frame_control { + /** + * This field is the software copy of the unsolicited frame queue + * get pointer. The controller object writes this value to the + * hardware to let the hardware put more unsolicited frame entries. + */ + u32 get; + + /** + * This field contains all of the unsolicited frame header + * specific fields. + */ + struct sci_uf_header_array headers; + + /** + * This field contains all of the unsolicited frame buffer + * specific fields. 
+ */ + struct sci_uf_buffer_array buffers; + + /** + * This field contains all of the unsolicited frame address table + * specific fields. + */ + struct sci_uf_address_table_array address_table; + +}; + +struct isci_host; + +int sci_unsolicited_frame_control_construct(struct isci_host *ihost); + +enum sci_status sci_unsolicited_frame_control_get_header( + struct sci_unsolicited_frame_control *uf_control, + u32 frame_index, + void **frame_header); + +enum sci_status sci_unsolicited_frame_control_get_buffer( + struct sci_unsolicited_frame_control *uf_control, + u32 frame_index, + void **frame_buffer); + +bool sci_unsolicited_frame_control_release_frame( + struct sci_unsolicited_frame_control *uf_control, + u32 frame_index); + +#endif /* _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_ */ diff --git a/drivers/scsi/iscsi_boot_sysfs.c b/drivers/scsi/iscsi_boot_sysfs.c index df6bff7366c..89700cbca16 100644 --- a/drivers/scsi/iscsi_boot_sysfs.c +++ b/drivers/scsi/iscsi_boot_sysfs.c @@ -64,7 +64,8 @@ static void iscsi_boot_kobj_release(struct kobject *kobj) struct iscsi_boot_kobj *boot_kobj = container_of(kobj, struct iscsi_boot_kobj, kobj); - kfree(boot_kobj->data); + if (boot_kobj->release) + boot_kobj->release(boot_kobj->data); kfree(boot_kobj); } @@ -305,7 +306,8 @@ iscsi_boot_create_kobj(struct iscsi_boot_kset *boot_kset, struct attribute_group *attr_group, const char *name, int index, void *data, ssize_t (*show) (void *data, int type, char *buf), - mode_t (*is_visible) (void *data, int type)) + mode_t (*is_visible) (void *data, int type), + void (*release) (void *data)) { struct iscsi_boot_kobj *boot_kobj; @@ -323,6 +325,7 @@ iscsi_boot_create_kobj(struct iscsi_boot_kset *boot_kset, boot_kobj->data = data; boot_kobj->show = show; boot_kobj->is_visible = is_visible; + boot_kobj->release = release; if (sysfs_create_group(&boot_kobj->kobj, attr_group)) { /* @@ -331,7 +334,7 @@ iscsi_boot_create_kobj(struct iscsi_boot_kset *boot_kset, * the boot kobj was not setup and the normal release * path is not being run. */ - boot_kobj->data = NULL; + boot_kobj->release = NULL; kobject_put(&boot_kobj->kobj); return NULL; } @@ -357,6 +360,7 @@ static void iscsi_boot_remove_kobj(struct iscsi_boot_kobj *boot_kobj) * @data: driver specific data for target * @show: attr show function * @is_visible: attr visibility function + * @release: release function * * Note: The boot sysfs lib will free the data passed in for the caller * when all refs to the target kobject have been released. @@ -365,10 +369,12 @@ struct iscsi_boot_kobj * iscsi_boot_create_target(struct iscsi_boot_kset *boot_kset, int index, void *data, ssize_t (*show) (void *data, int type, char *buf), - mode_t (*is_visible) (void *data, int type)) + mode_t (*is_visible) (void *data, int type), + void (*release) (void *data)) { return iscsi_boot_create_kobj(boot_kset, &iscsi_boot_target_attr_group, - "target%d", index, data, show, is_visible); + "target%d", index, data, show, is_visible, + release); } EXPORT_SYMBOL_GPL(iscsi_boot_create_target); @@ -379,6 +385,7 @@ EXPORT_SYMBOL_GPL(iscsi_boot_create_target); * @data: driver specific data * @show: attr show function * @is_visible: attr visibility function + * @release: release function * * Note: The boot sysfs lib will free the data passed in for the caller * when all refs to the initiator kobject have been released. 
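/*
 * [Editor's note] Hypothetical caller-side usage of the release hook added to
 * the iscsi_boot_* creation helpers above; with it, the boot-sysfs library
 * invokes the driver's destructor instead of kfree()ing the data itself when
 * the last kobject reference drops.  The show/is_visible callbacks named below
 * are placeholders, not functions from the patch.
 */
static void example_target_release(void *data)
{
	kfree(data);	/* or any driver-specific teardown of the target data */
}

/*
 * ...at target creation time:
 *
 *	boot_kobj = iscsi_boot_create_target(boot_kset, 0, tgt_data,
 *					     example_target_show,
 *					     example_target_is_visible,
 *					     example_target_release);
 */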
@@ -387,12 +394,13 @@ struct iscsi_boot_kobj * iscsi_boot_create_initiator(struct iscsi_boot_kset *boot_kset, int index, void *data, ssize_t (*show) (void *data, int type, char *buf), - mode_t (*is_visible) (void *data, int type)) + mode_t (*is_visible) (void *data, int type), + void (*release) (void *data)) { return iscsi_boot_create_kobj(boot_kset, &iscsi_boot_initiator_attr_group, "initiator", index, data, show, - is_visible); + is_visible, release); } EXPORT_SYMBOL_GPL(iscsi_boot_create_initiator); @@ -403,6 +411,7 @@ EXPORT_SYMBOL_GPL(iscsi_boot_create_initiator); * @data: driver specific data * @show: attr show function * @is_visible: attr visibility function + * @release: release function * * Note: The boot sysfs lib will free the data passed in for the caller * when all refs to the ethernet kobject have been released. @@ -411,12 +420,13 @@ struct iscsi_boot_kobj * iscsi_boot_create_ethernet(struct iscsi_boot_kset *boot_kset, int index, void *data, ssize_t (*show) (void *data, int type, char *buf), - mode_t (*is_visible) (void *data, int type)) + mode_t (*is_visible) (void *data, int type), + void (*release) (void *data)) { return iscsi_boot_create_kobj(boot_kset, &iscsi_boot_ethernet_attr_group, "ethernet%d", index, data, show, - is_visible); + is_visible, release); } EXPORT_SYMBOL_GPL(iscsi_boot_create_ethernet); @@ -472,6 +482,9 @@ void iscsi_boot_destroy_kset(struct iscsi_boot_kset *boot_kset) { struct iscsi_boot_kobj *boot_kobj, *tmp_kobj; + if (!boot_kset) + return; + list_for_each_entry_safe(boot_kobj, tmp_kobj, &boot_kset->kobj_list, list) iscsi_boot_remove_kobj(boot_kobj); diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 3df985305f6..7724414588f 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -107,10 +107,12 @@ static int iscsi_sw_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, * If the socket is in CLOSE or CLOSE_WAIT we should * not close the connection if there is still some * data pending. + * + * Must be called with sk_callback_lock. */ static inline int iscsi_sw_sk_state_check(struct sock *sk) { - struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data; + struct iscsi_conn *conn = sk->sk_user_data; if ((sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) && !atomic_read(&sk->sk_rmem_alloc)) { @@ -123,11 +125,17 @@ static inline int iscsi_sw_sk_state_check(struct sock *sk) static void iscsi_sw_tcp_data_ready(struct sock *sk, int flag) { - struct iscsi_conn *conn = sk->sk_user_data; - struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct iscsi_conn *conn; + struct iscsi_tcp_conn *tcp_conn; read_descriptor_t rd_desc; read_lock(&sk->sk_callback_lock); + conn = sk->sk_user_data; + if (!conn) { + read_unlock(&sk->sk_callback_lock); + return; + } + tcp_conn = conn->dd_data; /* * Use rd_desc to pass 'conn' to iscsi_tcp_recv. @@ -141,11 +149,10 @@ static void iscsi_sw_tcp_data_ready(struct sock *sk, int flag) iscsi_sw_sk_state_check(sk); - read_unlock(&sk->sk_callback_lock); - /* If we had to (atomically) map a highmem page, * unmap it now. 
*/ iscsi_tcp_segment_unmap(&tcp_conn->in.segment); + read_unlock(&sk->sk_callback_lock); } static void iscsi_sw_tcp_state_change(struct sock *sk) @@ -157,8 +164,11 @@ static void iscsi_sw_tcp_state_change(struct sock *sk) void (*old_state_change)(struct sock *); read_lock(&sk->sk_callback_lock); - - conn = (struct iscsi_conn*)sk->sk_user_data; + conn = sk->sk_user_data; + if (!conn) { + read_unlock(&sk->sk_callback_lock); + return; + } session = conn->session; iscsi_sw_sk_state_check(sk); @@ -178,11 +188,25 @@ static void iscsi_sw_tcp_state_change(struct sock *sk) **/ static void iscsi_sw_tcp_write_space(struct sock *sk) { - struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data; - struct iscsi_tcp_conn *tcp_conn = conn->dd_data; - struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; + struct iscsi_conn *conn; + struct iscsi_tcp_conn *tcp_conn; + struct iscsi_sw_tcp_conn *tcp_sw_conn; + void (*old_write_space)(struct sock *); + + read_lock_bh(&sk->sk_callback_lock); + conn = sk->sk_user_data; + if (!conn) { + read_unlock_bh(&sk->sk_callback_lock); + return; + } + + tcp_conn = conn->dd_data; + tcp_sw_conn = tcp_conn->dd_data; + old_write_space = tcp_sw_conn->old_write_space; + read_unlock_bh(&sk->sk_callback_lock); + + old_write_space(sk); - tcp_sw_conn->old_write_space(sk); ISCSI_SW_TCP_DBG(conn, "iscsi_write_space\n"); iscsi_conn_queue_work(conn); } @@ -592,20 +616,17 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) /* userspace may have goofed up and not bound us */ if (!sock) return; - /* - * Make sure our recv side is stopped. - * Older tools called conn stop before ep_disconnect - * so IO could still be coming in. - */ - write_lock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock); - set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); - write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock); sock->sk->sk_err = EIO; wake_up_interruptible(sk_sleep(sock->sk)); - iscsi_conn_stop(cls_conn, flag); + /* stop xmit side */ + iscsi_suspend_tx(conn); + + /* stop recv side and release socket */ iscsi_sw_tcp_release_conn(conn); + + iscsi_conn_stop(cls_conn, flag); } static int diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 3b8a6451ea2..01ff082dc34 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -802,10 +802,8 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid) pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask); spin_lock_bh(&pool->lock); ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order); - if (ep) { + if (ep && ep->xid == xid) fc_exch_hold(ep); - WARN_ON(ep->xid != xid); - } spin_unlock_bh(&pool->lock); } return ep; @@ -965,8 +963,30 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport, sp = &ep->seq; if (sp->id != fh->fh_seq_id) { atomic_inc(&mp->stats.seq_not_found); - reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */ - goto rel; + if (f_ctl & FC_FC_END_SEQ) { + /* + * Update sequence_id based on incoming last + * frame of sequence exchange. This is needed + * for FCoE target where DDP has been used + * on target where, stack is indicated only + * about last frame's (payload _header) header. + * Whereas "seq_id" which is part of + * frame_header is allocated by initiator + * which is totally different from "seq_id" + * allocated when XFER_RDY was sent by target. + * To avoid false -ve which results into not + * sending RSP, hence write request on other + * end never finishes. 
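/*
 * [Editor's note] A minimal sketch of the guard pattern the iscsi_sw_tcp_*
 * socket callbacks above now follow: re-read sk_user_data while holding
 * sk_callback_lock and bail out if the connection has already been unbound,
 * so a late callback cannot dereference a stale pointer.  The function name
 * is hypothetical; the locking calls mirror the patch.
 */
static void example_data_ready(struct sock *sk, int flag)
{
	struct iscsi_conn *conn;

	read_lock(&sk->sk_callback_lock);
	conn = sk->sk_user_data;	/* cleared during connection teardown */
	if (!conn) {
		read_unlock(&sk->sk_callback_lock);
		return;
	}
	/* ... safe to use conn and conn->dd_data while the lock is held ... */
	read_unlock(&sk->sk_callback_lock);
}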
+ */ + spin_lock_bh(&ep->ex_lock); + sp->ssb_stat |= SSB_ST_RESP; + sp->id = fh->fh_seq_id; + spin_unlock_bh(&ep->ex_lock); + } else { + /* sequence/exch should exist */ + reject = FC_RJT_SEQ_ID; + goto rel; + } } } WARN_ON(ep != fc_seq_exch(sp)); @@ -2443,8 +2463,11 @@ int fc_setup_exch_mgr(void) fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue"); if (!fc_exch_workqueue) - return -ENOMEM; + goto err; return 0; +err: + kmem_cache_destroy(fc_em_cachep); + return -ENOMEM; } /** diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 9cd2149519a..afb63c84314 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -498,7 +498,7 @@ crc_err: stats = per_cpu_ptr(lport->dev_stats, get_cpu()); stats->ErrorFrames++; /* per cpu count, not total count, but OK for limit */ - if (stats->InvalidCRCCount++ < 5) + if (stats->InvalidCRCCount++ < FC_MAX_ERROR_CNT) printk(KERN_WARNING "libfc: CRC error on data " "frame for port (%6.6x)\n", lport->port_id); @@ -690,7 +690,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, } /** - * fc_fcp_abts_resp() - Send an ABTS response + * fc_fcp_abts_resp() - Receive an ABTS response * @fsp: The FCP packet that is being aborted * @fp: The response frame */ @@ -730,7 +730,7 @@ static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) } /** - * fc_fcp_recv() - Reveive an FCP frame + * fc_fcp_recv() - Receive an FCP frame * @seq: The sequence the frame is on * @fp: The received frame * @arg: The related FCP packet @@ -1084,6 +1084,7 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp) rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv); if (unlikely(rc)) { spin_lock_irqsave(&si->scsi_queue_lock, flags); + fsp->cmd->SCp.ptr = NULL; list_del(&fsp->list); spin_unlock_irqrestore(&si->scsi_queue_lock, flags); } @@ -1645,12 +1646,10 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset) struct fc_seq *seq; struct fcp_srr *srr; struct fc_frame *fp; - u8 cdb_op; unsigned int rec_tov; rport = fsp->rport; rpriv = rport->dd_data; - cdb_op = fsp->cdb_cmd.fc_cdb[0]; if (!(rpriv->flags & FC_RP_FLAGS_RETRY) || rpriv->rp_state != RPORT_ST_READY) diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 389ab80aef0..e55ed9cf23f 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -1025,6 +1025,8 @@ static void fc_lport_enter_reset(struct fc_lport *lport) fc_vport_set_state(lport->vport, FC_VPORT_LINKDOWN); } fc_lport_state_enter(lport, LPORT_ST_RESET); + fc_host_post_event(lport->host, fc_get_event_number(), + FCH_EVT_LIPRESET, 0); fc_vports_linkchange(lport); fc_lport_reset_locked(lport); if (lport->link_up) @@ -1350,7 +1352,6 @@ static void fc_lport_timeout(struct work_struct *work) WARN_ON(1); break; case LPORT_ST_READY: - WARN_ON(1); break; case LPORT_ST_RESET: break; diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 49e1ccca09d..760db761944 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -153,18 +153,6 @@ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, } /** - * fc_rport_free_rcu() - Free a remote port - * @rcu: The rcu_head structure inside the remote port - */ -static void fc_rport_free_rcu(struct rcu_head *rcu) -{ - struct fc_rport_priv *rdata; - - rdata = container_of(rcu, struct fc_rport_priv, rcu); - kfree(rdata); -} - -/** * fc_rport_destroy() - Free a remote port after last reference is 
released * @kref: The remote port's kref */ @@ -173,7 +161,7 @@ static void fc_rport_destroy(struct kref *kref) struct fc_rport_priv *rdata; rdata = container_of(kref, struct fc_rport_priv, kref); - call_rcu(&rdata->rcu, fc_rport_free_rcu); + kfree_rcu(rdata, rcu); } /** @@ -801,6 +789,20 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport, switch (rdata->rp_state) { case RPORT_ST_INIT: + /* + * If received the FLOGI request on RPORT which is INIT state + * (means not transition to FLOGI either fc_rport timeout + * function didn;t trigger or this end hasn;t received + * beacon yet from other end. In that case only, allow RPORT + * state machine to continue, otherwise fall through which + * causes the code to send reject response. + * NOTE; Not checking for FIP->state such as VNMP_UP or + * VNMP_CLAIM because if FIP state is not one of those, + * RPORT wouldn;t have created and 'rport_lookup' would have + * failed anyway in that case. + */ + if (lport->point_to_multipoint) + break; case RPORT_ST_DELETE: mutex_unlock(&rdata->rp_mutex); rjt_data.reason = ELS_RJT_FIP; diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 0c550d5b913..256a999d010 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -84,22 +84,6 @@ MODULE_PARM_DESC(debug_libiscsi_eh, __func__, ##arg); \ } while (0); -/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */ -#define SNA32_CHECK 2147483648UL - -static int iscsi_sna_lt(u32 n1, u32 n2) -{ - return n1 != n2 && ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) || - (n1 > n2 && (n2 - n1 < SNA32_CHECK))); -} - -/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */ -static int iscsi_sna_lte(u32 n1, u32 n2) -{ - return n1 == n2 || ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) || - (n1 > n2 && (n2 - n1 < SNA32_CHECK))); -} - inline void iscsi_conn_queue_work(struct iscsi_conn *conn) { struct Scsi_Host *shost = conn->session->host; @@ -169,7 +153,7 @@ void iscsi_prep_data_out_pdu(struct iscsi_task *task, struct iscsi_r2t_info *r2t hdr->datasn = cpu_to_be32(r2t->datasn); r2t->datasn++; hdr->opcode = ISCSI_OP_SCSI_DATA_OUT; - memcpy(hdr->lun, task->lun, sizeof(hdr->lun)); + hdr->lun = task->lun; hdr->itt = task->hdr_itt; hdr->exp_statsn = r2t->exp_statsn; hdr->offset = cpu_to_be32(r2t->data_offset + r2t->sent); @@ -296,7 +280,7 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode) /* * Allow PDUs for unrelated LUNs */ - hdr_lun = scsilun_to_int((struct scsi_lun *)tmf->lun); + hdr_lun = scsilun_to_int(&tmf->lun); if (hdr_lun != task->sc->device->lun) return 0; /* fall through */ @@ -360,7 +344,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) struct iscsi_conn *conn = task->conn; struct iscsi_session *session = conn->session; struct scsi_cmnd *sc = task->sc; - struct iscsi_cmd *hdr; + struct iscsi_scsi_req *hdr; unsigned hdrlength, cmd_len; itt_t itt; int rc; @@ -374,7 +358,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) if (rc) return rc; } - hdr = (struct iscsi_cmd *) task->hdr; + hdr = (struct iscsi_scsi_req *)task->hdr; itt = hdr->itt; memset(hdr, 0, sizeof(*hdr)); @@ -389,8 +373,8 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) return rc; hdr->opcode = ISCSI_OP_SCSI_CMD; hdr->flags = ISCSI_ATTR_SIMPLE; - int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun); - memcpy(task->lun, hdr->lun, sizeof(task->lun)); + int_to_scsilun(sc->device->lun, &hdr->lun); + task->lun = hdr->lun; hdr->exp_statsn = cpu_to_be32(conn->exp_statsn); cmd_len = sc->cmd_len; if (cmd_len < 
ISCSI_CDB_SIZE) @@ -830,7 +814,7 @@ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr, struct iscsi_task *task, char *data, int datalen) { - struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)hdr; + struct iscsi_scsi_rsp *rhdr = (struct iscsi_scsi_rsp *)hdr; struct iscsi_session *session = conn->session; struct scsi_cmnd *sc = task->sc; @@ -968,7 +952,7 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) hdr.flags = ISCSI_FLAG_CMD_FINAL; if (rhdr) { - memcpy(hdr.lun, rhdr->lun, 8); + hdr.lun = rhdr->lun; hdr.ttt = rhdr->ttt; hdr.itt = RESERVED_ITT; } else @@ -2092,7 +2076,7 @@ static void iscsi_prep_abort_task_pdu(struct iscsi_task *task, hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE; hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK; hdr->flags |= ISCSI_FLAG_CMD_FINAL; - memcpy(hdr->lun, task->lun, sizeof(hdr->lun)); + hdr->lun = task->lun; hdr->rtt = task->hdr_itt; hdr->refcmdsn = task->cmdsn; } @@ -2233,7 +2217,7 @@ static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr) hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE; hdr->flags = ISCSI_TM_FUNC_LOGICAL_UNIT_RESET & ISCSI_FLAG_TM_FUNC_MASK; hdr->flags |= ISCSI_FLAG_CMD_FINAL; - int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun); + int_to_scsilun(sc->device->lun, &hdr->lun); hdr->rtt = RESERVED_ITT; } diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c index e98ae33f129..09b232fd9a1 100644 --- a/drivers/scsi/libiscsi_tcp.c +++ b/drivers/scsi/libiscsi_tcp.c @@ -1084,7 +1084,8 @@ iscsi_tcp_conn_setup(struct iscsi_cls_session *cls_session, int dd_data_size, struct iscsi_cls_conn *cls_conn; struct iscsi_tcp_conn *tcp_conn; - cls_conn = iscsi_conn_setup(cls_session, sizeof(*tcp_conn), conn_idx); + cls_conn = iscsi_conn_setup(cls_session, + sizeof(*tcp_conn) + dd_data_size, conn_idx); if (!cls_conn) return NULL; conn = cls_conn->dd_data; @@ -1096,22 +1097,13 @@ iscsi_tcp_conn_setup(struct iscsi_cls_session *cls_session, int dd_data_size, tcp_conn = conn->dd_data; tcp_conn->iscsi_conn = conn; - - tcp_conn->dd_data = kzalloc(dd_data_size, GFP_KERNEL); - if (!tcp_conn->dd_data) { - iscsi_conn_teardown(cls_conn); - return NULL; - } + tcp_conn->dd_data = conn->dd_data + sizeof(*tcp_conn); return cls_conn; } EXPORT_SYMBOL_GPL(iscsi_tcp_conn_setup); void iscsi_tcp_conn_teardown(struct iscsi_cls_conn *cls_conn) { - struct iscsi_conn *conn = cls_conn->dd_data; - struct iscsi_tcp_conn *tcp_conn = conn->dd_data; - - kfree(tcp_conn->dd_data); iscsi_conn_teardown(cls_conn); } EXPORT_SYMBOL_GPL(iscsi_tcp_conn_teardown); diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 874e29d9533..f84084bba2f 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -849,6 +849,9 @@ static struct domain_device *sas_ex_discover_expander( res = sas_discover_expander(child); if (res) { + spin_lock_irq(&parent->port->dev_list_lock); + list_del(&child->dev_list_node); + spin_unlock_irq(&parent->port->dev_list_lock); kfree(child); return NULL; } diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 8ec2c86a49d..c088a36d1f3 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -20,6 +20,11 @@ *******************************************************************/ #include <scsi/scsi_host.h> + +#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS) +#define CONFIG_SCSI_LPFC_DEBUG_FS +#endif + struct lpfc_sli2_slim; 
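/*
 * [Editor's note] Userspace model (not driver code) of the allocation layout
 * the iscsi_tcp_conn_setup() change above switches to: the transport's
 * private area is carved out of the same allocation as the connection data,
 * so dd_data points just past the structure instead of coming from a second
 * kzalloc() that must be freed separately.
 */
#include <stdlib.h>

struct model_tcp_conn {
	void *iscsi_conn;	/* stands in for the embedded connection data */
	void *dd_data;		/* per-transport private area */
};

static struct model_tcp_conn *model_conn_setup(size_t dd_data_size)
{
	/* one allocation: the struct immediately followed by dd_data_size bytes */
	struct model_tcp_conn *tcp_conn =
		calloc(1, sizeof(*tcp_conn) + dd_data_size);

	if (!tcp_conn)
		return NULL;
	tcp_conn->dd_data = (char *)tcp_conn + sizeof(*tcp_conn);
	return tcp_conn;	/* freeing tcp_conn releases dd_data as well */
}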
#define LPFC_PCI_DEV_LP 0x1 @@ -465,9 +470,10 @@ enum intr_type_t { struct unsol_rcv_ct_ctx { uint32_t ctxt_id; uint32_t SID; - uint32_t oxid; uint32_t flags; #define UNSOL_VALID 0x00000001 + uint16_t oxid; + uint16_t rxid; }; #define LPFC_USER_LINK_SPEED_AUTO 0 /* auto select (default)*/ @@ -674,6 +680,9 @@ struct lpfc_hba { uint32_t cfg_enable_rrq; uint32_t cfg_topology; uint32_t cfg_link_speed; +#define LPFC_FCF_FOV 1 /* Fast fcf failover */ +#define LPFC_FCF_PRIORITY 2 /* Priority fcf failover */ + uint32_t cfg_fcf_failover_policy; uint32_t cfg_cr_delay; uint32_t cfg_cr_count; uint32_t cfg_multi_ring_support; @@ -845,9 +854,13 @@ struct lpfc_hba { /* iDiag debugfs sub-directory */ struct dentry *idiag_root; struct dentry *idiag_pci_cfg; + struct dentry *idiag_bar_acc; struct dentry *idiag_que_info; struct dentry *idiag_que_acc; struct dentry *idiag_drb_acc; + struct dentry *idiag_ctl_acc; + struct dentry *idiag_mbx_acc; + struct dentry *idiag_ext_acc; #endif /* Used for deferred freeing of ELS data buffers */ diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 135a53baa73..2542f1f8bf8 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -755,6 +755,47 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr, } /** + * lpfc_sli4_pdev_status_reg_wait - Wait for pdev status register for readyness + * @phba: lpfc_hba pointer. + * + * Description: + * SLI4 interface type-2 device to wait on the sliport status register for + * the readyness after performing a firmware reset. + * + * Returns: + * zero for success + **/ +static int +lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba) +{ + struct lpfc_register portstat_reg; + int i; + + + lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, + &portstat_reg.word0); + + /* wait for the SLI port firmware ready after firmware reset */ + for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) { + msleep(10); + lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, + &portstat_reg.word0); + if (!bf_get(lpfc_sliport_status_err, &portstat_reg)) + continue; + if (!bf_get(lpfc_sliport_status_rn, &portstat_reg)) + continue; + if (!bf_get(lpfc_sliport_status_rdy, &portstat_reg)) + continue; + break; + } + + if (i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT) + return 0; + else + return -EIO; +} + +/** * lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc * @phba: lpfc_hba pointer. 
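/*
 * [Editor's note] Standalone model of the shape of
 * lpfc_sli4_pdev_status_reg_wait() above: poll the port status every 10 ms
 * and give up after LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT attempts.  The
 * predicate and the bound below are hypothetical stand-ins for the driver's
 * sliport status-bit checks and its real constant.
 */
#include <stdbool.h>

#define MAX_WAIT_10MS_CNT 100	/* illustrative bound, not the driver's value */

static int wait_for_ready(bool (*port_ready)(void), void (*sleep_10ms)(void))
{
	int i;

	for (i = 0; i < MAX_WAIT_10MS_CNT; i++) {
		sleep_10ms();
		if (port_ready())
			return 0;	/* firmware came back within the budget */
	}
	return -1;			/* timed out: the caller reports -EIO */
}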
* @@ -769,6 +810,7 @@ static ssize_t lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode) { struct completion online_compl; + struct pci_dev *pdev = phba->pcidev; uint32_t reg_val; int status = 0; int rc; @@ -781,6 +823,14 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode) LPFC_SLI_INTF_IF_TYPE_2)) return -EPERM; + if (!pdev->is_physfn) + return -EPERM; + + /* Disable SR-IOV virtual functions if enabled */ + if (phba->cfg_sriov_nr_virtfn) { + pci_disable_sriov(pdev); + phba->cfg_sriov_nr_virtfn = 0; + } status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); if (status != 0) @@ -805,7 +855,10 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode) readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET); /* delay driver action following IF_TYPE_2 reset */ - msleep(100); + rc = lpfc_sli4_pdev_status_reg_wait(phba); + + if (rc) + return -EIO; init_completion(&online_compl); rc = lpfc_workq_post_event(phba, &status, &online_compl, @@ -895,6 +948,10 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr, if (!phba->cfg_enable_hba_reset) return -EACCES; + + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "3050 lpfc_board_mode set to %s\n", buf); + init_completion(&online_compl); if(strncmp(buf, "online", sizeof("online") - 1) == 0) { @@ -1290,6 +1347,10 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr, if (phba->sli_rev == LPFC_SLI_REV4) val = 0; + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "3051 lpfc_poll changed from %d to %d\n", + phba->cfg_poll, val); + spin_lock_irq(&phba->hbalock); old_val = phba->cfg_poll; @@ -1414,80 +1475,10 @@ lpfc_sriov_hw_max_virtfn_show(struct device *dev, struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; - struct pci_dev *pdev = phba->pcidev; - union lpfc_sli4_cfg_shdr *shdr; - uint32_t shdr_status, shdr_add_status; - LPFC_MBOXQ_t *mboxq; - struct lpfc_mbx_get_prof_cfg *get_prof_cfg; - struct lpfc_rsrc_desc_pcie *desc; - uint32_t max_nr_virtfn; - uint32_t desc_count; - int length, rc, i; - - if ((phba->sli_rev < LPFC_SLI_REV4) || - (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != - LPFC_SLI_INTF_IF_TYPE_2)) - return -EPERM; - - if (!pdev->is_physfn) - return snprintf(buf, PAGE_SIZE, "%d\n", 0); - - mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mboxq) - return -ENOMEM; - - /* get the maximum number of virtfn support by physfn */ - length = (sizeof(struct lpfc_mbx_get_prof_cfg) - - sizeof(struct lpfc_sli4_cfg_mhdr)); - lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, - LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG, - length, LPFC_SLI4_MBX_EMBED); - shdr = (union lpfc_sli4_cfg_shdr *) - &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; - bf_set(lpfc_mbox_hdr_pf_num, &shdr->request, - phba->sli4_hba.iov.pf_number + 1); - - get_prof_cfg = &mboxq->u.mqe.un.get_prof_cfg; - bf_set(lpfc_mbx_get_prof_cfg_prof_tp, &get_prof_cfg->u.request, - LPFC_CFG_TYPE_CURRENT_ACTIVE); - - rc = lpfc_sli_issue_mbox_wait(phba, mboxq, - lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG)); - - if (rc != MBX_TIMEOUT) { - /* check return status */ - shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); - shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, - &shdr->response); - if (shdr_status || shdr_add_status || rc) - goto error_out; - - } else - goto error_out; - - desc_count = get_prof_cfg->u.response.prof_cfg.rsrc_desc_count; - - for (i = 0; i < 
LPFC_RSRC_DESC_MAX_NUM; i++) { - desc = (struct lpfc_rsrc_desc_pcie *) - &get_prof_cfg->u.response.prof_cfg.desc[i]; - if (LPFC_RSRC_DESC_TYPE_PCIE == - bf_get(lpfc_rsrc_desc_pcie_type, desc)) { - max_nr_virtfn = bf_get(lpfc_rsrc_desc_pcie_nr_virtfn, - desc); - break; - } - } - - if (i < LPFC_RSRC_DESC_MAX_NUM) { - if (rc != MBX_TIMEOUT) - mempool_free(mboxq, phba->mbox_mem_pool); - return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn); - } + uint16_t max_nr_virtfn; -error_out: - if (rc != MBX_TIMEOUT) - mempool_free(mboxq, phba->mbox_mem_pool); - return -EIO; + max_nr_virtfn = lpfc_sli_sriov_nr_virtfn_get(phba); + return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn); } /** @@ -1605,6 +1596,9 @@ static int \ lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \ { \ if (val >= minval && val <= maxval) {\ + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \ + "3052 lpfc_" #attr " changed from %d to %d\n", \ + phba->cfg_##attr, val); \ phba->cfg_##attr = val;\ return 0;\ }\ @@ -1762,6 +1756,9 @@ static int \ lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \ { \ if (val >= minval && val <= maxval) {\ + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ + "3053 lpfc_" #attr " changed from %d to %d\n", \ + vport->cfg_##attr, val); \ vport->cfg_##attr = val;\ return 0;\ }\ @@ -2196,6 +2193,9 @@ lpfc_param_show(enable_npiv); lpfc_param_init(enable_npiv, 1, 0, 1); static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL); +LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2, + "FCF Fast failover=1 Priority failover=2"); + int lpfc_enable_rrq; module_param(lpfc_enable_rrq, int, S_IRUGO); MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality"); @@ -2678,6 +2678,9 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr, if (nolip) return strlen(buf); + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "3054 lpfc_topology changed from %d to %d\n", + prev_val, val); err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport)); if (err) { phba->cfg_topology = prev_val; @@ -3101,6 +3104,10 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr, if (sscanf(val_buf, "%i", &val) != 1) return -EINVAL; + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "3055 lpfc_link_speed changed from %d to %d %s\n", + phba->cfg_link_speed, val, nolip ? "(nolip)" : "(lip)"); + if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) || ((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) || ((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) || @@ -3678,7 +3685,9 @@ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support"); # - Default will result in registering capabilities for all profiles. 
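/*
 * [Editor's note] Sketch of what the LPFC_ATTR-style setters above reduce to
 * once expanded: a bounds check, a log line recording the old and new values,
 * and the assignment.  This is a generic illustration with hypothetical names,
 * not the macro's literal expansion.
 */
static int example_cfg_set(unsigned int *cfg, unsigned int val,
			   unsigned int minval, unsigned int maxval)
{
	if (val < minval || val > maxval)
		return -1;	/* out-of-range values are rejected */
	/* the patched macros log "changed from <old> to <new>" at this point */
	*cfg = val;
	return 0;
}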
# */ -unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION; +unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION | + SHOST_DIX_TYPE0_PROTECTION | + SHOST_DIX_TYPE1_PROTECTION; module_param(lpfc_prot_mask, uint, S_IRUGO); MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask"); @@ -3769,6 +3778,7 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_fdmi_on, &dev_attr_lpfc_max_luns, &dev_attr_lpfc_enable_npiv, + &dev_attr_lpfc_fcf_failover_policy, &dev_attr_lpfc_enable_rrq, &dev_attr_nport_evt_cnt, &dev_attr_board_mode, @@ -4989,6 +4999,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_link_speed_init(phba, lpfc_link_speed); lpfc_poll_tmo_init(phba, lpfc_poll_tmo); lpfc_enable_npiv_init(phba, lpfc_enable_npiv); + lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy); lpfc_enable_rrq_init(phba, lpfc_enable_rrq); lpfc_use_msi_init(phba, lpfc_use_msi); lpfc_fcp_imax_init(phba, lpfc_fcp_imax); diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 7fb0ba4cbfa..6760c69f525 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -42,6 +42,7 @@ #include "lpfc.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" +#include "lpfc_debugfs.h" #include "lpfc_vport.h" #include "lpfc_version.h" @@ -960,8 +961,10 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, evt_dat->immed_dat].oxid, phba->ct_ctx[ evt_dat->immed_dat].SID); + phba->ct_ctx[evt_dat->immed_dat].rxid = + piocbq->iocb.ulpContext; phba->ct_ctx[evt_dat->immed_dat].oxid = - piocbq->iocb.ulpContext; + piocbq->iocb.unsli3.rcvsli3.ox_id; phba->ct_ctx[evt_dat->immed_dat].SID = piocbq->iocb.un.rcvels.remoteID; phba->ct_ctx[evt_dat->immed_dat].flags = UNSOL_VALID; @@ -1312,7 +1315,8 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, rc = IOCB_ERROR; goto issue_ct_rsp_exit; } - icmd->ulpContext = phba->ct_ctx[tag].oxid; + icmd->ulpContext = phba->ct_ctx[tag].rxid; + icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid; ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID); if (!ndlp) { lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, @@ -1337,9 +1341,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, goto issue_ct_rsp_exit; } - icmd->un.ulpWord[3] = ndlp->nlp_rpi; - if (phba->sli_rev == LPFC_SLI_REV4) - icmd->ulpContext = + icmd->un.ulpWord[3] = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; /* The exchange is done, mark the entry as invalid */ @@ -1351,8 +1353,8 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, /* Xmit CT response on exchange <xid> */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "2722 Xmit CT response on exchange x%x Data: x%x x%x\n", - icmd->ulpContext, icmd->ulpIoTag, phba->link_state); + "2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n", + icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state); ctiocb->iocb_cmpl = NULL; ctiocb->iocb_flag |= LPFC_IO_LIBDFC; @@ -1471,13 +1473,12 @@ send_mgmt_rsp_exit: /** * lpfc_bsg_diag_mode_enter - process preparing into device diag loopback mode * @phba: Pointer to HBA context object. - * @job: LPFC_BSG_VENDOR_DIAG_MODE * * This function is responsible for preparing driver for diag loopback * on device. 
*/ static int -lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba, struct fc_bsg_job *job) +lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba) { struct lpfc_vport **vports; struct Scsi_Host *shost; @@ -1521,7 +1522,6 @@ lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba, struct fc_bsg_job *job) /** * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode * @phba: Pointer to HBA context object. - * @job: LPFC_BSG_VENDOR_DIAG_MODE * * This function is responsible for driver exit processing of setting up * diag loopback mode on device. @@ -1567,7 +1567,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job) uint32_t link_flags; uint32_t timeout; LPFC_MBOXQ_t *pmboxq; - int mbxstatus; + int mbxstatus = MBX_SUCCESS; int i = 0; int rc = 0; @@ -1586,7 +1586,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job) goto job_error; } - rc = lpfc_bsg_diag_mode_enter(phba, job); + rc = lpfc_bsg_diag_mode_enter(phba); if (rc) goto job_error; @@ -1741,7 +1741,7 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job) uint32_t link_flags, timeout, req_len, alloc_len; struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback; LPFC_MBOXQ_t *pmboxq = NULL; - int mbxstatus, i, rc = 0; + int mbxstatus = MBX_SUCCESS, i, rc = 0; /* no data to return just the return code */ job->reply->reply_payload_rcv_len = 0; @@ -1758,7 +1758,7 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job) goto job_error; } - rc = lpfc_bsg_diag_mode_enter(phba, job); + rc = lpfc_bsg_diag_mode_enter(phba); if (rc) goto job_error; @@ -1982,7 +1982,7 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job) goto job_error; } - rc = lpfc_bsg_diag_mode_enter(phba, job); + rc = lpfc_bsg_diag_mode_enter(phba); if (rc) goto job_error; @@ -3178,6 +3178,11 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) "(x%x/x%x) complete bsg job done, bsize:%d\n", phba->mbox_ext_buf_ctx.nembType, phba->mbox_ext_buf_ctx.mboxType, size); + lpfc_idiag_mbxacc_dump_bsg_mbox(phba, + phba->mbox_ext_buf_ctx.nembType, + phba->mbox_ext_buf_ctx.mboxType, + dma_ebuf, sta_pos_addr, + phba->mbox_ext_buf_ctx.mbx_dmabuf, 0); } else spin_unlock_irqrestore(&phba->ct_ev_lock, flags); @@ -3430,6 +3435,10 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job, "ext_buf_cnt:%d\n", ext_buf_cnt); } + /* before dma descriptor setup */ + lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox, + sta_pre_addr, dmabuf, ext_buf_cnt); + /* reject non-embedded mailbox command with none external buffer */ if (ext_buf_cnt == 0) { rc = -EPERM; @@ -3477,6 +3486,10 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job, } } + /* after dma descriptor setup */ + lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox, + sta_pos_addr, dmabuf, ext_buf_cnt); + /* construct base driver mbox command */ pmb = &pmboxq->u.mb; pmbx = (uint8_t *)dmabuf->virt; @@ -3511,7 +3524,7 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job, lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2947 Issued SLI_CONFIG ext-buffer " "maibox command, rc:x%x\n", rc); - return 1; + return SLI_CONFIG_HANDLED; } lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "2948 Failed to issue SLI_CONFIG ext-buffer " @@ -3549,7 +3562,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job, LPFC_MBOXQ_t *pmboxq = NULL; MAILBOX_t *pmb; uint8_t *mbx; - int rc = 0, i; + int rc = 
SLI_CONFIG_NOT_HANDLED, i; mbox_req = (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd; @@ -3591,12 +3604,20 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job, "ext_buf_cnt:%d\n", ext_buf_cnt); } + /* before dma buffer descriptor setup */ + lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox, + sta_pre_addr, dmabuf, ext_buf_cnt); + if (ext_buf_cnt == 0) return -EPERM; /* for the first external buffer */ lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf); + /* after dma descriptor setup */ + lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox, + sta_pos_addr, dmabuf, ext_buf_cnt); + /* log for looking forward */ for (i = 1; i < ext_buf_cnt; i++) { if (nemb_tp == nemb_mse) @@ -3660,7 +3681,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job, lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2955 Issued SLI_CONFIG ext-buffer " "maibox command, rc:x%x\n", rc); - return 1; + return SLI_CONFIG_HANDLED; } lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "2956 Failed to issue SLI_CONFIG ext-buffer " @@ -3668,6 +3689,11 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job, rc = -EPIPE; } + /* wait for additoinal external buffers */ + job->reply->result = 0; + job->job_done(job); + return SLI_CONFIG_HANDLED; + job_error: if (pmboxq) mempool_free(pmboxq, phba->mbox_mem_pool); @@ -3840,6 +3866,12 @@ lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job) dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list, struct lpfc_dmabuf, list); list_del_init(&dmabuf->list); + + /* after dma buffer descriptor setup */ + lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType, + mbox_rd, dma_ebuf, sta_pos_addr, + dmabuf, index); + pbuf = (uint8_t *)dmabuf->virt; job->reply->reply_payload_rcv_len = sg_copy_from_buffer(job->reply_payload.sg_list, @@ -3922,6 +3954,11 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job, dmabuf); list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list); + /* after write dma buffer */ + lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType, + mbox_wr, dma_ebuf, sta_pos_addr, + dmabuf, index); + if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) { lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2968 SLI_CONFIG ext-buffer wr all %d " @@ -3959,7 +3996,7 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job, lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2969 Issued SLI_CONFIG ext-buffer " "maibox command, rc:x%x\n", rc); - return 1; + return SLI_CONFIG_HANDLED; } lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "2970 Failed to issue SLI_CONFIG ext-buffer " @@ -4039,14 +4076,14 @@ lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job, struct lpfc_dmabuf *dmabuf) { struct dfc_mbox_req *mbox_req; - int rc; + int rc = SLI_CONFIG_NOT_HANDLED; mbox_req = (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd; /* mbox command with/without single external buffer */ if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0) - return SLI_CONFIG_NOT_HANDLED; + return rc; /* mbox command and first external buffer */ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) { @@ -4249,7 +4286,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, * mailbox extension size */ if ((transmit_length > receive_length) || - (transmit_length > MAILBOX_EXT_SIZE)) { + (transmit_length > BSG_MBOX_SIZE - 
sizeof(MAILBOX_t))) { rc = -ERANGE; goto job_done; } @@ -4272,7 +4309,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, /* receive length cannot be greater than mailbox * extension size */ - if (receive_length > MAILBOX_EXT_SIZE) { + if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) { rc = -ERANGE; goto job_done; } @@ -4306,7 +4343,8 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, bde = (struct ulp_bde64 *)&pmb->un.varWords[4]; /* bde size cannot be greater than mailbox ext size */ - if (bde->tus.f.bdeSize > MAILBOX_EXT_SIZE) { + if (bde->tus.f.bdeSize > + BSG_MBOX_SIZE - sizeof(MAILBOX_t)) { rc = -ERANGE; goto job_done; } @@ -4332,7 +4370,8 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, * mailbox extension size */ if ((receive_length == 0) || - (receive_length > MAILBOX_EXT_SIZE)) { + (receive_length > + BSG_MBOX_SIZE - sizeof(MAILBOX_t))) { rc = -ERANGE; goto job_done; } diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index fc20c247f36..a6db6aef133 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -235,9 +235,11 @@ int lpfc_sli4_redisc_fcf_table(struct lpfc_hba *); void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *); void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *); uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *); +void lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *, uint16_t); int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t); void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t); int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t); +void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *); int lpfc_mem_alloc(struct lpfc_hba *, int align); void lpfc_mem_free(struct lpfc_hba *); @@ -371,6 +373,10 @@ extern struct lpfc_hbq_init *lpfc_hbq_defs[]; /* SLI4 if_type 2 externs. 
*/ int lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *); int lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *); +int lpfc_sli4_get_allocated_extnts(struct lpfc_hba *, uint16_t, + uint16_t *, uint16_t *); +int lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *, uint16_t, + uint16_t *, uint16_t *); /* externs BlockGuard */ extern char *_dump_buf_data; @@ -432,10 +438,16 @@ void lpfc_handle_rrq_active(struct lpfc_hba *); int lpfc_send_rrq(struct lpfc_hba *, struct lpfc_node_rrq *); int lpfc_set_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *, uint16_t, uint16_t, uint16_t); +uint16_t lpfc_sli4_xri_inrange(struct lpfc_hba *, uint16_t); void lpfc_cleanup_wt_rrqs(struct lpfc_hba *); void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *); struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t, uint32_t); +void lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *, enum nemb_type, + enum mbox_type, enum dma_type, enum sta_type, + struct lpfc_dmabuf *, uint32_t); +void lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *, MAILBOX_t *); int lpfc_wr_object(struct lpfc_hba *, struct list_head *, uint32_t, uint32_t *); /* functions to support SR-IOV */ int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int); +uint16_t lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *); diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index ffe82d169b4..a0424dd90e4 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -48,6 +48,7 @@ #include "lpfc_version.h" #include "lpfc_compat.h" #include "lpfc_debugfs.h" +#include "lpfc_bsg.h" #ifdef CONFIG_SCSI_LPFC_DEBUG_FS /* @@ -135,7 +136,11 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size) int i, index, len, enable; uint32_t ms; struct lpfc_debugfs_trc *dtp; - char buffer[LPFC_DEBUG_TRC_ENTRY_SIZE]; + char *buffer; + + buffer = kmalloc(LPFC_DEBUG_TRC_ENTRY_SIZE, GFP_KERNEL); + if (!buffer) + return 0; enable = lpfc_debugfs_enable; lpfc_debugfs_enable = 0; @@ -167,6 +172,8 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size) } lpfc_debugfs_enable = enable; + kfree(buffer); + return len; } @@ -195,8 +202,11 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size) int i, index, len, enable; uint32_t ms; struct lpfc_debugfs_trc *dtp; - char buffer[LPFC_DEBUG_TRC_ENTRY_SIZE]; + char *buffer; + buffer = kmalloc(LPFC_DEBUG_TRC_ENTRY_SIZE, GFP_KERNEL); + if (!buffer) + return 0; enable = lpfc_debugfs_enable; lpfc_debugfs_enable = 0; @@ -228,6 +238,8 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size) } lpfc_debugfs_enable = enable; + kfree(buffer); + return len; } @@ -378,7 +390,11 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size) int len = 0; int i, off; uint32_t *ptr; - char buffer[1024]; + char *buffer; + + buffer = kmalloc(1024, GFP_KERNEL); + if (!buffer) + return 0; off = 0; spin_lock_irq(&phba->hbalock); @@ -407,6 +423,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size) } spin_unlock_irq(&phba->hbalock); + kfree(buffer); + return len; } @@ -1147,7 +1165,8 @@ static int lpfc_idiag_cmd_get(const char __user *buf, size_t nbytes, { char mybuf[64]; char *pbuf, *step_str; - int bsize, i; + int i; + size_t bsize; /* Protect copy from user */ if (!access_ok(VERIFY_READ, buf, nbytes)) @@ -1326,8 +1345,8 @@ lpfc_idiag_pcicfg_read(struct file *file, char __user *buf, size_t nbytes, return 0; if (idiag.cmd.opcode == 
LPFC_IDIAG_CMD_PCICFG_RD) { - where = idiag.cmd.data[0]; - count = idiag.cmd.data[1]; + where = idiag.cmd.data[IDIAG_PCICFG_WHERE_INDX]; + count = idiag.cmd.data[IDIAG_PCICFG_COUNT_INDX]; } else return 0; @@ -1372,6 +1391,11 @@ pcicfg_browse: len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, "%08x ", u32val); offset += sizeof(uint32_t); + if (offset >= LPFC_PCI_CFG_SIZE) { + len += snprintf(pbuffer+len, + LPFC_PCI_CFG_SIZE-len, "\n"); + break; + } index -= sizeof(uint32_t); if (!index) len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, @@ -1384,8 +1408,11 @@ pcicfg_browse: } /* Set up the offset for next portion of pci cfg read */ - idiag.offset.last_rd += LPFC_PCI_CFG_RD_SIZE; - if (idiag.offset.last_rd >= LPFC_PCI_CFG_SIZE) + if (index == 0) { + idiag.offset.last_rd += LPFC_PCI_CFG_RD_SIZE; + if (idiag.offset.last_rd >= LPFC_PCI_CFG_SIZE) + idiag.offset.last_rd = 0; + } else idiag.offset.last_rd = 0; return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); @@ -1438,8 +1465,8 @@ lpfc_idiag_pcicfg_write(struct file *file, const char __user *buf, if (rc != LPFC_PCI_CFG_RD_CMD_ARG) goto error_out; /* Read command from PCI config space, set up command fields */ - where = idiag.cmd.data[0]; - count = idiag.cmd.data[1]; + where = idiag.cmd.data[IDIAG_PCICFG_WHERE_INDX]; + count = idiag.cmd.data[IDIAG_PCICFG_COUNT_INDX]; if (count == LPFC_PCI_CFG_BROWSE) { if (where % sizeof(uint32_t)) goto error_out; @@ -1474,9 +1501,9 @@ lpfc_idiag_pcicfg_write(struct file *file, const char __user *buf, if (rc != LPFC_PCI_CFG_WR_CMD_ARG) goto error_out; /* Write command to PCI config space, read-modify-write */ - where = idiag.cmd.data[0]; - count = idiag.cmd.data[1]; - value = idiag.cmd.data[2]; + where = idiag.cmd.data[IDIAG_PCICFG_WHERE_INDX]; + count = idiag.cmd.data[IDIAG_PCICFG_COUNT_INDX]; + value = idiag.cmd.data[IDIAG_PCICFG_VALUE_INDX]; /* Sanity checks */ if ((count != sizeof(uint8_t)) && (count != sizeof(uint16_t)) && @@ -1569,6 +1596,292 @@ error_out: } /** + * lpfc_idiag_baracc_read - idiag debugfs pci bar access read + * @file: The file pointer to read from. + * @buf: The buffer to copy the data to. + * @nbytes: The number of bytes to read. + * @ppos: The position in the file to start reading from. + * + * Description: + * This routine reads data from the @phba pci bar memory mapped space + * according to the idiag command, and copies to user @buf. + * + * Returns: + * This function returns the amount of data that was read (this could be less + * than @nbytes if the end of the file was reached) or a negative error value. 
+ **/ +static ssize_t +lpfc_idiag_baracc_read(struct file *file, char __user *buf, size_t nbytes, + loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + int offset_label, offset, offset_run, len = 0, index; + int bar_num, acc_range, bar_size; + char *pbuffer; + void __iomem *mem_mapped_bar; + uint32_t if_type; + struct pci_dev *pdev; + uint32_t u32val; + + pdev = phba->pcidev; + if (!pdev) + return 0; + + /* This is a user read operation */ + debug->op = LPFC_IDIAG_OP_RD; + + if (!debug->buffer) + debug->buffer = kmalloc(LPFC_PCI_BAR_RD_BUF_SIZE, GFP_KERNEL); + if (!debug->buffer) + return 0; + pbuffer = debug->buffer; + + if (*ppos) + return 0; + + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_RD) { + bar_num = idiag.cmd.data[IDIAG_BARACC_BAR_NUM_INDX]; + offset = idiag.cmd.data[IDIAG_BARACC_OFF_SET_INDX]; + acc_range = idiag.cmd.data[IDIAG_BARACC_ACC_MOD_INDX]; + bar_size = idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX]; + } else + return 0; + + if (acc_range == 0) + return 0; + + if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); + if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { + if (bar_num == IDIAG_BARACC_BAR_0) + mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p; + else if (bar_num == IDIAG_BARACC_BAR_1) + mem_mapped_bar = phba->sli4_hba.ctrl_regs_memmap_p; + else if (bar_num == IDIAG_BARACC_BAR_2) + mem_mapped_bar = phba->sli4_hba.drbl_regs_memmap_p; + else + return 0; + } else if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { + if (bar_num == IDIAG_BARACC_BAR_0) + mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p; + else + return 0; + } else + return 0; + + /* Read single PCI bar space register */ + if (acc_range == SINGLE_WORD) { + offset_run = offset; + u32val = readl(mem_mapped_bar + offset_run); + len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len, + "%05x: %08x\n", offset_run, u32val); + } else + goto baracc_browse; + + return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); + +baracc_browse: + + /* Browse all PCI bar space registers */ + offset_label = idiag.offset.last_rd; + offset_run = offset_label; + + /* Read PCI bar memory mapped space */ + len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len, + "%05x: ", offset_label); + index = LPFC_PCI_BAR_RD_SIZE; + while (index > 0) { + u32val = readl(mem_mapped_bar + offset_run); + len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len, + "%08x ", u32val); + offset_run += sizeof(uint32_t); + if (acc_range == LPFC_PCI_BAR_BROWSE) { + if (offset_run >= bar_size) { + len += snprintf(pbuffer+len, + LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n"); + break; + } + } else { + if (offset_run >= offset + + (acc_range * sizeof(uint32_t))) { + len += snprintf(pbuffer+len, + LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n"); + break; + } + } + index -= sizeof(uint32_t); + if (!index) + len += snprintf(pbuffer+len, + LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n"); + else if (!(index % (8 * sizeof(uint32_t)))) { + offset_label += (8 * sizeof(uint32_t)); + len += snprintf(pbuffer+len, + LPFC_PCI_BAR_RD_BUF_SIZE-len, + "\n%05x: ", offset_label); + } + } + + /* Set up the offset for next portion of pci bar read */ + if (index == 0) { + idiag.offset.last_rd += LPFC_PCI_BAR_RD_SIZE; + if (acc_range == LPFC_PCI_BAR_BROWSE) { + if (idiag.offset.last_rd >= bar_size) + idiag.offset.last_rd = 0; + } else { + if (offset_run >= offset + + (acc_range * sizeof(uint32_t))) + idiag.offset.last_rd = offset; + } + } else { + if (acc_range == LPFC_PCI_BAR_BROWSE) + idiag.offset.last_rd = 0; + else + 
idiag.offset.last_rd = offset; + } + + return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); +} + +/** + * lpfc_idiag_baracc_write - Syntax check and set up idiag bar access commands + * @file: The file pointer to read from. + * @buf: The buffer to copy the user data from. + * @nbytes: The number of bytes to get. + * @ppos: The position in the file to start reading from. + * + * This routine get the debugfs idiag command struct from user space and + * then perform the syntax check for PCI bar memory mapped space read or + * write command accordingly. In the case of PCI bar memory mapped space + * read command, it sets up the command in the idiag command struct for + * the debugfs read operation. In the case of PCI bar memorpy mapped space + * write operation, it executes the write operation into the PCI bar memory + * mapped space accordingly. + * + * It returns the @nbytges passing in from debugfs user space when successful. + * In case of error conditions, it returns proper error code back to the user + * space. + */ +static ssize_t +lpfc_idiag_baracc_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + uint32_t bar_num, bar_size, offset, value, acc_range; + struct pci_dev *pdev; + void __iomem *mem_mapped_bar; + uint32_t if_type; + uint32_t u32val; + int rc; + + pdev = phba->pcidev; + if (!pdev) + return -EFAULT; + + /* This is a user write operation */ + debug->op = LPFC_IDIAG_OP_WR; + + rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd); + if (rc < 0) + return rc; + + if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); + bar_num = idiag.cmd.data[IDIAG_BARACC_BAR_NUM_INDX]; + + if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { + if ((bar_num != IDIAG_BARACC_BAR_0) && + (bar_num != IDIAG_BARACC_BAR_1) && + (bar_num != IDIAG_BARACC_BAR_2)) + goto error_out; + } else if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { + if (bar_num != IDIAG_BARACC_BAR_0) + goto error_out; + } else + goto error_out; + + if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { + if (bar_num == IDIAG_BARACC_BAR_0) { + idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] = + LPFC_PCI_IF0_BAR0_SIZE; + mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p; + } else if (bar_num == IDIAG_BARACC_BAR_1) { + idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] = + LPFC_PCI_IF0_BAR1_SIZE; + mem_mapped_bar = phba->sli4_hba.ctrl_regs_memmap_p; + } else if (bar_num == IDIAG_BARACC_BAR_2) { + idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] = + LPFC_PCI_IF0_BAR2_SIZE; + mem_mapped_bar = phba->sli4_hba.drbl_regs_memmap_p; + } else + goto error_out; + } else if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { + if (bar_num == IDIAG_BARACC_BAR_0) { + idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] = + LPFC_PCI_IF2_BAR0_SIZE; + mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p; + } else + goto error_out; + } else + goto error_out; + + offset = idiag.cmd.data[IDIAG_BARACC_OFF_SET_INDX]; + if (offset % sizeof(uint32_t)) + goto error_out; + + bar_size = idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX]; + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_RD) { + /* Sanity check on PCI config read command line arguments */ + if (rc != LPFC_PCI_BAR_RD_CMD_ARG) + goto error_out; + acc_range = idiag.cmd.data[IDIAG_BARACC_ACC_MOD_INDX]; + if (acc_range == LPFC_PCI_BAR_BROWSE) { + if (offset > bar_size - sizeof(uint32_t)) + goto error_out; + /* Starting offset to browse */ + idiag.offset.last_rd = offset; + } else if (acc_range > SINGLE_WORD) { + if (offset + 
acc_range * sizeof(uint32_t) > bar_size) + goto error_out; + /* Starting offset to browse */ + idiag.offset.last_rd = offset; + } else if (acc_range != SINGLE_WORD) + goto error_out; + } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_WR || + idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_ST || + idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_CL) { + /* Sanity check on PCI bar write command line arguments */ + if (rc != LPFC_PCI_BAR_WR_CMD_ARG) + goto error_out; + /* Write command to PCI bar space, read-modify-write */ + acc_range = SINGLE_WORD; + value = idiag.cmd.data[IDIAG_BARACC_REG_VAL_INDX]; + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_WR) { + writel(value, mem_mapped_bar + offset); + readl(mem_mapped_bar + offset); + } + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_ST) { + u32val = readl(mem_mapped_bar + offset); + u32val |= value; + writel(u32val, mem_mapped_bar + offset); + readl(mem_mapped_bar + offset); + } + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_CL) { + u32val = readl(mem_mapped_bar + offset); + u32val &= ~value; + writel(u32val, mem_mapped_bar + offset); + readl(mem_mapped_bar + offset); + } + } else + /* All other opecodes are illegal for now */ + goto error_out; + + return nbytes; +error_out: + memset(&idiag, 0, sizeof(idiag)); + return -EINVAL; +} + +/** * lpfc_idiag_queinfo_read - idiag debugfs read queue information * @file: The file pointer to read from. * @buf: The buffer to copy the data to. @@ -1870,8 +2183,8 @@ lpfc_idiag_queacc_read(struct file *file, char __user *buf, size_t nbytes, return 0; if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) { - index = idiag.cmd.data[2]; - count = idiag.cmd.data[3]; + index = idiag.cmd.data[IDIAG_QUEACC_INDEX_INDX]; + count = idiag.cmd.data[IDIAG_QUEACC_COUNT_INDX]; pque = (struct lpfc_queue *)idiag.ptr_private; } else return 0; @@ -1943,12 +2256,12 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, return rc; /* Get and sanity check on command feilds */ - quetp = idiag.cmd.data[0]; - queid = idiag.cmd.data[1]; - index = idiag.cmd.data[2]; - count = idiag.cmd.data[3]; - offset = idiag.cmd.data[4]; - value = idiag.cmd.data[5]; + quetp = idiag.cmd.data[IDIAG_QUEACC_QUETP_INDX]; + queid = idiag.cmd.data[IDIAG_QUEACC_QUEID_INDX]; + index = idiag.cmd.data[IDIAG_QUEACC_INDEX_INDX]; + count = idiag.cmd.data[IDIAG_QUEACC_COUNT_INDX]; + offset = idiag.cmd.data[IDIAG_QUEACC_OFFST_INDX]; + value = idiag.cmd.data[IDIAG_QUEACC_VALUE_INDX]; /* Sanity check on command line arguments */ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR || @@ -2217,7 +2530,7 @@ lpfc_idiag_drbacc_read(struct file *file, char __user *buf, size_t nbytes, return 0; if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_RD) - drb_reg_id = idiag.cmd.data[0]; + drb_reg_id = idiag.cmd.data[IDIAG_DRBACC_REGID_INDX]; else return 0; @@ -2256,7 +2569,7 @@ lpfc_idiag_drbacc_write(struct file *file, const char __user *buf, { struct lpfc_debug *debug = file->private_data; struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; - uint32_t drb_reg_id, value, reg_val; + uint32_t drb_reg_id, value, reg_val = 0; void __iomem *drb_reg; int rc; @@ -2268,8 +2581,8 @@ lpfc_idiag_drbacc_write(struct file *file, const char __user *buf, return rc; /* Sanity check on command line arguments */ - drb_reg_id = idiag.cmd.data[0]; - value = idiag.cmd.data[1]; + drb_reg_id = idiag.cmd.data[IDIAG_DRBACC_REGID_INDX]; + value = idiag.cmd.data[IDIAG_DRBACC_VALUE_INDX]; if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR || idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST || @@ -2329,6 
+2642,679 @@ error_out: return -EINVAL; } +/** + * lpfc_idiag_ctlacc_read_reg - idiag debugfs read a control registers + * @phba: The pointer to hba structure. + * @pbuffer: The pointer to the buffer to copy the data to. + * @len: The lenght of bytes to copied. + * @drbregid: The id to doorbell registers. + * + * Description: + * This routine reads a control register and copies its content to the + * user buffer pointed to by @pbuffer. + * + * Returns: + * This function returns the amount of data that was copied into @pbuffer. + **/ +static int +lpfc_idiag_ctlacc_read_reg(struct lpfc_hba *phba, char *pbuffer, + int len, uint32_t ctlregid) +{ + + if (!pbuffer) + return 0; + + switch (ctlregid) { + case LPFC_CTL_PORT_SEM: + len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, + "Port SemReg: 0x%08x\n", + readl(phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_SEM_OFFSET)); + break; + case LPFC_CTL_PORT_STA: + len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, + "Port StaReg: 0x%08x\n", + readl(phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_STA_OFFSET)); + break; + case LPFC_CTL_PORT_CTL: + len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, + "Port CtlReg: 0x%08x\n", + readl(phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_CTL_OFFSET)); + break; + case LPFC_CTL_PORT_ER1: + len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, + "Port Er1Reg: 0x%08x\n", + readl(phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_ER1_OFFSET)); + break; + case LPFC_CTL_PORT_ER2: + len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, + "Port Er2Reg: 0x%08x\n", + readl(phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_ER2_OFFSET)); + break; + case LPFC_CTL_PDEV_CTL: + len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, + "PDev CtlReg: 0x%08x\n", + readl(phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PDEV_CTL_OFFSET)); + break; + default: + break; + } + return len; +} + +/** + * lpfc_idiag_ctlacc_read - idiag debugfs read port and device control register + * @file: The file pointer to read from. + * @buf: The buffer to copy the data to. + * @nbytes: The number of bytes to read. + * @ppos: The position in the file to start reading from. + * + * Description: + * This routine reads data from the @phba port and device registers according + * to the idiag command, and copies to user @buf. + * + * Returns: + * This function returns the amount of data that was read (this could be less + * than @nbytes if the end of the file was reached) or a negative error value. 
+ **/ +static ssize_t +lpfc_idiag_ctlacc_read(struct file *file, char __user *buf, size_t nbytes, + loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + uint32_t ctl_reg_id, i; + char *pbuffer; + int len = 0; + + /* This is a user read operation */ + debug->op = LPFC_IDIAG_OP_RD; + + if (!debug->buffer) + debug->buffer = kmalloc(LPFC_CTL_ACC_BUF_SIZE, GFP_KERNEL); + if (!debug->buffer) + return 0; + pbuffer = debug->buffer; + + if (*ppos) + return 0; + + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_RD) + ctl_reg_id = idiag.cmd.data[IDIAG_CTLACC_REGID_INDX]; + else + return 0; + + if (ctl_reg_id == LPFC_CTL_ACC_ALL) + for (i = 1; i <= LPFC_CTL_MAX; i++) + len = lpfc_idiag_ctlacc_read_reg(phba, + pbuffer, len, i); + else + len = lpfc_idiag_ctlacc_read_reg(phba, + pbuffer, len, ctl_reg_id); + + return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); +} + +/** + * lpfc_idiag_ctlacc_write - Syntax check and set up idiag ctlacc commands + * @file: The file pointer to read from. + * @buf: The buffer to copy the user data from. + * @nbytes: The number of bytes to get. + * @ppos: The position in the file to start reading from. + * + * This routine get the debugfs idiag command struct from user space and then + * perform the syntax check for port and device control register read (dump) + * or write (set) command accordingly. + * + * It returns the @nbytges passing in from debugfs user space when successful. + * In case of error conditions, it returns proper error code back to the user + * space. + **/ +static ssize_t +lpfc_idiag_ctlacc_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + uint32_t ctl_reg_id, value, reg_val = 0; + void __iomem *ctl_reg; + int rc; + + /* This is a user write operation */ + debug->op = LPFC_IDIAG_OP_WR; + + rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd); + if (rc < 0) + return rc; + + /* Sanity check on command line arguments */ + ctl_reg_id = idiag.cmd.data[IDIAG_CTLACC_REGID_INDX]; + value = idiag.cmd.data[IDIAG_CTLACC_VALUE_INDX]; + + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_WR || + idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_ST || + idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_CL) { + if (rc != LPFC_CTL_ACC_WR_CMD_ARG) + goto error_out; + if (ctl_reg_id > LPFC_CTL_MAX) + goto error_out; + } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_RD) { + if (rc != LPFC_CTL_ACC_RD_CMD_ARG) + goto error_out; + if ((ctl_reg_id > LPFC_CTL_MAX) && + (ctl_reg_id != LPFC_CTL_ACC_ALL)) + goto error_out; + } else + goto error_out; + + /* Perform the write access operation */ + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_WR || + idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_ST || + idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_CL) { + switch (ctl_reg_id) { + case LPFC_CTL_PORT_SEM: + ctl_reg = phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_SEM_OFFSET; + break; + case LPFC_CTL_PORT_STA: + ctl_reg = phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_STA_OFFSET; + break; + case LPFC_CTL_PORT_CTL: + ctl_reg = phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_CTL_OFFSET; + break; + case LPFC_CTL_PORT_ER1: + ctl_reg = phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_ER1_OFFSET; + break; + case LPFC_CTL_PORT_ER2: + ctl_reg = phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_ER2_OFFSET; + break; + case LPFC_CTL_PDEV_CTL: + ctl_reg = 
phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PDEV_CTL_OFFSET; + break; + default: + goto error_out; + } + + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_WR) + reg_val = value; + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_ST) { + reg_val = readl(ctl_reg); + reg_val |= value; + } + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_CL) { + reg_val = readl(ctl_reg); + reg_val &= ~value; + } + writel(reg_val, ctl_reg); + readl(ctl_reg); /* flush */ + } + return nbytes; + +error_out: + /* Clean out command structure on command error out */ + memset(&idiag, 0, sizeof(idiag)); + return -EINVAL; +} + +/** + * lpfc_idiag_mbxacc_get_setup - idiag debugfs get mailbox access setup + * @phba: Pointer to HBA context object. + * @pbuffer: Pointer to data buffer. + * + * Description: + * This routine gets the driver mailbox access debugfs setup information. + * + * Returns: + * This function returns the amount of data that was read (this could be less + * than @nbytes if the end of the file was reached) or a negative error value. + **/ +static int +lpfc_idiag_mbxacc_get_setup(struct lpfc_hba *phba, char *pbuffer) +{ + uint32_t mbx_dump_map, mbx_dump_cnt, mbx_word_cnt, mbx_mbox_cmd; + int len = 0; + + mbx_mbox_cmd = idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX]; + mbx_dump_map = idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX]; + mbx_dump_cnt = idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX]; + mbx_word_cnt = idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX]; + + len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len, + "mbx_dump_map: 0x%08x\n", mbx_dump_map); + len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len, + "mbx_dump_cnt: %04d\n", mbx_dump_cnt); + len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len, + "mbx_word_cnt: %04d\n", mbx_word_cnt); + len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len, + "mbx_mbox_cmd: 0x%02x\n", mbx_mbox_cmd); + + return len; +} + +/** + * lpfc_idiag_mbxacc_read - idiag debugfs read on mailbox access + * @file: The file pointer to read from. + * @buf: The buffer to copy the data to. + * @nbytes: The number of bytes to read. + * @ppos: The position in the file to start reading from. + * + * Description: + * This routine reads data from the @phba driver mailbox access debugfs setup + * information. + * + * Returns: + * This function returns the amount of data that was read (this could be less + * than @nbytes if the end of the file was reached) or a negative error value. + **/ +static ssize_t +lpfc_idiag_mbxacc_read(struct file *file, char __user *buf, size_t nbytes, + loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + char *pbuffer; + int len = 0; + + /* This is a user read operation */ + debug->op = LPFC_IDIAG_OP_RD; + + if (!debug->buffer) + debug->buffer = kmalloc(LPFC_MBX_ACC_BUF_SIZE, GFP_KERNEL); + if (!debug->buffer) + return 0; + pbuffer = debug->buffer; + + if (*ppos) + return 0; + + if ((idiag.cmd.opcode != LPFC_IDIAG_CMD_MBXACC_DP) && + (idiag.cmd.opcode != LPFC_IDIAG_BSG_MBXACC_DP)) + return 0; + + len = lpfc_idiag_mbxacc_get_setup(phba, pbuffer); + + return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); +} + +/** + * lpfc_idiag_mbxacc_write - Syntax check and set up idiag mbxacc commands + * @file: The file pointer to read from. + * @buf: The buffer to copy the user data from. + * @nbytes: The number of bytes to get. + * @ppos: The position in the file to start reading from. 
+ * + * This routine get the debugfs idiag command struct from user space and then + * perform the syntax check for driver mailbox command (dump) and sets up the + * necessary states in the idiag command struct accordingly. + * + * It returns the @nbytges passing in from debugfs user space when successful. + * In case of error conditions, it returns proper error code back to the user + * space. + **/ +static ssize_t +lpfc_idiag_mbxacc_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + uint32_t mbx_dump_map, mbx_dump_cnt, mbx_word_cnt, mbx_mbox_cmd; + int rc; + + /* This is a user write operation */ + debug->op = LPFC_IDIAG_OP_WR; + + rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd); + if (rc < 0) + return rc; + + /* Sanity check on command line arguments */ + mbx_mbox_cmd = idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX]; + mbx_dump_map = idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX]; + mbx_dump_cnt = idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX]; + mbx_word_cnt = idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX]; + + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_MBXACC_DP) { + if (!(mbx_dump_map & LPFC_MBX_DMP_MBX_ALL)) + goto error_out; + if ((mbx_dump_map & ~LPFC_MBX_DMP_MBX_ALL) && + (mbx_dump_map != LPFC_MBX_DMP_ALL)) + goto error_out; + if (mbx_word_cnt > sizeof(MAILBOX_t)) + goto error_out; + } else if (idiag.cmd.opcode == LPFC_IDIAG_BSG_MBXACC_DP) { + if (!(mbx_dump_map & LPFC_BSG_DMP_MBX_ALL)) + goto error_out; + if ((mbx_dump_map & ~LPFC_BSG_DMP_MBX_ALL) && + (mbx_dump_map != LPFC_MBX_DMP_ALL)) + goto error_out; + if (mbx_word_cnt > (BSG_MBOX_SIZE)/4) + goto error_out; + if (mbx_mbox_cmd != 0x9b) + goto error_out; + } else + goto error_out; + + if (mbx_word_cnt == 0) + goto error_out; + if (rc != LPFC_MBX_DMP_ARG) + goto error_out; + if (mbx_mbox_cmd & ~0xff) + goto error_out; + + /* condition for stop mailbox dump */ + if (mbx_dump_cnt == 0) + goto reset_out; + + return nbytes; + +reset_out: + /* Clean out command structure on command error out */ + memset(&idiag, 0, sizeof(idiag)); + return nbytes; + +error_out: + /* Clean out command structure on command error out */ + memset(&idiag, 0, sizeof(idiag)); + return -EINVAL; +} + +/** + * lpfc_idiag_extacc_avail_get - get the available extents information + * @phba: pointer to lpfc hba data structure. + * @pbuffer: pointer to internal buffer. + * @len: length into the internal buffer data has been copied. + * + * Description: + * This routine is to get the available extent information. + * + * Returns: + * overall lenth of the data read into the internal buffer. 
+ **/ +static int +lpfc_idiag_extacc_avail_get(struct lpfc_hba *phba, char *pbuffer, int len) +{ + uint16_t ext_cnt, ext_size; + + len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\nAvailable Extents Information:\n"); + + len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\tPort Available VPI extents: "); + lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VPI, + &ext_cnt, &ext_size); + len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "Count %3d, Size %3d\n", ext_cnt, ext_size); + + len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\tPort Available VFI extents: "); + lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VFI, + &ext_cnt, &ext_size); + len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "Count %3d, Size %3d\n", ext_cnt, ext_size); + + len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\tPort Available RPI extents: "); + lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_RPI, + &ext_cnt, &ext_size); + len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "Count %3d, Size %3d\n", ext_cnt, ext_size); + + len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\tPort Available XRI extents: "); + lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI, + &ext_cnt, &ext_size); + len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "Count %3d, Size %3d\n", ext_cnt, ext_size); + + return len; +} + +/** + * lpfc_idiag_extacc_alloc_get - get the allocated extents information + * @phba: pointer to lpfc hba data structure. + * @pbuffer: pointer to internal buffer. + * @len: length into the internal buffer data has been copied. + * + * Description: + * This routine is to get the allocated extent information. + * + * Returns: + * overall lenth of the data read into the internal buffer. 
+ **/ +static int +lpfc_idiag_extacc_alloc_get(struct lpfc_hba *phba, char *pbuffer, int len) +{ + uint16_t ext_cnt, ext_size; + int rc; + + len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\nAllocated Extents Information:\n"); + + len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\tHost Allocated VPI extents: "); + rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_VPI, + &ext_cnt, &ext_size); + if (!rc) + len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "Port %d Extent %3d, Size %3d\n", + phba->brd_no, ext_cnt, ext_size); + else + len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "N/A\n"); + + len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\tHost Allocated VFI extents: "); + rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_VFI, + &ext_cnt, &ext_size); + if (!rc) + len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "Port %d Extent %3d, Size %3d\n", + phba->brd_no, ext_cnt, ext_size); + else + len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "N/A\n"); + + len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\tHost Allocated RPI extents: "); + rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_RPI, + &ext_cnt, &ext_size); + if (!rc) + len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "Port %d Extent %3d, Size %3d\n", + phba->brd_no, ext_cnt, ext_size); + else + len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "N/A\n"); + + len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\tHost Allocated XRI extents: "); + rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_XRI, + &ext_cnt, &ext_size); + if (!rc) + len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "Port %d Extent %3d, Size %3d\n", + phba->brd_no, ext_cnt, ext_size); + else + len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "N/A\n"); + + return len; +} + +/** + * lpfc_idiag_extacc_drivr_get - get driver extent information + * @phba: pointer to lpfc hba data structure. + * @pbuffer: pointer to internal buffer. + * @len: length into the internal buffer data has been copied. + * + * Description: + * This routine is to get the driver extent information. + * + * Returns: + * overall lenth of the data read into the internal buffer. 
+ **/ +static int +lpfc_idiag_extacc_drivr_get(struct lpfc_hba *phba, char *pbuffer, int len) +{ + struct lpfc_rsrc_blks *rsrc_blks; + int index; + + len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\nDriver Extents Information:\n"); + + len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\tVPI extents:\n"); + index = 0; + list_for_each_entry(rsrc_blks, &phba->lpfc_vpi_blk_list, list) { + len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\t\tBlock %3d: Start %4d, Count %4d\n", + index, rsrc_blks->rsrc_start, + rsrc_blks->rsrc_size); + index++; + } + len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\tVFI extents:\n"); + index = 0; + list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_vfi_blk_list, + list) { + len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\t\tBlock %3d: Start %4d, Count %4d\n", + index, rsrc_blks->rsrc_start, + rsrc_blks->rsrc_size); + index++; + } + + len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\tRPI extents:\n"); + index = 0; + list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_rpi_blk_list, + list) { + len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\t\tBlock %3d: Start %4d, Count %4d\n", + index, rsrc_blks->rsrc_start, + rsrc_blks->rsrc_size); + index++; + } + + len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\tXRI extents:\n"); + index = 0; + list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_xri_blk_list, + list) { + len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + "\t\tBlock %3d: Start %4d, Count %4d\n", + index, rsrc_blks->rsrc_start, + rsrc_blks->rsrc_size); + index++; + } + + return len; +} + +/** + * lpfc_idiag_extacc_write - Syntax check and set up idiag extacc commands + * @file: The file pointer to read from. + * @buf: The buffer to copy the user data from. + * @nbytes: The number of bytes to get. + * @ppos: The position in the file to start reading from. + * + * This routine get the debugfs idiag command struct from user space and then + * perform the syntax check for extent information access commands and sets + * up the necessary states in the idiag command struct accordingly. + * + * It returns the @nbytges passing in from debugfs user space when successful. + * In case of error conditions, it returns proper error code back to the user + * space. + **/ +static ssize_t +lpfc_idiag_extacc_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + uint32_t ext_map; + int rc; + + /* This is a user write operation */ + debug->op = LPFC_IDIAG_OP_WR; + + rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd); + if (rc < 0) + return rc; + + ext_map = idiag.cmd.data[IDIAG_EXTACC_EXMAP_INDX]; + + if (idiag.cmd.opcode != LPFC_IDIAG_CMD_EXTACC_RD) + goto error_out; + if (rc != LPFC_EXT_ACC_CMD_ARG) + goto error_out; + if (!(ext_map & LPFC_EXT_ACC_ALL)) + goto error_out; + + return nbytes; +error_out: + /* Clean out command structure on command error out */ + memset(&idiag, 0, sizeof(idiag)); + return -EINVAL; +} + +/** + * lpfc_idiag_extacc_read - idiag debugfs read access to extent information + * @file: The file pointer to read from. + * @buf: The buffer to copy the data to. + * @nbytes: The number of bytes to read. + * @ppos: The position in the file to start reading from. + * + * Description: + * This routine reads data from the proper extent information according to + * the idiag command, and copies to user @buf. 
+ * + * Returns: + * This function returns the amount of data that was read (this could be less + * than @nbytes if the end of the file was reached) or a negative error value. + **/ +static ssize_t +lpfc_idiag_extacc_read(struct file *file, char __user *buf, size_t nbytes, + loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + char *pbuffer; + uint32_t ext_map; + int len = 0; + + /* This is a user read operation */ + debug->op = LPFC_IDIAG_OP_RD; + + if (!debug->buffer) + debug->buffer = kmalloc(LPFC_EXT_ACC_BUF_SIZE, GFP_KERNEL); + if (!debug->buffer) + return 0; + pbuffer = debug->buffer; + if (*ppos) + return 0; + if (idiag.cmd.opcode != LPFC_IDIAG_CMD_EXTACC_RD) + return 0; + + ext_map = idiag.cmd.data[IDIAG_EXTACC_EXMAP_INDX]; + if (ext_map & LPFC_EXT_ACC_AVAIL) + len = lpfc_idiag_extacc_avail_get(phba, pbuffer, len); + if (ext_map & LPFC_EXT_ACC_ALLOC) + len = lpfc_idiag_extacc_alloc_get(phba, pbuffer, len); + if (ext_map & LPFC_EXT_ACC_DRIVR) + len = lpfc_idiag_extacc_drivr_get(phba, pbuffer, len); + + return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); +} + #undef lpfc_debugfs_op_disc_trc static const struct file_operations lpfc_debugfs_op_disc_trc = { .owner = THIS_MODULE, @@ -2419,6 +3405,16 @@ static const struct file_operations lpfc_idiag_op_pciCfg = { .release = lpfc_idiag_cmd_release, }; +#undef lpfc_idiag_op_barAcc +static const struct file_operations lpfc_idiag_op_barAcc = { + .owner = THIS_MODULE, + .open = lpfc_idiag_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_idiag_baracc_read, + .write = lpfc_idiag_baracc_write, + .release = lpfc_idiag_cmd_release, +}; + #undef lpfc_idiag_op_queInfo static const struct file_operations lpfc_idiag_op_queInfo = { .owner = THIS_MODULE, @@ -2427,7 +3423,7 @@ static const struct file_operations lpfc_idiag_op_queInfo = { .release = lpfc_idiag_release, }; -#undef lpfc_idiag_op_queacc +#undef lpfc_idiag_op_queAcc static const struct file_operations lpfc_idiag_op_queAcc = { .owner = THIS_MODULE, .open = lpfc_idiag_open, @@ -2437,7 +3433,7 @@ static const struct file_operations lpfc_idiag_op_queAcc = { .release = lpfc_idiag_cmd_release, }; -#undef lpfc_idiag_op_drbacc +#undef lpfc_idiag_op_drbAcc static const struct file_operations lpfc_idiag_op_drbAcc = { .owner = THIS_MODULE, .open = lpfc_idiag_open, @@ -2447,8 +3443,234 @@ static const struct file_operations lpfc_idiag_op_drbAcc = { .release = lpfc_idiag_cmd_release, }; +#undef lpfc_idiag_op_ctlAcc +static const struct file_operations lpfc_idiag_op_ctlAcc = { + .owner = THIS_MODULE, + .open = lpfc_idiag_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_idiag_ctlacc_read, + .write = lpfc_idiag_ctlacc_write, + .release = lpfc_idiag_cmd_release, +}; + +#undef lpfc_idiag_op_mbxAcc +static const struct file_operations lpfc_idiag_op_mbxAcc = { + .owner = THIS_MODULE, + .open = lpfc_idiag_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_idiag_mbxacc_read, + .write = lpfc_idiag_mbxacc_write, + .release = lpfc_idiag_cmd_release, +}; + +#undef lpfc_idiag_op_extAcc +static const struct file_operations lpfc_idiag_op_extAcc = { + .owner = THIS_MODULE, + .open = lpfc_idiag_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_idiag_extacc_read, + .write = lpfc_idiag_extacc_write, + .release = lpfc_idiag_cmd_release, +}; + #endif +/* lpfc_idiag_mbxacc_dump_bsg_mbox - idiag debugfs dump bsg mailbox command + * @phba: Pointer to HBA context object. + * @dmabuf: Pointer to a DMA buffer descriptor. 
+ * + * Description: + * This routine dump a bsg pass-through non-embedded mailbox command with + * external buffer. + **/ +void +lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *phba, enum nemb_type nemb_tp, + enum mbox_type mbox_tp, enum dma_type dma_tp, + enum sta_type sta_tp, + struct lpfc_dmabuf *dmabuf, uint32_t ext_buf) +{ +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + uint32_t *mbx_mbox_cmd, *mbx_dump_map, *mbx_dump_cnt, *mbx_word_cnt; + char line_buf[LPFC_MBX_ACC_LBUF_SZ]; + int len = 0; + uint32_t do_dump = 0; + uint32_t *pword; + uint32_t i; + + if (idiag.cmd.opcode != LPFC_IDIAG_BSG_MBXACC_DP) + return; + + mbx_mbox_cmd = &idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX]; + mbx_dump_map = &idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX]; + mbx_dump_cnt = &idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX]; + mbx_word_cnt = &idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX]; + + if (!(*mbx_dump_map & LPFC_MBX_DMP_ALL) || + (*mbx_dump_cnt == 0) || + (*mbx_word_cnt == 0)) + return; + + if (*mbx_mbox_cmd != 0x9B) + return; + + if ((mbox_tp == mbox_rd) && (dma_tp == dma_mbox)) { + if (*mbx_dump_map & LPFC_BSG_DMP_MBX_RD_MBX) { + do_dump |= LPFC_BSG_DMP_MBX_RD_MBX; + printk(KERN_ERR "\nRead mbox command (x%x), " + "nemb:0x%x, extbuf_cnt:%d:\n", + sta_tp, nemb_tp, ext_buf); + } + } + if ((mbox_tp == mbox_rd) && (dma_tp == dma_ebuf)) { + if (*mbx_dump_map & LPFC_BSG_DMP_MBX_RD_BUF) { + do_dump |= LPFC_BSG_DMP_MBX_RD_BUF; + printk(KERN_ERR "\nRead mbox buffer (x%x), " + "nemb:0x%x, extbuf_seq:%d:\n", + sta_tp, nemb_tp, ext_buf); + } + } + if ((mbox_tp == mbox_wr) && (dma_tp == dma_mbox)) { + if (*mbx_dump_map & LPFC_BSG_DMP_MBX_WR_MBX) { + do_dump |= LPFC_BSG_DMP_MBX_WR_MBX; + printk(KERN_ERR "\nWrite mbox command (x%x), " + "nemb:0x%x, extbuf_cnt:%d:\n", + sta_tp, nemb_tp, ext_buf); + } + } + if ((mbox_tp == mbox_wr) && (dma_tp == dma_ebuf)) { + if (*mbx_dump_map & LPFC_BSG_DMP_MBX_WR_BUF) { + do_dump |= LPFC_BSG_DMP_MBX_WR_BUF; + printk(KERN_ERR "\nWrite mbox buffer (x%x), " + "nemb:0x%x, extbuf_seq:%d:\n", + sta_tp, nemb_tp, ext_buf); + } + } + + /* dump buffer content */ + if (do_dump) { + pword = (uint32_t *)dmabuf->virt; + for (i = 0; i < *mbx_word_cnt; i++) { + if (!(i % 8)) { + if (i != 0) + printk(KERN_ERR "%s\n", line_buf); + len = 0; + len += snprintf(line_buf+len, + LPFC_MBX_ACC_LBUF_SZ-len, + "%03d: ", i); + } + len += snprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len, + "%08x ", (uint32_t)*pword); + pword++; + } + if ((i - 1) % 8) + printk(KERN_ERR "%s\n", line_buf); + (*mbx_dump_cnt)--; + } + + /* Clean out command structure on reaching dump count */ + if (*mbx_dump_cnt == 0) + memset(&idiag, 0, sizeof(idiag)); + return; +#endif +} + +/* lpfc_idiag_mbxacc_dump_issue_mbox - idiag debugfs dump issue mailbox command + * @phba: Pointer to HBA context object. + * @dmabuf: Pointer to a DMA buffer descriptor. + * + * Description: + * This routine dump a pass-through non-embedded mailbox command from issue + * mailbox command. 
+ **/ +void +lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *phba, MAILBOX_t *pmbox) +{ +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + uint32_t *mbx_dump_map, *mbx_dump_cnt, *mbx_word_cnt, *mbx_mbox_cmd; + char line_buf[LPFC_MBX_ACC_LBUF_SZ]; + int len = 0; + uint32_t *pword; + uint8_t *pbyte; + uint32_t i, j; + + if (idiag.cmd.opcode != LPFC_IDIAG_CMD_MBXACC_DP) + return; + + mbx_mbox_cmd = &idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX]; + mbx_dump_map = &idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX]; + mbx_dump_cnt = &idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX]; + mbx_word_cnt = &idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX]; + + if (!(*mbx_dump_map & LPFC_MBX_DMP_MBX_ALL) || + (*mbx_dump_cnt == 0) || + (*mbx_word_cnt == 0)) + return; + + if ((*mbx_mbox_cmd != LPFC_MBX_ALL_CMD) && + (*mbx_mbox_cmd != pmbox->mbxCommand)) + return; + + /* dump buffer content */ + if (*mbx_dump_map & LPFC_MBX_DMP_MBX_WORD) { + printk(KERN_ERR "Mailbox command:0x%x dump by word:\n", + pmbox->mbxCommand); + pword = (uint32_t *)pmbox; + for (i = 0; i < *mbx_word_cnt; i++) { + if (!(i % 8)) { + if (i != 0) + printk(KERN_ERR "%s\n", line_buf); + len = 0; + memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ); + len += snprintf(line_buf+len, + LPFC_MBX_ACC_LBUF_SZ-len, + "%03d: ", i); + } + len += snprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len, + "%08x ", + ((uint32_t)*pword) & 0xffffffff); + pword++; + } + if ((i - 1) % 8) + printk(KERN_ERR "%s\n", line_buf); + printk(KERN_ERR "\n"); + } + if (*mbx_dump_map & LPFC_MBX_DMP_MBX_BYTE) { + printk(KERN_ERR "Mailbox command:0x%x dump by byte:\n", + pmbox->mbxCommand); + pbyte = (uint8_t *)pmbox; + for (i = 0; i < *mbx_word_cnt; i++) { + if (!(i % 8)) { + if (i != 0) + printk(KERN_ERR "%s\n", line_buf); + len = 0; + memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ); + len += snprintf(line_buf+len, + LPFC_MBX_ACC_LBUF_SZ-len, + "%03d: ", i); + } + for (j = 0; j < 4; j++) { + len += snprintf(line_buf+len, + LPFC_MBX_ACC_LBUF_SZ-len, + "%02x", + ((uint8_t)*pbyte) & 0xff); + pbyte++; + } + len += snprintf(line_buf+len, + LPFC_MBX_ACC_LBUF_SZ-len, " "); + } + if ((i - 1) % 8) + printk(KERN_ERR "%s\n", line_buf); + printk(KERN_ERR "\n"); + } + (*mbx_dump_cnt)--; + + /* Clean out command structure on reaching dump count */ + if (*mbx_dump_cnt == 0) + memset(&idiag, 0, sizeof(idiag)); + return; +#endif +} + /** * lpfc_debugfs_initialize - Initialize debugfs for a vport * @vport: The vport pointer to initialize. 
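/*
 * Illustrative sketch, not part of the patch: the new barAcc and ctlAcc
 * debugfs write handlers above all follow the same read-modify-write
 * pattern -- a WR opcode overwrites the register, ST ORs the user value in,
 * and CL ANDs it out, followed by a read back to flush the posted write.
 * The standalone program below mimics only that pattern on a plain variable
 * instead of an ioremapped BAR register; the opcode names are reused purely
 * for illustration and are not the driver's definitions.
 */
#include <stdint.h>
#include <stdio.h>

enum acc_op { ACC_WR, ACC_ST, ACC_CL };

/* Apply one WR/ST/CL style operation to a simulated 32-bit register. */
static uint32_t acc_apply(uint32_t reg, enum acc_op op, uint32_t value)
{
	switch (op) {
	case ACC_WR:		/* write: overwrite the register */
		return value;
	case ACC_ST:		/* set: OR the requested bits in */
		return reg | value;
	case ACC_CL:		/* clear: AND the requested bits out */
		return reg & ~value;
	}
	return reg;
}

int main(void)
{
	uint32_t reg = 0x0000f00d;

	reg = acc_apply(reg, ACC_ST, 0x00000100);	/* set bit 8      */
	reg = acc_apply(reg, ACC_CL, 0x0000000d);	/* clear bits 0-3 */
	reg = acc_apply(reg, ACC_WR, 0xdeadbeef);	/* overwrite      */
	printf("final register value: 0x%08x\n", reg);
	return 0;
}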
@@ -2672,7 +3894,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) vport, &lpfc_debugfs_op_nodelist); if (!vport->debug_nodelist) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0409 Can't create debugfs nodelist\n"); + "2985 Can't create debugfs nodelist\n"); goto debug_failed; } @@ -2709,6 +3931,20 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) idiag.offset.last_rd = 0; } + /* iDiag PCI BAR access */ + snprintf(name, sizeof(name), "barAcc"); + if (!phba->idiag_bar_acc) { + phba->idiag_bar_acc = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->idiag_root, phba, &lpfc_idiag_op_barAcc); + if (!phba->idiag_bar_acc) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "3056 Can't create idiag debugfs\n"); + goto debug_failed; + } + idiag.offset.last_rd = 0; + } + /* iDiag get PCI function queue information */ snprintf(name, sizeof(name), "queInfo"); if (!phba->idiag_que_info) { @@ -2748,6 +3984,50 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) } } + /* iDiag access PCI function control registers */ + snprintf(name, sizeof(name), "ctlAcc"); + if (!phba->idiag_ctl_acc) { + phba->idiag_ctl_acc = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->idiag_root, phba, &lpfc_idiag_op_ctlAcc); + if (!phba->idiag_ctl_acc) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "2981 Can't create idiag debugfs\n"); + goto debug_failed; + } + } + + /* iDiag access mbox commands */ + snprintf(name, sizeof(name), "mbxAcc"); + if (!phba->idiag_mbx_acc) { + phba->idiag_mbx_acc = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->idiag_root, phba, &lpfc_idiag_op_mbxAcc); + if (!phba->idiag_mbx_acc) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "2980 Can't create idiag debugfs\n"); + goto debug_failed; + } + } + + /* iDiag extents access commands */ + if (phba->sli4_hba.extents_in_use) { + snprintf(name, sizeof(name), "extAcc"); + if (!phba->idiag_ext_acc) { + phba->idiag_ext_acc = + debugfs_create_file(name, + S_IFREG|S_IRUGO|S_IWUSR, + phba->idiag_root, phba, + &lpfc_idiag_op_extAcc); + if (!phba->idiag_ext_acc) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "2986 Cant create " + "idiag debugfs\n"); + goto debug_failed; + } + } + } + debug_failed: return; #endif @@ -2782,7 +4062,6 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport) debugfs_remove(vport->debug_nodelist); /* nodelist */ vport->debug_nodelist = NULL; } - if (vport->vport_debugfs_root) { debugfs_remove(vport->vport_debugfs_root); /* vportX */ vport->vport_debugfs_root = NULL; @@ -2826,6 +4105,21 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport) * iDiag release */ if (phba->sli_rev == LPFC_SLI_REV4) { + if (phba->idiag_ext_acc) { + /* iDiag extAcc */ + debugfs_remove(phba->idiag_ext_acc); + phba->idiag_ext_acc = NULL; + } + if (phba->idiag_mbx_acc) { + /* iDiag mbxAcc */ + debugfs_remove(phba->idiag_mbx_acc); + phba->idiag_mbx_acc = NULL; + } + if (phba->idiag_ctl_acc) { + /* iDiag ctlAcc */ + debugfs_remove(phba->idiag_ctl_acc); + phba->idiag_ctl_acc = NULL; + } if (phba->idiag_drb_acc) { /* iDiag drbAcc */ debugfs_remove(phba->idiag_drb_acc); @@ -2841,6 +4135,11 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport) debugfs_remove(phba->idiag_que_info); phba->idiag_que_info = NULL; } + if (phba->idiag_bar_acc) { + /* iDiag barAcc */ + debugfs_remove(phba->idiag_bar_acc); + phba->idiag_bar_acc = NULL; + } if (phba->idiag_pci_cfg) { /* iDiag pciCfg */ debugfs_remove(phba->idiag_pci_cfg); diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h index 6525a5e62d2..f83bd944edd 
100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.h +++ b/drivers/scsi/lpfc/lpfc_debugfs.h @@ -39,14 +39,51 @@ /* hbqinfo output buffer size */ #define LPFC_HBQINFO_SIZE 8192 +/* + * For SLI4 iDiag debugfs diagnostics tool + */ + /* pciConf */ #define LPFC_PCI_CFG_BROWSE 0xffff #define LPFC_PCI_CFG_RD_CMD_ARG 2 #define LPFC_PCI_CFG_WR_CMD_ARG 3 #define LPFC_PCI_CFG_SIZE 4096 -#define LPFC_PCI_CFG_RD_BUF_SIZE (LPFC_PCI_CFG_SIZE/2) #define LPFC_PCI_CFG_RD_SIZE (LPFC_PCI_CFG_SIZE/4) +#define IDIAG_PCICFG_WHERE_INDX 0 +#define IDIAG_PCICFG_COUNT_INDX 1 +#define IDIAG_PCICFG_VALUE_INDX 2 + +/* barAcc */ +#define LPFC_PCI_BAR_BROWSE 0xffff +#define LPFC_PCI_BAR_RD_CMD_ARG 3 +#define LPFC_PCI_BAR_WR_CMD_ARG 3 + +#define LPFC_PCI_IF0_BAR0_SIZE (1024 * 16) +#define LPFC_PCI_IF0_BAR1_SIZE (1024 * 128) +#define LPFC_PCI_IF0_BAR2_SIZE (1024 * 128) +#define LPFC_PCI_IF2_BAR0_SIZE (1024 * 32) + +#define LPFC_PCI_BAR_RD_BUF_SIZE 4096 +#define LPFC_PCI_BAR_RD_SIZE (LPFC_PCI_BAR_RD_BUF_SIZE/4) + +#define LPFC_PCI_IF0_BAR0_RD_SIZE (LPFC_PCI_IF0_BAR0_SIZE/4) +#define LPFC_PCI_IF0_BAR1_RD_SIZE (LPFC_PCI_IF0_BAR1_SIZE/4) +#define LPFC_PCI_IF0_BAR2_RD_SIZE (LPFC_PCI_IF0_BAR2_SIZE/4) +#define LPFC_PCI_IF2_BAR0_RD_SIZE (LPFC_PCI_IF2_BAR0_SIZE/4) + +#define IDIAG_BARACC_BAR_NUM_INDX 0 +#define IDIAG_BARACC_OFF_SET_INDX 1 +#define IDIAG_BARACC_ACC_MOD_INDX 2 +#define IDIAG_BARACC_REG_VAL_INDX 2 +#define IDIAG_BARACC_BAR_SZE_INDX 3 + +#define IDIAG_BARACC_BAR_0 0 +#define IDIAG_BARACC_BAR_1 1 +#define IDIAG_BARACC_BAR_2 2 + +#define SINGLE_WORD 1 + /* queue info */ #define LPFC_QUE_INFO_GET_BUF_SIZE 4096 @@ -63,7 +100,14 @@ #define LPFC_IDIAG_WQ 4 #define LPFC_IDIAG_RQ 5 -/* doorbell acc */ +#define IDIAG_QUEACC_QUETP_INDX 0 +#define IDIAG_QUEACC_QUEID_INDX 1 +#define IDIAG_QUEACC_INDEX_INDX 2 +#define IDIAG_QUEACC_COUNT_INDX 3 +#define IDIAG_QUEACC_OFFST_INDX 4 +#define IDIAG_QUEACC_VALUE_INDX 5 + +/* doorbell register acc */ #define LPFC_DRB_ACC_ALL 0xffff #define LPFC_DRB_ACC_RD_CMD_ARG 1 #define LPFC_DRB_ACC_WR_CMD_ARG 2 @@ -76,6 +120,67 @@ #define LPFC_DRB_MAX 4 +#define IDIAG_DRBACC_REGID_INDX 0 +#define IDIAG_DRBACC_VALUE_INDX 1 + +/* control register acc */ +#define LPFC_CTL_ACC_ALL 0xffff +#define LPFC_CTL_ACC_RD_CMD_ARG 1 +#define LPFC_CTL_ACC_WR_CMD_ARG 2 +#define LPFC_CTL_ACC_BUF_SIZE 256 + +#define LPFC_CTL_PORT_SEM 1 +#define LPFC_CTL_PORT_STA 2 +#define LPFC_CTL_PORT_CTL 3 +#define LPFC_CTL_PORT_ER1 4 +#define LPFC_CTL_PORT_ER2 5 +#define LPFC_CTL_PDEV_CTL 6 + +#define LPFC_CTL_MAX 6 + +#define IDIAG_CTLACC_REGID_INDX 0 +#define IDIAG_CTLACC_VALUE_INDX 1 + +/* mailbox access */ +#define LPFC_MBX_DMP_ARG 4 + +#define LPFC_MBX_ACC_BUF_SIZE 512 +#define LPFC_MBX_ACC_LBUF_SZ 128 + +#define LPFC_MBX_DMP_MBX_WORD 0x00000001 +#define LPFC_MBX_DMP_MBX_BYTE 0x00000002 +#define LPFC_MBX_DMP_MBX_ALL (LPFC_MBX_DMP_MBX_WORD | LPFC_MBX_DMP_MBX_BYTE) + +#define LPFC_BSG_DMP_MBX_RD_MBX 0x00000001 +#define LPFC_BSG_DMP_MBX_RD_BUF 0x00000002 +#define LPFC_BSG_DMP_MBX_WR_MBX 0x00000004 +#define LPFC_BSG_DMP_MBX_WR_BUF 0x00000008 +#define LPFC_BSG_DMP_MBX_ALL (LPFC_BSG_DMP_MBX_RD_MBX | \ + LPFC_BSG_DMP_MBX_RD_BUF | \ + LPFC_BSG_DMP_MBX_WR_MBX | \ + LPFC_BSG_DMP_MBX_WR_BUF) + +#define LPFC_MBX_DMP_ALL 0xffff +#define LPFC_MBX_ALL_CMD 0xff + +#define IDIAG_MBXACC_MBCMD_INDX 0 +#define IDIAG_MBXACC_DPMAP_INDX 1 +#define IDIAG_MBXACC_DPCNT_INDX 2 +#define IDIAG_MBXACC_WDCNT_INDX 3 + +/* extents access */ +#define LPFC_EXT_ACC_CMD_ARG 1 +#define LPFC_EXT_ACC_BUF_SIZE 4096 + +#define LPFC_EXT_ACC_AVAIL 0x1 +#define 
LPFC_EXT_ACC_ALLOC 0x2 +#define LPFC_EXT_ACC_DRIVR 0x4 +#define LPFC_EXT_ACC_ALL (LPFC_EXT_ACC_DRIVR | \ + LPFC_EXT_ACC_AVAIL | \ + LPFC_EXT_ACC_ALLOC) + +#define IDIAG_EXTACC_EXMAP_INDX 0 + #define SIZE_U8 sizeof(uint8_t) #define SIZE_U16 sizeof(uint16_t) #define SIZE_U32 sizeof(uint32_t) @@ -110,6 +215,11 @@ struct lpfc_idiag_cmd { #define LPFC_IDIAG_CMD_PCICFG_ST 0x00000003 #define LPFC_IDIAG_CMD_PCICFG_CL 0x00000004 +#define LPFC_IDIAG_CMD_BARACC_RD 0x00000008 +#define LPFC_IDIAG_CMD_BARACC_WR 0x00000009 +#define LPFC_IDIAG_CMD_BARACC_ST 0x0000000a +#define LPFC_IDIAG_CMD_BARACC_CL 0x0000000b + #define LPFC_IDIAG_CMD_QUEACC_RD 0x00000011 #define LPFC_IDIAG_CMD_QUEACC_WR 0x00000012 #define LPFC_IDIAG_CMD_QUEACC_ST 0x00000013 @@ -119,6 +229,17 @@ struct lpfc_idiag_cmd { #define LPFC_IDIAG_CMD_DRBACC_WR 0x00000022 #define LPFC_IDIAG_CMD_DRBACC_ST 0x00000023 #define LPFC_IDIAG_CMD_DRBACC_CL 0x00000024 + +#define LPFC_IDIAG_CMD_CTLACC_RD 0x00000031 +#define LPFC_IDIAG_CMD_CTLACC_WR 0x00000032 +#define LPFC_IDIAG_CMD_CTLACC_ST 0x00000033 +#define LPFC_IDIAG_CMD_CTLACC_CL 0x00000034 + +#define LPFC_IDIAG_CMD_MBXACC_DP 0x00000041 +#define LPFC_IDIAG_BSG_MBXACC_DP 0x00000042 + +#define LPFC_IDIAG_CMD_EXTACC_RD 0x00000051 + uint32_t data[LPFC_IDIAG_CMD_DATA_SIZE]; }; diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 32a084534f3..023da0e00d3 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -647,21 +647,15 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } lpfc_cleanup_pending_mbox(vport); - if (phba->sli_rev == LPFC_SLI_REV4) + if (phba->sli_rev == LPFC_SLI_REV4) { lpfc_sli4_unreg_all_rpis(vport); - - if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { lpfc_mbx_unreg_vpi(vport); spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; - spin_unlock_irq(shost->host_lock); - } - /* - * If VPI is unreged, driver need to do INIT_VPI - * before re-registering - */ - if (phba->sli_rev == LPFC_SLI_REV4) { - spin_lock_irq(shost->host_lock); + /* + * If VPI is unreged, driver need to do INIT_VPI + * before re-registering + */ vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; spin_unlock_irq(shost->host_lock); } @@ -880,6 +874,8 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, phba->fcf.current_rec.fcf_indx, irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpTimeout); + lpfc_sli4_set_fcf_flogi_fail(phba, + phba->fcf.current_rec.fcf_indx); fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index); if (rc) @@ -1096,11 +1092,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* Set the fcfi to the fcfi we registered with */ elsiocb->iocb.ulpContext = phba->fcf.fcfi; } - } else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { - sp->cmn.request_multiple_Nport = 1; - /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */ - icmd->ulpCt_h = 1; - icmd->ulpCt_l = 0; + } else { + if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { + sp->cmn.request_multiple_Nport = 1; + /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */ + icmd->ulpCt_h = 1; + icmd->ulpCt_l = 0; + } else + sp->cmn.request_multiple_Nport = 0; } if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) { @@ -3656,7 +3655,8 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, } icmd = &elsiocb->iocb; - icmd->ulpContext = oldcmd->ulpContext; /* Xri */ + icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ + icmd->unsli3.rcvsli3.ox_id = 
oldcmd->unsli3.rcvsli3.ox_id; pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); *((uint32_t *) (pcmd)) = ELS_CMD_ACC; pcmd += sizeof(uint32_t); @@ -3673,7 +3673,8 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, return 1; icmd = &elsiocb->iocb; - icmd->ulpContext = oldcmd->ulpContext; /* Xri */ + icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ + icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); if (mbox) @@ -3695,7 +3696,8 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, return 1; icmd = &elsiocb->iocb; - icmd->ulpContext = oldcmd->ulpContext; /* Xri */ + icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ + icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt, @@ -3781,7 +3783,8 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, icmd = &elsiocb->iocb; oldcmd = &oldiocb->iocb; - icmd->ulpContext = oldcmd->ulpContext; /* Xri */ + icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ + icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; @@ -3853,7 +3856,8 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, icmd = &elsiocb->iocb; oldcmd = &oldiocb->iocb; - icmd->ulpContext = oldcmd->ulpContext; /* Xri */ + icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ + icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; /* Xmit ADISC ACC response tag <ulpIoTag> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, @@ -3931,7 +3935,9 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, icmd = &elsiocb->iocb; oldcmd = &oldiocb->iocb; - icmd->ulpContext = oldcmd->ulpContext; /* Xri */ + icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ + icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; + /* Xmit PRLI ACC response tag <ulpIoTag> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0131 Xmit PRLI ACC response tag x%x xri x%x, " @@ -4035,7 +4041,9 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, icmd = &elsiocb->iocb; oldcmd = &oldiocb->iocb; - icmd->ulpContext = oldcmd->ulpContext; /* Xri */ + icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ + icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; + /* Xmit RNID ACC response tag <ulpIoTag> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0132 Xmit RNID ACC response tag x%x xri x%x\n", @@ -4163,7 +4171,9 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, if (!elsiocb) return 1; - elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri */ + elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri / rx_id */ + elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id; + /* Xmit ECHO ACC response tag <ulpIoTag> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "2876 Xmit ECHO ACC response tag x%x xri x%x\n", @@ -5054,13 +5064,15 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) uint8_t *pcmd; struct lpfc_iocbq *elsiocb; struct lpfc_nodelist *ndlp; - uint16_t xri; + uint16_t oxid; + uint16_t rxid; uint32_t cmdsize; mb = &pmb->u.mb; ndlp = (struct lpfc_nodelist *) pmb->context2; - xri = (uint16_t) ((unsigned long)(pmb->context1)); + rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff); + oxid = (uint16_t) (((unsigned 
long)(pmb->context1) >> 16) & 0xffff); pmb->context1 = NULL; pmb->context2 = NULL; @@ -5082,7 +5094,8 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) return; icmd = &elsiocb->iocb; - icmd->ulpContext = xri; + icmd->ulpContext = rxid; + icmd->unsli3.rcvsli3.ox_id = oxid; pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); *((uint32_t *) (pcmd)) = ELS_CMD_ACC; @@ -5137,13 +5150,16 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) uint8_t *pcmd; struct lpfc_iocbq *elsiocb; struct lpfc_nodelist *ndlp; - uint16_t xri, status; + uint16_t status; + uint16_t oxid; + uint16_t rxid; uint32_t cmdsize; mb = &pmb->u.mb; ndlp = (struct lpfc_nodelist *) pmb->context2; - xri = (uint16_t) ((unsigned long)(pmb->context1)); + rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff); + oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff); pmb->context1 = NULL; pmb->context2 = NULL; @@ -5165,7 +5181,8 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) return; icmd = &elsiocb->iocb; - icmd->ulpContext = xri; + icmd->ulpContext = rxid; + icmd->unsli3.rcvsli3.ox_id = oxid; pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); *((uint32_t *) (pcmd)) = ELS_CMD_ACC; @@ -5238,8 +5255,9 @@ lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC); if (mbox) { lpfc_read_lnk_stat(phba, mbox); - mbox->context1 = - (void *)((unsigned long) cmdiocb->iocb.ulpContext); + mbox->context1 = (void *)((unsigned long) + ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) | + cmdiocb->iocb.ulpContext)); /* rx_id */ mbox->context2 = lpfc_nlp_get(ndlp); mbox->vport = vport; mbox->mbox_cmpl = lpfc_els_rsp_rls_acc; @@ -5314,7 +5332,8 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, pcmd += sizeof(uint32_t); /* Skip past command */ /* use the command's xri in the response */ - elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext; + elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext; /* Xri / rx_id */ + elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id; rtv_rsp = (struct RTV_RSP *)pcmd; @@ -5399,8 +5418,9 @@ lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC); if (mbox) { lpfc_read_lnk_stat(phba, mbox); - mbox->context1 = - (void *)((unsigned long) cmdiocb->iocb.ulpContext); + mbox->context1 = (void *)((unsigned long) + ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) | + cmdiocb->iocb.ulpContext)); /* rx_id */ mbox->context2 = lpfc_nlp_get(ndlp); mbox->vport = vport; mbox->mbox_cmpl = lpfc_els_rsp_rps_acc; @@ -5554,7 +5574,8 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, icmd = &elsiocb->iocb; oldcmd = &oldiocb->iocb; - icmd->ulpContext = oldcmd->ulpContext; /* Xri */ + icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ + icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); *((uint32_t *) (pcmd)) = ELS_CMD_ACC; @@ -6586,7 +6607,7 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi) { struct lpfc_vport *vport; unsigned long flags; - int i; + int i = 0; /* The physical ports are always vpi 0 - translate is unnecessary. 
*/ if (vpi > 0) { @@ -6609,7 +6630,7 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi) spin_lock_irqsave(&phba->hbalock, flags); list_for_each_entry(vport, &phba->port_list, listentry) { - if (vport->vpi == vpi) { + if (vport->vpi == i) { spin_unlock_irqrestore(&phba->hbalock, flags); return vport; } @@ -7787,6 +7808,7 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, { uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); + uint16_t lxri = 0; struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; unsigned long iflag = 0; @@ -7815,7 +7837,12 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, } } spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); - sglq_entry = __lpfc_get_active_sglq(phba, xri); + lxri = lpfc_sli4_xri_inrange(phba, xri); + if (lxri == NO_XRI) { + spin_unlock_irqrestore(&phba->hbalock, iflag); + return; + } + sglq_entry = __lpfc_get_active_sglq(phba, lxri); if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) { spin_unlock_irqrestore(&phba->hbalock, iflag); return; diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 18d0dbfda2b..0b47adf9fee 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1109,6 +1109,28 @@ out: return; } +/** + * lpfc_sli4_clear_fcf_rr_bmask + * @phba pointer to the struct lpfc_hba for this port. + * This fucnction resets the round robin bit mask and clears the + * fcf priority list. The list deletions are done while holding the + * hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared + * from the lpfc_fcf_pri record. + **/ +void +lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba) +{ + struct lpfc_fcf_pri *fcf_pri; + struct lpfc_fcf_pri *next_fcf_pri; + memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask)); + spin_lock_irq(&phba->hbalock); + list_for_each_entry_safe(fcf_pri, next_fcf_pri, + &phba->fcf.fcf_pri_list, list) { + list_del_init(&fcf_pri->list); + fcf_pri->fcf_rec.flag = 0; + } + spin_unlock_irq(&phba->hbalock); +} static void lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) { @@ -1130,7 +1152,8 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) spin_unlock_irq(&phba->hbalock); /* If there is a pending FCoE event, restart FCF table scan. */ - if (lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF)) + if ((!(phba->hba_flag & FCF_RR_INPROG)) && + lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF)) goto fail_out; /* Mark successful completion of FCF table scan */ @@ -1250,6 +1273,30 @@ lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id) } /** + * lpfc_update_fcf_record - Update driver fcf record + * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record. + * @phba: pointer to lpfc hba data structure. + * @fcf_index: Index for the lpfc_fcf_record. + * @new_fcf_record: pointer to hba fcf record. + * + * This routine updates the driver FCF priority record from the new HBA FCF + * record. This routine is called with the host lock held. + **/ +static void +__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index, + struct fcf_record *new_fcf_record + ) +{ + struct lpfc_fcf_pri *fcf_pri; + + fcf_pri = &phba->fcf.fcf_pri[fcf_index]; + fcf_pri->fcf_rec.fcf_index = fcf_index; + /* FCF record priority */ + fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority; + +} + +/** * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba. * @fcf: pointer to driver fcf record. * @new_fcf_record: pointer to fcf record. 
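/*
 * Illustrative sketch, not driver code: the surrounding lpfc_hbadisc.c hunks
 * add a per-FCF priority record and lpfc_sli4_fcf_pri_list_add(), which keeps
 * the fcf_pri list sorted by ascending FIP priority (a lower value is
 * preferred) so the head of the list holds the best failover candidates for
 * roundrobin selection.  The minimal program below shows only that
 * ordered-insert idea on a plain singly linked list; the hbalock, rr_bmask
 * and eligible_fcf_cnt bookkeeping done by the real routine are omitted.
 */
#include <stdint.h>
#include <stdio.h>

struct fcf_pri_demo {
	uint16_t fcf_index;
	uint16_t priority;		/* lower value is preferred */
	struct fcf_pri_demo *next;
};

/* Insert @node so the list stays sorted by ascending priority. */
static void pri_list_add(struct fcf_pri_demo **head, struct fcf_pri_demo *node)
{
	struct fcf_pri_demo **pos = head;

	while (*pos && (*pos)->priority <= node->priority)
		pos = &(*pos)->next;
	node->next = *pos;
	*pos = node;
}

int main(void)
{
	struct fcf_pri_demo recs[] = {
		{ .fcf_index = 0, .priority = 128 },
		{ .fcf_index = 1, .priority =  64 },
		{ .fcf_index = 2, .priority = 128 },
	};
	struct fcf_pri_demo *head = NULL, *p;
	size_t i;

	for (i = 0; i < sizeof(recs) / sizeof(recs[0]); i++)
		pri_list_add(&head, &recs[i]);

	for (p = head; p; p = p->next)
		printf("fcf %u pri %u\n",
		       (unsigned int)p->fcf_index, (unsigned int)p->priority);
	return 0;
}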
@@ -1332,6 +1379,9 @@ __lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec, fcf_rec->addr_mode = addr_mode; fcf_rec->vlan_id = vlan_id; fcf_rec->flag |= (flag | RECORD_VALID); + __lpfc_update_fcf_record_pri(phba, + bf_get(lpfc_fcf_record_fcf_index, new_fcf_record), + new_fcf_record); } /** @@ -1834,6 +1884,8 @@ lpfc_sli4_fcf_record_match(struct lpfc_hba *phba, return false; if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record)) return false; + if (fcf_rec->priority != new_fcf_record->fip_priority) + return false; return true; } @@ -1897,6 +1949,152 @@ stop_flogi_current_fcf: } /** + * lpfc_sli4_fcf_pri_list_del + * @phba: pointer to lpfc hba data structure. + * @fcf_index the index of the fcf record to delete + * This routine checks the on list flag of the fcf_index to be deleted. + * If it is one the list then it is removed from the list, and the flag + * is cleared. This routine grab the hbalock before removing the fcf + * record from the list. + **/ +static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba, + uint16_t fcf_index) +{ + struct lpfc_fcf_pri *new_fcf_pri; + + new_fcf_pri = &phba->fcf.fcf_pri[fcf_index]; + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "3058 deleting idx x%x pri x%x flg x%x\n", + fcf_index, new_fcf_pri->fcf_rec.priority, + new_fcf_pri->fcf_rec.flag); + spin_lock_irq(&phba->hbalock); + if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) { + if (phba->fcf.current_rec.priority == + new_fcf_pri->fcf_rec.priority) + phba->fcf.eligible_fcf_cnt--; + list_del_init(&new_fcf_pri->list); + new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST; + } + spin_unlock_irq(&phba->hbalock); +} + +/** + * lpfc_sli4_set_fcf_flogi_fail + * @phba: pointer to lpfc hba data structure. + * @fcf_index the index of the fcf record to update + * This routine acquires the hbalock and then set the LPFC_FCF_FLOGI_FAILED + * flag so the the round robin slection for the particular priority level + * will try a different fcf record that does not have this bit set. + * If the fcf record is re-read for any reason this flag is cleared brfore + * adding it to the priority list. + **/ +void +lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index) +{ + struct lpfc_fcf_pri *new_fcf_pri; + new_fcf_pri = &phba->fcf.fcf_pri[fcf_index]; + spin_lock_irq(&phba->hbalock); + new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED; + spin_unlock_irq(&phba->hbalock); +} + +/** + * lpfc_sli4_fcf_pri_list_add + * @phba: pointer to lpfc hba data structure. + * @fcf_index the index of the fcf record to add + * This routine checks the priority of the fcf_index to be added. + * If it is a lower priority than the current head of the fcf_pri list + * then it is added to the list in the right order. + * If it is the same priority as the current head of the list then it + * is added to the head of the list and its bit in the rr_bmask is set. + * If the fcf_index to be added is of a higher priority than the current + * head of the list then the rr_bmask is cleared, its bit is set in the + * rr_bmask and it is added to the head of the list. 
+ * returns: + * 0=success 1=failure + **/ +int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba, uint16_t fcf_index, + struct fcf_record *new_fcf_record) +{ + uint16_t current_fcf_pri; + uint16_t last_index; + struct lpfc_fcf_pri *fcf_pri; + struct lpfc_fcf_pri *next_fcf_pri; + struct lpfc_fcf_pri *new_fcf_pri; + int ret; + + new_fcf_pri = &phba->fcf.fcf_pri[fcf_index]; + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "3059 adding idx x%x pri x%x flg x%x\n", + fcf_index, new_fcf_record->fip_priority, + new_fcf_pri->fcf_rec.flag); + spin_lock_irq(&phba->hbalock); + if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) + list_del_init(&new_fcf_pri->list); + new_fcf_pri->fcf_rec.fcf_index = fcf_index; + new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority; + if (list_empty(&phba->fcf.fcf_pri_list)) { + list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list); + ret = lpfc_sli4_fcf_rr_index_set(phba, + new_fcf_pri->fcf_rec.fcf_index); + goto out; + } + + last_index = find_first_bit(phba->fcf.fcf_rr_bmask, + LPFC_SLI4_FCF_TBL_INDX_MAX); + if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { + ret = 0; /* Empty rr list */ + goto out; + } + current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority; + if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) { + list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list); + if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) { + memset(phba->fcf.fcf_rr_bmask, 0, + sizeof(*phba->fcf.fcf_rr_bmask)); + /* fcfs_at_this_priority_level = 1; */ + phba->fcf.eligible_fcf_cnt = 1; + } else + /* fcfs_at_this_priority_level++; */ + phba->fcf.eligible_fcf_cnt++; + ret = lpfc_sli4_fcf_rr_index_set(phba, + new_fcf_pri->fcf_rec.fcf_index); + goto out; + } + + list_for_each_entry_safe(fcf_pri, next_fcf_pri, + &phba->fcf.fcf_pri_list, list) { + if (new_fcf_pri->fcf_rec.priority <= + fcf_pri->fcf_rec.priority) { + if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list) + list_add(&new_fcf_pri->list, + &phba->fcf.fcf_pri_list); + else + list_add(&new_fcf_pri->list, + &((struct lpfc_fcf_pri *) + fcf_pri->list.prev)->list); + ret = 0; + goto out; + } else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list + || new_fcf_pri->fcf_rec.priority < + next_fcf_pri->fcf_rec.priority) { + list_add(&new_fcf_pri->list, &fcf_pri->list); + ret = 0; + goto out; + } + if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority) + continue; + + } + ret = 1; +out: + /* we use = instead of |= to clear the FLOGI_FAILED flag. */ + new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST; + spin_unlock_irq(&phba->hbalock); + return ret; +} + +/** * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler. * @phba: pointer to lpfc hba data structure. * @mboxq: pointer to mailbox object. @@ -1958,6 +2156,9 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) * record for roundrobin FCF failover. 
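/*
 * Aside - lpfc_sli4_fcf_pri_list_add() above keeps the FCF priority list
 * sorted so the lowest FIP priority value (the most preferred FCF) stays at
 * the head, and restarts the roundrobin bookkeeping whenever a strictly
 * better priority shows up.  A user-space sketch of that ordered insert;
 * the types and names below are invented, not taken from the driver.
 */
#include <stdio.h>

struct pri_node {
        struct pri_node *next;
        unsigned int index;
        unsigned int priority;          /* lower value = preferred, as in FIP */
};

/* Insert keeping ascending priority order; return 1 if the head improved. */
static int pri_list_add(struct pri_node **head, struct pri_node *new)
{
        struct pri_node **pp = head;
        int new_best = (*head == NULL || new->priority < (*head)->priority);

        while (*pp && (*pp)->priority <= new->priority)
                pp = &(*pp)->next;
        new->next = *pp;
        *pp = new;
        return new_best;                /* caller would reset its rr bitmap here */
}

int main(void)
{
        struct pri_node a = { .index = 3, .priority = 128 };
        struct pri_node b = { .index = 7, .priority = 2 };
        struct pri_node *head = NULL;

        pri_list_add(&head, &a);
        if (pri_list_add(&head, &b))
                printf("new best FCF idx %u pri %u\n", head->index, head->priority);
        return 0;
}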
*/ if (!rc) { + lpfc_sli4_fcf_pri_list_del(phba, + bf_get(lpfc_fcf_record_fcf_index, + new_fcf_record)); lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, "2781 FCF (x%x) failed connection " "list check: (x%x/x%x)\n", @@ -2005,7 +2206,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) goto read_next_fcf; } else { fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); - rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index); + rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, + new_fcf_record); if (rc) goto read_next_fcf; } @@ -2018,7 +2220,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) */ spin_lock_irq(&phba->hbalock); if (phba->fcf.fcf_flag & FCF_IN_USE) { - if (lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, + if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV && + lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, new_fcf_record, vlan_id)) { if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) == phba->fcf.current_rec.fcf_indx) { @@ -2232,7 +2435,8 @@ read_next_fcf: (phba->fcf.fcf_flag & FCF_REDISC_PEND)) return; - if (phba->fcf.fcf_flag & FCF_IN_USE) { + if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV && + phba->fcf.fcf_flag & FCF_IN_USE) { /* * In case the current in-use FCF record no * longer existed during FCF discovery that @@ -2247,7 +2451,6 @@ read_next_fcf: spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag |= FCF_REDISC_FOV; spin_unlock_irq(&phba->hbalock); - lpfc_sli4_mbox_cmd_free(phba, mboxq); lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); return; @@ -2424,7 +2627,8 @@ lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) /* Update the eligible FCF record index bmask */ fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); - rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index); + + rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record); out: lpfc_sli4_mbox_cmd_free(phba, mboxq); @@ -2645,6 +2849,7 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) vport->vpi_state |= LPFC_VPI_REGISTERED; vport->fc_flag |= FC_VFI_REGISTERED; vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; + vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI; spin_unlock_irq(shost->host_lock); if (vport->port_state == LPFC_FABRIC_CFG_LINK) { @@ -2893,8 +3098,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) goto out; } /* Reset FCF roundrobin bmask for new discovery */ - memset(phba->fcf.fcf_rr_bmask, 0, - sizeof(*phba->fcf.fcf_rr_bmask)); + lpfc_sli4_clear_fcf_rr_bmask(phba); } return; @@ -5592,7 +5796,7 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba) spin_unlock_irq(&phba->hbalock); /* Reset FCF roundrobin bmask for new discovery */ - memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask)); + lpfc_sli4_clear_fcf_rr_bmask(phba); rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 9059524cf22..046edc4ab35 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -2955,18 +2955,18 @@ typedef struct _SLI2_RDSC { typedef struct _PCB { #ifdef __BIG_ENDIAN_BITFIELD uint32_t type:8; -#define TYPE_NATIVE_SLI2 0x01; +#define TYPE_NATIVE_SLI2 0x01 uint32_t feature:8; -#define FEATURE_INITIAL_SLI2 0x01; +#define FEATURE_INITIAL_SLI2 0x01 uint32_t rsvd:12; uint32_t maxRing:4; #else /* __LITTLE_ENDIAN_BITFIELD */ uint32_t maxRing:4; uint32_t rsvd:12; uint32_t feature:8; -#define FEATURE_INITIAL_SLI2 0x01; +#define 
FEATURE_INITIAL_SLI2 0x01 uint32_t type:8; -#define TYPE_NATIVE_SLI2 0x01; +#define TYPE_NATIVE_SLI2 0x01 #endif uint32_t mailBoxSize; @@ -3470,11 +3470,16 @@ typedef struct { or CMD_IOCB_RCV_SEQ64_CX (0xB5) */ struct rcv_sli3 { - uint32_t word8Rsvd; #ifdef __BIG_ENDIAN_BITFIELD + uint16_t ox_id; + uint16_t seq_cnt; + uint16_t vpi; uint16_t word9Rsvd; #else /* __LITTLE_ENDIAN */ + uint16_t seq_cnt; + uint16_t ox_id; + uint16_t word9Rsvd; uint16_t vpi; #endif diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 11e26a26b5d..7f8003b5181 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -170,15 +170,8 @@ struct lpfc_sli_intf { #define LPFC_PCI_FUNC3 3 #define LPFC_PCI_FUNC4 4 -/* SLI4 interface type-2 control register offsets */ -#define LPFC_CTL_PORT_SEM_OFFSET 0x400 -#define LPFC_CTL_PORT_STA_OFFSET 0x404 -#define LPFC_CTL_PORT_CTL_OFFSET 0x408 -#define LPFC_CTL_PORT_ER1_OFFSET 0x40C -#define LPFC_CTL_PORT_ER2_OFFSET 0x410 +/* SLI4 interface type-2 PDEV_CTL register */ #define LPFC_CTL_PDEV_CTL_OFFSET 0x414 - -/* Some SLI4 interface type-2 PDEV_CTL register bits */ #define LPFC_CTL_PDEV_CTL_DRST 0x00000001 #define LPFC_CTL_PDEV_CTL_FRST 0x00000002 #define LPFC_CTL_PDEV_CTL_DD 0x00000004 @@ -337,6 +330,7 @@ struct lpfc_cqe { #define CQE_CODE_RELEASE_WQE 0x2 #define CQE_CODE_RECEIVE 0x4 #define CQE_CODE_XRI_ABORTED 0x5 +#define CQE_CODE_RECEIVE_V1 0x9 /* completion queue entry for wqe completions */ struct lpfc_wcqe_complete { @@ -440,7 +434,10 @@ struct lpfc_rcqe { #define FC_STATUS_RQ_BUF_LEN_EXCEEDED 0x11 /* payload truncated */ #define FC_STATUS_INSUFF_BUF_NEED_BUF 0x12 /* Insufficient buffers */ #define FC_STATUS_INSUFF_BUF_FRM_DISC 0x13 /* Frame Discard */ - uint32_t reserved1; + uint32_t word1; +#define lpfc_rcqe_fcf_id_v1_SHIFT 0 +#define lpfc_rcqe_fcf_id_v1_MASK 0x0000003F +#define lpfc_rcqe_fcf_id_v1_WORD word1 uint32_t word2; #define lpfc_rcqe_length_SHIFT 16 #define lpfc_rcqe_length_MASK 0x0000FFFF @@ -451,6 +448,9 @@ struct lpfc_rcqe { #define lpfc_rcqe_fcf_id_SHIFT 0 #define lpfc_rcqe_fcf_id_MASK 0x0000003F #define lpfc_rcqe_fcf_id_WORD word2 +#define lpfc_rcqe_rq_id_v1_SHIFT 0 +#define lpfc_rcqe_rq_id_v1_MASK 0x0000FFFF +#define lpfc_rcqe_rq_id_v1_WORD word2 uint32_t word3; #define lpfc_rcqe_valid_SHIFT lpfc_cqe_valid_SHIFT #define lpfc_rcqe_valid_MASK lpfc_cqe_valid_MASK @@ -515,7 +515,7 @@ struct lpfc_register { /* The following BAR0 register sets are defined for if_type 0 and 2 UCNAs. 
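/*
 * Aside - the rcv_sli3 change above splits IOCB word 8 (formerly word8Rsvd)
 * into seq_cnt/ox_id halves, with member order flipped between the
 * __BIG_ENDIAN_BITFIELD and __LITTLE_ENDIAN layouts so ox_id always maps to
 * the same bits of the 32-bit word written by the hardware.  A tiny
 * self-check of that overlay; it assumes a little-endian host and is only an
 * illustration, not driver code.
 */
#include <assert.h>
#include <stdint.h>

union word8 {
        uint32_t raw;
        struct {                        /* little-endian member order, as in the patch */
                uint16_t seq_cnt;
                uint16_t ox_id;
        } f;
};

int main(void)
{
        union word8 w = { .raw = 0xABCD1234u };         /* ox_id=0xABCD, seq_cnt=0x1234 */

        assert(w.f.seq_cnt == 0x1234 && w.f.ox_id == 0xABCD);   /* holds on LE hosts */
        return 0;
}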
*/ #define LPFC_SLI_INTF 0x0058 -#define LPFC_SLIPORT_IF2_SMPHR 0x0400 +#define LPFC_CTL_PORT_SEM_OFFSET 0x400 #define lpfc_port_smphr_perr_SHIFT 31 #define lpfc_port_smphr_perr_MASK 0x1 #define lpfc_port_smphr_perr_WORD word0 @@ -575,7 +575,7 @@ struct lpfc_register { #define LPFC_POST_STAGE_PORT_READY 0xC000 #define LPFC_POST_STAGE_PORT_UE 0xF000 -#define LPFC_SLIPORT_STATUS 0x0404 +#define LPFC_CTL_PORT_STA_OFFSET 0x404 #define lpfc_sliport_status_err_SHIFT 31 #define lpfc_sliport_status_err_MASK 0x1 #define lpfc_sliport_status_err_WORD word0 @@ -593,7 +593,7 @@ struct lpfc_register { #define lpfc_sliport_status_rdy_WORD word0 #define MAX_IF_TYPE_2_RESETS 1000 -#define LPFC_SLIPORT_CNTRL 0x0408 +#define LPFC_CTL_PORT_CTL_OFFSET 0x408 #define lpfc_sliport_ctrl_end_SHIFT 30 #define lpfc_sliport_ctrl_end_MASK 0x1 #define lpfc_sliport_ctrl_end_WORD word0 @@ -604,8 +604,8 @@ struct lpfc_register { #define lpfc_sliport_ctrl_ip_WORD word0 #define LPFC_SLIPORT_INIT_PORT 1 -#define LPFC_SLIPORT_ERR_1 0x040C -#define LPFC_SLIPORT_ERR_2 0x0410 +#define LPFC_CTL_PORT_ER1_OFFSET 0x40C +#define LPFC_CTL_PORT_ER2_OFFSET 0x410 /* The following Registers apply to SLI4 if_type 0 UCNAs. They typically * reside in BAR 2. @@ -3198,6 +3198,8 @@ struct lpfc_grp_hdr { #define lpfc_grp_hdr_id_MASK 0x000000FF #define lpfc_grp_hdr_id_WORD word2 uint8_t rev_name[128]; + uint8_t date[12]; + uint8_t revision[32]; }; #define FCP_COMMAND 0x0 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 148b98ddbb1..a3c820083c3 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -2927,6 +2927,8 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost) sizeof fc_host_symbolic_name(shost)); fc_host_supported_speeds(shost) = 0; + if (phba->lmt & LMT_16Gb) + fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT; if (phba->lmt & LMT_10Gb) fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; if (phba->lmt & LMT_8Gb) @@ -3632,8 +3634,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, lpfc_sli4_fcf_dead_failthrough(phba); } else { /* Reset FCF roundrobin bmask for new discovery */ - memset(phba->fcf.fcf_rr_bmask, 0, - sizeof(*phba->fcf.fcf_rr_bmask)); + lpfc_sli4_clear_fcf_rr_bmask(phba); /* * Handling fast FCF failover to a DEAD FCF event is * considered equalivant to receiving CVL to all vports. @@ -3647,7 +3648,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); vport = lpfc_find_vport_by_vpid(phba, - acqe_fip->index - phba->vpi_base); + acqe_fip->index); ndlp = lpfc_sli4_perform_vport_cvl(vport); if (!ndlp) break; @@ -3719,8 +3720,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, * Reset FCF roundrobin bmask for new * discovery. */ - memset(phba->fcf.fcf_rr_bmask, 0, - sizeof(*phba->fcf.fcf_rr_bmask)); + lpfc_sli4_clear_fcf_rr_bmask(phba); } break; default: @@ -4035,6 +4035,34 @@ lpfc_reset_hba(struct lpfc_hba *phba) } /** + * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions + * @phba: pointer to lpfc hba data structure. + * + * This function enables the PCI SR-IOV virtual functions to a physical + * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to + * enable the number of virtual functions to the physical function. As + * not all devices support SR-IOV, the return code from the pci_enable_sriov() + * API call does not considered as an error condition for most of the device. 
+ **/ +uint16_t +lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba) +{ + struct pci_dev *pdev = phba->pcidev; + uint16_t nr_virtfn; + int pos; + + if (!pdev->is_physfn) + return 0; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (pos == 0) + return 0; + + pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn); + return nr_virtfn; +} + +/** * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions * @phba: pointer to lpfc hba data structure. * @nr_vfn: number of virtual functions to be enabled. @@ -4049,8 +4077,17 @@ int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn) { struct pci_dev *pdev = phba->pcidev; + uint16_t max_nr_vfn; int rc; + max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba); + if (nr_vfn > max_nr_vfn) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3057 Requested vfs (%d) greater than " + "supported vfs (%d)", nr_vfn, max_nr_vfn); + return -EINVAL; + } + rc = pci_enable_sriov(pdev, nr_vfn); if (rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, @@ -4516,7 +4553,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) } } - return rc; + return 0; out_free_fcp_eq_hdl: kfree(phba->sli4_hba.fcp_eq_hdl); @@ -4966,17 +5003,14 @@ out_free_mem: * @phba: pointer to lpfc hba data structure. * * This routine is invoked to post rpi header templates to the - * HBA consistent with the SLI-4 interface spec. This routine + * port for those SLI4 ports that do not support extents. This routine * posts a PAGE_SIZE memory region to the port to hold up to - * PAGE_SIZE modulo 64 rpi context headers. - * No locks are held here because this is an initialization routine - * called only from probe or lpfc_online when interrupts are not - * enabled and the driver is reinitializing the device. + * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine + * and should be called only when interrupts are disabled. * * Return codes * 0 - successful - * -ENOMEM - No available memory - * -EIO - The mailbox failed to complete successfully. + * -ERROR - otherwise. 
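/*
 * Aside - lpfc_sli_sriov_nr_virtfn_get() above reads TotalVFs from the PCI
 * SR-IOV extended capability so lpfc_sli_probe_sriov_nr_virtfn() can reject
 * requests that exceed it.  A user-space analogue (not driver code): on
 * kernels that expose the sriov_totalvfs attribute, the same ceiling is
 * visible in sysfs.  The device address below is a placeholder.
 */
#include <stdio.h>

static int sriov_total_vfs(const char *bdf)
{
        char path[256];
        FILE *f;
        int total = 0;

        snprintf(path, sizeof(path),
                 "/sys/bus/pci/devices/%s/sriov_totalvfs", bdf);
        f = fopen(path, "r");
        if (!f)
                return 0;               /* no SR-IOV capability (or not a PF) */
        if (fscanf(f, "%d", &total) != 1)
                total = 0;
        fclose(f);
        return total;
}

int main(void)
{
        printf("TotalVFs: %d\n", sriov_total_vfs("0000:03:00.0"));
        return 0;
}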
**/ int lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) @@ -5687,17 +5721,22 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type) break; case LPFC_SLI_INTF_IF_TYPE_2: phba->sli4_hba.u.if_type2.ERR1regaddr = - phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_1; + phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_ER1_OFFSET; phba->sli4_hba.u.if_type2.ERR2regaddr = - phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_2; + phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_ER2_OFFSET; phba->sli4_hba.u.if_type2.CTRLregaddr = - phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_CNTRL; + phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_CTL_OFFSET; phba->sli4_hba.u.if_type2.STATUSregaddr = - phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_STATUS; + phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_STA_OFFSET; phba->sli4_hba.SLIINTFregaddr = phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; phba->sli4_hba.PSMPHRregaddr = - phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_IF2_SMPHR; + phba->sli4_hba.conf_regs_memmap_p + + LPFC_CTL_PORT_SEM_OFFSET; phba->sli4_hba.RQDBregaddr = phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL; phba->sli4_hba.WQDBregaddr = @@ -8859,11 +8898,11 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw) return -EINVAL; } lpfc_decode_firmware_rev(phba, fwrev, 1); - if (strncmp(fwrev, image->rev_name, strnlen(fwrev, 16))) { + if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3023 Updating Firmware. Current Version:%s " "New Version:%s\n", - fwrev, image->rev_name); + fwrev, image->revision); for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); @@ -8892,9 +8931,9 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw) fw->size - offset); break; } - temp_offset += SLI4_PAGE_SIZE; memcpy(dmabuf->virt, fw->data + temp_offset, SLI4_PAGE_SIZE); + temp_offset += SLI4_PAGE_SIZE; } rc = lpfc_wr_object(phba, &dma_buffer_list, (fw->size - offset), &offset); @@ -9005,6 +9044,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) } INIT_LIST_HEAD(&phba->active_rrq_list); + INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); /* Set up common device driver resources */ error = lpfc_setup_driver_resource_phase2(phba); @@ -9112,7 +9152,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) /* Check if there are static vports to be created. */ lpfc_create_static_vport(phba); - return 0; out_disable_intr: @@ -9483,6 +9522,13 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev) } pci_restore_state(pdev); + + /* + * As the new kernel behavior of pci_restore_state() API call clears + * device saved_state flag, need to save the restored state again. 
+ */ + pci_save_state(pdev); + if (pdev->is_busmaster) pci_set_master(pdev); diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 55676702835..83450cc5c4d 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -2031,7 +2031,7 @@ lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport) bf_set(lpfc_init_vfi_vp, init_vfi, 1); bf_set(lpfc_init_vfi_vfi, init_vfi, vport->phba->sli4_hba.vfi_ids[vport->vfi]); - bf_set(lpfc_init_vpi_vpi, init_vfi, + bf_set(lpfc_init_vfi_vpi, init_vfi, vport->phba->vpi_ids[vport->vpi]); bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi); diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 3ccc97496eb..eadd241eeff 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -1302,13 +1302,13 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, case SCSI_PROT_NORMAL: default: lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9063 BLKGRD: Bad op/guard:%d/%d combination\n", - scsi_get_prot_op(sc), guard_type); + "9063 BLKGRD: Bad op/guard:%d/IP combination\n", + scsi_get_prot_op(sc)); ret = 1; break; } - } else if (guard_type == SHOST_DIX_GUARD_CRC) { + } else { switch (scsi_get_prot_op(sc)) { case SCSI_PROT_READ_STRIP: case SCSI_PROT_WRITE_INSERT: @@ -1324,17 +1324,18 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, case SCSI_PROT_READ_INSERT: case SCSI_PROT_WRITE_STRIP: + *txop = BG_OP_IN_CRC_OUT_NODIF; + *rxop = BG_OP_IN_NODIF_OUT_CRC; + break; + case SCSI_PROT_NORMAL: default: lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9075 BLKGRD: Bad op/guard:%d/%d combination\n", - scsi_get_prot_op(sc), guard_type); + "9075 BLKGRD: Bad op/guard:%d/CRC combination\n", + scsi_get_prot_op(sc)); ret = 1; break; } - } else { - /* unsupported format */ - BUG(); } return ret; @@ -1352,45 +1353,6 @@ lpfc_cmd_blksize(struct scsi_cmnd *sc) return sc->device->sector_size; } -/** - * lpfc_get_cmd_dif_parms - Extract DIF parameters from SCSI command - * @sc: in: SCSI command - * @apptagmask: out: app tag mask - * @apptagval: out: app tag value - * @reftag: out: ref tag (reference tag) - * - * Description: - * Extract DIF parameters from the command if possible. Otherwise, - * use default parameters. 
- * - **/ -static inline void -lpfc_get_cmd_dif_parms(struct scsi_cmnd *sc, uint16_t *apptagmask, - uint16_t *apptagval, uint32_t *reftag) -{ - struct scsi_dif_tuple *spt; - unsigned char op = scsi_get_prot_op(sc); - unsigned int protcnt = scsi_prot_sg_count(sc); - static int cnt; - - if (protcnt && (op == SCSI_PROT_WRITE_STRIP || - op == SCSI_PROT_WRITE_PASS)) { - - cnt++; - spt = page_address(sg_page(scsi_prot_sglist(sc))) + - scsi_prot_sglist(sc)[0].offset; - *apptagmask = 0; - *apptagval = 0; - *reftag = cpu_to_be32(spt->ref_tag); - - } else { - /* SBC defines ref tag to be lower 32bits of LBA */ - *reftag = (uint32_t) (0xffffffff & scsi_get_lba(sc)); - *apptagmask = 0; - *apptagval = 0; - } -} - /* * This function sets up buffer list for protection groups of * type LPFC_PG_TYPE_NO_DIF @@ -1427,9 +1389,8 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, dma_addr_t physaddr; int i = 0, num_bde = 0, status; int datadir = sc->sc_data_direction; - unsigned blksize; uint32_t reftag; - uint16_t apptagmask, apptagval; + unsigned blksize; uint8_t txop, rxop; status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); @@ -1438,17 +1399,16 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, /* extract some info from the scsi command for pde*/ blksize = lpfc_cmd_blksize(sc); - lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag); + reftag = scsi_get_lba(sc) & 0xffffffff; /* setup PDE5 with what we have */ pde5 = (struct lpfc_pde5 *) bpl; memset(pde5, 0, sizeof(struct lpfc_pde5)); bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR); - pde5->reftag = reftag; /* Endianness conversion if necessary for PDE5 */ pde5->word0 = cpu_to_le32(pde5->word0); - pde5->reftag = cpu_to_le32(pde5->reftag); + pde5->reftag = cpu_to_le32(reftag); /* advance bpl and increment bde count */ num_bde++; @@ -1463,10 +1423,10 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, if (datadir == DMA_FROM_DEVICE) { bf_set(pde6_ce, pde6, 1); bf_set(pde6_re, pde6, 1); - bf_set(pde6_ae, pde6, 1); } bf_set(pde6_ai, pde6, 1); - bf_set(pde6_apptagval, pde6, apptagval); + bf_set(pde6_ae, pde6, 0); + bf_set(pde6_apptagval, pde6, 0); /* Endianness conversion if necessary for PDE6 */ pde6->word0 = cpu_to_le32(pde6->word0); @@ -1551,7 +1511,6 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, unsigned char pgdone = 0, alldone = 0; unsigned blksize; uint32_t reftag; - uint16_t apptagmask, apptagval; uint8_t txop, rxop; int num_bde = 0; @@ -1571,7 +1530,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, /* extract some info from the scsi command */ blksize = lpfc_cmd_blksize(sc); - lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag); + reftag = scsi_get_lba(sc) & 0xffffffff; split_offset = 0; do { @@ -1579,11 +1538,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, pde5 = (struct lpfc_pde5 *) bpl; memset(pde5, 0, sizeof(struct lpfc_pde5)); bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR); - pde5->reftag = reftag; /* Endianness conversion if necessary for PDE5 */ pde5->word0 = cpu_to_le32(pde5->word0); - pde5->reftag = cpu_to_le32(pde5->reftag); + pde5->reftag = cpu_to_le32(reftag); /* advance bpl and increment bde count */ num_bde++; @@ -1597,9 +1555,9 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, bf_set(pde6_oprx, pde6, rxop); bf_set(pde6_ce, pde6, 1); bf_set(pde6_re, pde6, 1); - bf_set(pde6_ae, pde6, 1); bf_set(pde6_ai, pde6, 1); - bf_set(pde6_apptagval, pde6, apptagval); + bf_set(pde6_ae, pde6, 0); + 
bf_set(pde6_apptagval, pde6, 0); /* Endianness conversion if necessary for PDE6 */ pde6->word0 = cpu_to_le32(pde6->word0); @@ -1621,8 +1579,8 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, memset(pde7, 0, sizeof(struct lpfc_pde7)); bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR); - pde7->addrHigh = le32_to_cpu(putPaddrLow(protphysaddr)); - pde7->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr)); + pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr)); + pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr)); protgrp_blks = protgroup_len / 8; protgrp_bytes = protgrp_blks * blksize; @@ -1632,7 +1590,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff); protgroup_offset += protgroup_remainder; protgrp_blks = protgroup_remainder / 8; - protgrp_bytes = protgroup_remainder * blksize; + protgrp_bytes = protgrp_blks * blksize; } else { protgroup_offset = 0; curr_prot++; @@ -2006,16 +1964,21 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { /* * setup sense data descriptor 0 per SPC-4 as an information - * field, and put the failing LBA in it + * field, and put the failing LBA in it. + * This code assumes there was also a guard/app/ref tag error + * indication. */ - cmd->sense_buffer[8] = 0; /* Information */ - cmd->sense_buffer[9] = 0xa; /* Add. length */ + cmd->sense_buffer[7] = 0xc; /* Additional sense length */ + cmd->sense_buffer[8] = 0; /* Information descriptor type */ + cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */ + cmd->sense_buffer[10] = 0x80; /* Validity bit */ bghm /= cmd->device->sector_size; failing_sector = scsi_get_lba(cmd); failing_sector += bghm; - put_unaligned_be64(failing_sector, &cmd->sense_buffer[10]); + /* Descriptor Information */ + put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]); } if (!ret) { diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 98999bbd8cb..8b799f047a9 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -560,7 +560,7 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL); if (rrq) { rrq->send_rrq = send_rrq; - rrq->xritag = phba->sli4_hba.xri_ids[xritag]; + rrq->xritag = xritag; rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1); rrq->ndlp = ndlp; rrq->nlp_DID = ndlp->nlp_DID; @@ -2452,7 +2452,8 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, /* search continue save q for same XRI */ list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) { - if (iocbq->iocb.ulpContext == saveq->iocb.ulpContext) { + if (iocbq->iocb.unsli3.rcvsli3.ox_id == + saveq->iocb.unsli3.rcvsli3.ox_id) { list_add_tail(&saveq->list, &iocbq->list); found = 1; break; @@ -3355,6 +3356,7 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, irspiocbq); break; case CQE_CODE_RECEIVE: + case CQE_CODE_RECEIVE_V1: dmabuf = container_of(cq_event, struct hbq_dmabuf, cq_event); lpfc_sli4_handle_received_buffer(phba, dmabuf); @@ -4712,10 +4714,15 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count. * @phba: Pointer to HBA context object. * @type: The resource extent type. + * @extnt_count: buffer to hold port available extent count. + * @extnt_size: buffer to hold element count per extent. * - * This function allocates all SLI4 resource identifiers. 
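/*
 * Aside - two things change in the lpfc_scsi.c hunks above.  First, with
 * lpfc_get_cmd_dif_parms() removed, the BlockGuard setup paths take the DIF
 * reference tag straight from the low 32 bits of the starting LBA (per SBC)
 * and leave the app tag fields zero.  Second, lpfc_parse_bg_err() now builds
 * descriptor-format sense data per SPC-4: additional sense length 0x0c, an
 * Information descriptor (type 0x00, length 0x0a, VALID bit) and the failing
 * LBA as a big-endian 64-bit value at bytes 12..19.  Both steps in miniature;
 * helper names are invented for illustration.
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>

static uint32_t dif_reftag_from_lba(uint64_t lba)
{
        return (uint32_t)(lba & 0xffffffffULL);         /* SBC: ref tag = low 32 bits of LBA */
}

static void fill_info_descriptor(uint8_t *sense, uint64_t failing_lba)
{
        int i;

        sense[7] = 0x0c;                /* additional sense length */
        sense[8] = 0x00;                /* descriptor type: Information */
        sense[9] = 0x0a;                /* additional descriptor length */
        sense[10] = 0x80;               /* VALID bit */
        for (i = 0; i < 8; i++)         /* put_unaligned_be64() equivalent */
                sense[12 + i] = (uint8_t)(failing_lba >> (56 - 8 * i));
}

int main(void)
{
        uint8_t sense[32];

        assert(dif_reftag_from_lba(0x1234567890ULL) == 0x34567890u);
        memset(sense, 0, sizeof(sense));
        fill_info_descriptor(sense, 0x1000ULL + 42);    /* start LBA + failing offset in blocks */
        assert(sense[18] == 0x10 && sense[19] == 42);
        return 0;
}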
+ * This function calls the port and retrievs the number of available + * extents and their size for a particular extent type. + * + * Returns: 0 if successful. Nonzero otherwise. **/ -static int +int lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type, uint16_t *extnt_count, uint16_t *extnt_size) { @@ -4892,7 +4899,7 @@ lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt, req_len, *emb); if (alloc_len < req_len) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "9000 Allocated DMA memory size (x%x) is " + "2982 Allocated DMA memory size (x%x) is " "less than the requested DMA memory " "size (x%x)\n", alloc_len, req_len); return -ENOMEM; @@ -5506,6 +5513,154 @@ lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba) } /** + * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents. + * @phba: Pointer to HBA context object. + * @type: The resource extent type. + * @extnt_count: buffer to hold port extent count response + * @extnt_size: buffer to hold port extent size response. + * + * This function calls the port to read the host allocated extents + * for a particular type. + **/ +int +lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type, + uint16_t *extnt_cnt, uint16_t *extnt_size) +{ + bool emb; + int rc = 0; + uint16_t curr_blks = 0; + uint32_t req_len, emb_len; + uint32_t alloc_len, mbox_tmo; + struct list_head *blk_list_head; + struct lpfc_rsrc_blks *rsrc_blk; + LPFC_MBOXQ_t *mbox; + void *virtaddr = NULL; + struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; + struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; + union lpfc_sli4_cfg_shdr *shdr; + + switch (type) { + case LPFC_RSC_TYPE_FCOE_VPI: + blk_list_head = &phba->lpfc_vpi_blk_list; + break; + case LPFC_RSC_TYPE_FCOE_XRI: + blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list; + break; + case LPFC_RSC_TYPE_FCOE_VFI: + blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list; + break; + case LPFC_RSC_TYPE_FCOE_RPI: + blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list; + break; + default: + return -EIO; + } + + /* Count the number of extents currently allocatd for this type. */ + list_for_each_entry(rsrc_blk, blk_list_head, list) { + if (curr_blks == 0) { + /* + * The GET_ALLOCATED mailbox does not return the size, + * just the count. The size should be just the size + * stored in the current allocated block and all sizes + * for an extent type are the same so set the return + * value now. + */ + *extnt_size = rsrc_blk->rsrc_size; + } + curr_blks++; + } + + /* Calculate the total requested length of the dma memory. */ + req_len = curr_blks * sizeof(uint16_t); + + /* + * Calculate the size of an embedded mailbox. The uint32_t + * accounts for extents-specific word. + */ + emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - + sizeof(uint32_t); + + /* + * Presume the allocation and response will fit into an embedded + * mailbox. If not true, reconfigure to a non-embedded mailbox. 
+ */ + emb = LPFC_SLI4_MBX_EMBED; + req_len = emb_len; + if (req_len > emb_len) { + req_len = curr_blks * sizeof(uint16_t) + + sizeof(union lpfc_sli4_cfg_shdr) + + sizeof(uint32_t); + emb = LPFC_SLI4_MBX_NEMBED; + } + + mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + memset(mbox, 0, sizeof(LPFC_MBOXQ_t)); + + alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT, + req_len, emb); + if (alloc_len < req_len) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2983 Allocated DMA memory size (x%x) is " + "less than the requested DMA memory " + "size (x%x)\n", alloc_len, req_len); + rc = -ENOMEM; + goto err_exit; + } + rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb); + if (unlikely(rc)) { + rc = -EIO; + goto err_exit; + } + + if (!phba->sli4_hba.intr_enable) + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + else { + mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); + rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); + } + + if (unlikely(rc)) { + rc = -EIO; + goto err_exit; + } + + /* + * Figure out where the response is located. Then get local pointers + * to the response data. The port does not guarantee to respond to + * all extents counts request so update the local variable with the + * allocated count from the port. + */ + if (emb == LPFC_SLI4_MBX_EMBED) { + rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; + shdr = &rsrc_ext->header.cfg_shdr; + *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); + } else { + virtaddr = mbox->sge_array->addr[0]; + n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; + shdr = &n_rsrc->cfg_shdr; + *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); + } + + if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, + "2984 Failed to read allocated resources " + "for type %d - Status 0x%x Add'l Status 0x%x.\n", + type, + bf_get(lpfc_mbox_hdr_status, &shdr->response), + bf_get(lpfc_mbox_hdr_add_status, &shdr->response)); + rc = -EIO; + goto err_exit; + } + err_exit: + lpfc_sli4_mbox_cmd_free(phba, mbox); + return rc; +} + +/** * lpfc_sli4_hba_setup - SLI4 device intialization PCI function * @phba: Pointer to HBA context object. 
* @@ -5837,6 +5992,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) "Advanced Error Reporting (AER)\n"); phba->cfg_aer_support = 0; } + rc = 0; } if (!(phba->hba_flag & HBA_FCOE_MODE)) { @@ -6634,6 +6790,9 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, unsigned long iflags; int rc; + /* dump from issue mailbox command if setup */ + lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb); + rc = lpfc_mbox_dev_check(phba); if (unlikely(rc)) { lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, @@ -7318,12 +7477,12 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); - break; + break; case CMD_XMIT_SEQUENCE64_CX: bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, iocbq->iocb.un.ulpWord[3]); bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, - iocbq->iocb.ulpContext); + iocbq->iocb.unsli3.rcvsli3.ox_id); /* The entire sequence is transmitted for this IOCB */ xmit_len = total_len; cmnd = CMD_XMIT_SEQUENCE64_CR; @@ -7341,7 +7500,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); wqe->xmit_sequence.xmit_len = xmit_len; command_type = OTHER_COMMAND; - break; + break; case CMD_XMIT_BCAST64_CN: /* word3 iocb=iotag32 wqe=seq_payload_len */ wqe->xmit_bcast64.seq_payload_len = xmit_len; @@ -7355,7 +7514,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_LENLOC_WORD3); bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0); - break; + break; case CMD_FCP_IWRITE64_CR: command_type = FCP_COMMAND_DATA_OUT; /* word3 iocb=iotag wqe=payload_offset_len */ @@ -7375,7 +7534,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, LPFC_WQE_LENLOC_WORD4); bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0); bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU); - break; + break; case CMD_FCP_IREAD64_CR: /* word3 iocb=iotag wqe=payload_offset_len */ /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ @@ -7394,7 +7553,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, LPFC_WQE_LENLOC_WORD4); bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0); bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); - break; + break; case CMD_FCP_ICMND64_CR: /* word3 iocb=IO_TAG wqe=reserved */ wqe->fcp_icmd.rsrvd3 = 0; @@ -7407,7 +7566,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE); bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0); - break; + break; case CMD_GEN_REQUEST64_CR: /* For this command calculate the xmit length of the * request bde. 
@@ -7442,7 +7601,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); command_type = OTHER_COMMAND; - break; + break; case CMD_XMIT_ELS_RSP64_CX: ndlp = (struct lpfc_nodelist *)iocbq->context1; /* words0-2 BDE memcpy */ @@ -7457,7 +7616,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU); bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, - iocbq->iocb.ulpContext); + iocbq->iocb.unsli3.rcvsli3.ox_id); if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l) bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, phba->vpi_ids[iocbq->vport->vpi]); @@ -7470,7 +7629,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); command_type = OTHER_COMMAND; - break; + break; case CMD_CLOSE_XRI_CN: case CMD_ABORT_XRI_CN: case CMD_ABORT_XRI_CX: @@ -7509,7 +7668,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, cmnd = CMD_ABORT_XRI_CX; command_type = OTHER_COMMAND; xritag = 0; - break; + break; case CMD_XMIT_BLS_RSP64_CX: /* As BLS ABTS RSP WQE is very different from other WQEs, * we re-construct this WQE here based on information in @@ -7553,7 +7712,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp)); } - break; + break; case CMD_XRI_ABORTED_CX: case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */ @@ -7565,7 +7724,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, "2014 Invalid command 0x%x\n", iocbq->iocb.ulpCommand); return IOCB_ERROR; - break; + break; } bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); @@ -10481,10 +10640,14 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; struct lpfc_queue *drq = phba->sli4_hba.dat_rq; struct hbq_dmabuf *dma_buf; - uint32_t status; + uint32_t status, rq_id; unsigned long iflags; - if (bf_get(lpfc_rcqe_rq_id, rcqe) != hrq->queue_id) + if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) + rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); + else + rq_id = bf_get(lpfc_rcqe_rq_id, rcqe); + if (rq_id != hrq->queue_id) goto out; status = bf_get(lpfc_rcqe_status, rcqe); @@ -10563,6 +10726,7 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, (struct sli4_wcqe_xri_aborted *)&cqevt); break; case CQE_CODE_RECEIVE: + case CQE_CODE_RECEIVE_V1: /* Process the RQ event */ phba->last_completion_time = jiffies; workposted = lpfc_sli4_sp_handle_rcqe(phba, @@ -12345,19 +12509,18 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba, } /** - * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port + * lpfc_sli4_alloc_xri - Get an available rpi in the device's range * @phba: pointer to lpfc hba data structure. * * This routine is invoked to post rpi header templates to the - * port for those SLI4 ports that do not support extents. This routine - * posts a PAGE_SIZE memory region to the port to hold up to - * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine - * and should be called only when interrupts are disabled. + * HBA consistent with the SLI-4 interface spec. 
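/*
 * Aside - the rq_id_v1 extraction in the lpfc_sli4_sp_handle_rcqe hunk above
 * relies on the bf_get() accessor convention used throughout these files:
 * each field is described by name##_SHIFT / name##_MASK / name##_WORD
 * defines.  The macro below is a sketch of that idea, not a copy of
 * lpfc_hw4.h; it reuses the lpfc_rcqe_rq_id_v1 defines added earlier in this
 * patch, with a simplified CQE structure.
 */
#include <assert.h>
#include <stdint.h>

#define my_bf_get(name, ptr) \
        ((((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)

#define lpfc_rcqe_rq_id_v1_SHIFT        0
#define lpfc_rcqe_rq_id_v1_MASK         0x0000FFFF
#define lpfc_rcqe_rq_id_v1_WORD         word2

struct rcqe_v1 {
        uint32_t word0;
        uint32_t word1;
        uint32_t word2;
        uint32_t word3;
};

int main(void)
{
        struct rcqe_v1 cqe = { .word2 = 0xDEAD0123u };

        /* V1 receive CQEs carry rq_id in the low 16 bits of word2 */
        assert(my_bf_get(lpfc_rcqe_rq_id_v1, &cqe) == 0x0123);
        return 0;
}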
This routine + * posts a SLI4_PAGE_SIZE memory region to the port to hold up to + * SLI4_PAGE_SIZE modulo 64 rpi context headers. * - * Return codes - * 0 - successful - * -ERROR - otherwise. - */ + * Returns + * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful + * LPFC_RPI_ALLOC_ERROR if no rpis are available. + **/ uint16_t lpfc_sli4_alloc_xri(struct lpfc_hba *phba) { @@ -13406,7 +13569,7 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba, * This function validates the xri maps to the known range of XRIs allocated an * used by the driver. **/ -static uint16_t +uint16_t lpfc_sli4_xri_inrange(struct lpfc_hba *phba, uint16_t xri) { @@ -13643,10 +13806,12 @@ lpfc_seq_complete(struct hbq_dmabuf *dmabuf) static struct lpfc_iocbq * lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) { + struct hbq_dmabuf *hbq_buf; struct lpfc_dmabuf *d_buf, *n_buf; struct lpfc_iocbq *first_iocbq, *iocbq; struct fc_frame_header *fc_hdr; uint32_t sid; + uint32_t len, tot_len; struct ulp_bde64 *pbde; fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; @@ -13655,6 +13820,7 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) lpfc_update_rcv_time_stamp(vport); /* get the Remote Port's SID */ sid = sli4_sid_from_fc_hdr(fc_hdr); + tot_len = 0; /* Get an iocbq struct to fill in. */ first_iocbq = lpfc_sli_get_iocbq(vport->phba); if (first_iocbq) { @@ -13662,9 +13828,12 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0; first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX; - first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id); - /* iocbq is prepped for internal consumption. Logical vpi. */ - first_iocbq->iocb.unsli3.rcvsli3.vpi = vport->vpi; + first_iocbq->iocb.ulpContext = NO_XRI; + first_iocbq->iocb.unsli3.rcvsli3.ox_id = + be16_to_cpu(fc_hdr->fh_ox_id); + /* iocbq is prepped for internal consumption. Physical vpi. 
*/ + first_iocbq->iocb.unsli3.rcvsli3.vpi = + vport->phba->vpi_ids[vport->vpi]; /* put the first buffer into the first IOCBq */ first_iocbq->context2 = &seq_dmabuf->dbuf; first_iocbq->context3 = NULL; @@ -13672,9 +13841,9 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = LPFC_DATA_BUF_SIZE; first_iocbq->iocb.un.rcvels.remoteID = sid; - first_iocbq->iocb.unsli3.rcvsli3.acc_len += - bf_get(lpfc_rcqe_length, + tot_len = bf_get(lpfc_rcqe_length, &seq_dmabuf->cq_event.cqe.rcqe_cmpl); + first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; } iocbq = first_iocbq; /* @@ -13692,9 +13861,13 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) pbde = (struct ulp_bde64 *) &iocbq->iocb.unsli3.sli3Words[4]; pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE; - first_iocbq->iocb.unsli3.rcvsli3.acc_len += - bf_get(lpfc_rcqe_length, - &seq_dmabuf->cq_event.cqe.rcqe_cmpl); + + /* We need to get the size out of the right CQE */ + hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); + len = bf_get(lpfc_rcqe_length, + &hbq_buf->cq_event.cqe.rcqe_cmpl); + iocbq->iocb.unsli3.rcvsli3.acc_len += len; + tot_len += len; } else { iocbq = lpfc_sli_get_iocbq(vport->phba); if (!iocbq) { @@ -13712,9 +13885,14 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) iocbq->iocb.ulpBdeCount = 1; iocbq->iocb.un.cont64[0].tus.f.bdeSize = LPFC_DATA_BUF_SIZE; - first_iocbq->iocb.unsli3.rcvsli3.acc_len += - bf_get(lpfc_rcqe_length, - &seq_dmabuf->cq_event.cqe.rcqe_cmpl); + + /* We need to get the size out of the right CQE */ + hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); + len = bf_get(lpfc_rcqe_length, + &hbq_buf->cq_event.cqe.rcqe_cmpl); + tot_len += len; + iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; + iocbq->iocb.un.rcvels.remoteID = sid; list_add_tail(&iocbq->list, &first_iocbq->list); } @@ -13787,7 +13965,13 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, lpfc_in_buf_free(phba, &dmabuf->dbuf); return; } - fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl); + if ((bf_get(lpfc_cqe_code, + &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1)) + fcfi = bf_get(lpfc_rcqe_fcf_id_v1, + &dmabuf->cq_event.cqe.rcqe_cmpl); + else + fcfi = bf_get(lpfc_rcqe_fcf_id, + &dmabuf->cq_event.cqe.rcqe_cmpl); vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi); if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) { /* throw out the frame */ @@ -14451,6 +14635,92 @@ fail_fcf_read: } /** + * lpfc_check_next_fcf_pri + * phba pointer to the lpfc_hba struct for this port. + * This routine is called from the lpfc_sli4_fcf_rr_next_index_get + * routine when the rr_bmask is empty. The FCF indecies are put into the + * rr_bmask based on their priority level. Starting from the highest priority + * to the lowest. The most likely FCF candidate will be in the highest + * priority group. When this routine is called it searches the fcf_pri list for + * next lowest priority group and repopulates the rr_bmask with only those + * fcf_indexes. 
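/*
 * Aside - the lpfc_sli4_handle_received_buffer() hunk above checks the CQE
 * code and reads the FCF id from word1 for the new V1 receive CQE
 * (lpfc_rcqe_fcf_id_v1, bits 5:0) instead of word2 for the original format.
 * A self-contained model of that selection; the struct layout is simplified
 * and only the fields used here are represented.
 */
#include <stdint.h>
#include <stdio.h>

#define CQE_CODE_RECEIVE        0x4
#define CQE_CODE_RECEIVE_V1     0x9
#define RCQE_FCF_ID_MASK        0x3F    /* low 6 bits in either format */

struct rcqe_words {
        uint32_t word0, word1, word2, word3;
};

static unsigned int rcqe_fcf_id(const struct rcqe_words *cqe, unsigned int code)
{
        if (code == CQE_CODE_RECEIVE_V1)
                return cqe->word1 & RCQE_FCF_ID_MASK;   /* fcf_id_v1 */
        return cqe->word2 & RCQE_FCF_ID_MASK;           /* original fcf_id */
}

int main(void)
{
        struct rcqe_words cqe = { .word1 = 0x07, .word2 = 0x15 };

        printf("v1 fcfi=%u, v0 fcfi=%u\n",
               rcqe_fcf_id(&cqe, CQE_CODE_RECEIVE_V1),
               rcqe_fcf_id(&cqe, CQE_CODE_RECEIVE));
        return 0;
}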
+ * returns: + * 1=success 0=failure + **/ +int +lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba) +{ + uint16_t next_fcf_pri; + uint16_t last_index; + struct lpfc_fcf_pri *fcf_pri; + int rc; + int ret = 0; + + last_index = find_first_bit(phba->fcf.fcf_rr_bmask, + LPFC_SLI4_FCF_TBL_INDX_MAX); + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "3060 Last IDX %d\n", last_index); + if (list_empty(&phba->fcf.fcf_pri_list)) { + lpfc_printf_log(phba, KERN_ERR, LOG_FIP, + "3061 Last IDX %d\n", last_index); + return 0; /* Empty rr list */ + } + next_fcf_pri = 0; + /* + * Clear the rr_bmask and set all of the bits that are at this + * priority. + */ + memset(phba->fcf.fcf_rr_bmask, 0, + sizeof(*phba->fcf.fcf_rr_bmask)); + spin_lock_irq(&phba->hbalock); + list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { + if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED) + continue; + /* + * the 1st priority that has not FLOGI failed + * will be the highest. + */ + if (!next_fcf_pri) + next_fcf_pri = fcf_pri->fcf_rec.priority; + spin_unlock_irq(&phba->hbalock); + if (fcf_pri->fcf_rec.priority == next_fcf_pri) { + rc = lpfc_sli4_fcf_rr_index_set(phba, + fcf_pri->fcf_rec.fcf_index); + if (rc) + return 0; + } + spin_lock_irq(&phba->hbalock); + } + /* + * if next_fcf_pri was not set above and the list is not empty then + * we have failed flogis on all of them. So reset flogi failed + * and start at the begining. + */ + if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) { + list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { + fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED; + /* + * the 1st priority that has not FLOGI failed + * will be the highest. + */ + if (!next_fcf_pri) + next_fcf_pri = fcf_pri->fcf_rec.priority; + spin_unlock_irq(&phba->hbalock); + if (fcf_pri->fcf_rec.priority == next_fcf_pri) { + rc = lpfc_sli4_fcf_rr_index_set(phba, + fcf_pri->fcf_rec.fcf_index); + if (rc) + return 0; + } + spin_lock_irq(&phba->hbalock); + } + } else + ret = 1; + spin_unlock_irq(&phba->hbalock); + + return ret; +} +/** * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index * @phba: pointer to lpfc hba data structure. * @@ -14466,6 +14736,7 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) uint16_t next_fcf_index; /* Search start from next bit of currently registered FCF index */ +next_priority: next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX; next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, @@ -14473,17 +14744,46 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) next_fcf_index); /* Wrap around condition on phba->fcf.fcf_rr_bmask */ - if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) + if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { + /* + * If we have wrapped then we need to clear the bits that + * have been tested so that we can detect when we should + * change the priority level. + */ next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, LPFC_SLI4_FCF_TBL_INDX_MAX, 0); + } + /* Check roundrobin failover list empty condition */ - if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { + if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX || + next_fcf_index == phba->fcf.current_rec.fcf_indx) { + /* + * If next fcf index is not found check if there are lower + * Priority level fcf's in the fcf_priority list. + * Set up the rr_bmask with all of the avaiable fcf bits + * at that level and continue the selection process. 
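/*
 * Aside - when the roundrobin bitmap runs dry, lpfc_check_next_fcf_pri_level()
 * above repopulates it with every FCF at the best remaining priority level
 * that has not yet failed FLOGI, falling back to clearing the failed flags
 * when all levels are exhausted.  The selection step in miniature, user-space
 * only, with invented types:
 */
#include <stdio.h>

struct fcf_entry {
        unsigned int index;
        unsigned int priority;          /* lower value = preferred */
        int flogi_failed;
};

/* Returns the chosen priority level, or -1 if every entry has failed. */
static int repopulate_rr_mask(const struct fcf_entry *tbl, int n,
                              unsigned long *rr_bmask)
{
        int i, best = -1;

        *rr_bmask = 0;
        for (i = 0; i < n; i++)         /* find the best non-failed priority */
                if (!tbl[i].flogi_failed &&
                    (best < 0 || (int)tbl[i].priority < best))
                        best = tbl[i].priority;
        if (best < 0)
                return -1;
        for (i = 0; i < n; i++)         /* mark every FCF at that level */
                if (!tbl[i].flogi_failed && (int)tbl[i].priority == best)
                        *rr_bmask |= 1UL << tbl[i].index;
        return best;
}

int main(void)
{
        struct fcf_entry tbl[] = {
                { .index = 0, .priority = 2, .flogi_failed = 1 },
                { .index = 1, .priority = 5, .flogi_failed = 0 },
                { .index = 2, .priority = 5, .flogi_failed = 0 },
        };
        unsigned long mask;
        int pri = repopulate_rr_mask(tbl, 3, &mask);

        printf("priority %d mask 0x%lx\n", pri, mask);  /* priority 5 mask 0x6 */
        return 0;
}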
+ */ + if (lpfc_check_next_fcf_pri_level(phba)) + goto next_priority; lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, "2844 No roundrobin failover FCF available\n"); - return LPFC_FCOE_FCF_NEXT_NONE; + if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) + return LPFC_FCOE_FCF_NEXT_NONE; + else { + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, + "3063 Only FCF available idx %d, flag %x\n", + next_fcf_index, + phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag); + return next_fcf_index; + } } + if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX && + phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag & + LPFC_FCF_FLOGI_FAILED) + goto next_priority; + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2845 Get next roundrobin failover FCF (x%x)\n", next_fcf_index); @@ -14535,6 +14835,7 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index) void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) { + struct lpfc_fcf_pri *fcf_pri; if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { lpfc_printf_log(phba, KERN_ERR, LOG_FIP, "2762 FCF (x%x) reached driver's book " @@ -14543,6 +14844,14 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) return; } /* Clear the eligible FCF record index bmask */ + spin_lock_irq(&phba->hbalock); + list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { + if (fcf_pri->fcf_rec.fcf_index == fcf_index) { + list_del_init(&fcf_pri->list); + break; + } + } + spin_unlock_irq(&phba->hbalock); clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); lpfc_printf_log(phba, KERN_INFO, LOG_FIP, diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 4b1703554a2..19bb87ae859 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -81,6 +81,8 @@ (fc_hdr)->fh_f_ctl[1] << 8 | \ (fc_hdr)->fh_f_ctl[2]) +#define LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT 12000 + enum lpfc_sli4_queue_type { LPFC_EQ, LPFC_GCQ, @@ -157,6 +159,25 @@ struct lpfc_fcf_rec { #define RECORD_VALID 0x02 }; +struct lpfc_fcf_pri_rec { + uint16_t fcf_index; +#define LPFC_FCF_ON_PRI_LIST 0x0001 +#define LPFC_FCF_FLOGI_FAILED 0x0002 + uint16_t flag; + uint32_t priority; +}; + +struct lpfc_fcf_pri { + struct list_head list; + struct lpfc_fcf_pri_rec fcf_rec; +}; + +/* + * Maximum FCF table index, it is for driver internal book keeping, it + * just needs to be no less than the supported HBA's FCF table size. + */ +#define LPFC_SLI4_FCF_TBL_INDX_MAX 32 + struct lpfc_fcf { uint16_t fcfi; uint32_t fcf_flag; @@ -176,15 +197,13 @@ struct lpfc_fcf { uint32_t eligible_fcf_cnt; struct lpfc_fcf_rec current_rec; struct lpfc_fcf_rec failover_rec; + struct list_head fcf_pri_list; + struct lpfc_fcf_pri fcf_pri[LPFC_SLI4_FCF_TBL_INDX_MAX]; + uint32_t current_fcf_scan_pri; struct timer_list redisc_wait; unsigned long *fcf_rr_bmask; /* Eligible FCF indexes for RR failover */ }; -/* - * Maximum FCF table index, it is for driver internal book keeping, it - * just needs to be no less than the supported HBA's FCF table size. - */ -#define LPFC_SLI4_FCF_TBL_INDX_MAX 32 #define LPFC_REGION23_SIGNATURE "RG23" #define LPFC_REGION23_VERSION 1 diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index c03921b1232..c1e0ae94d9f 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. 
* *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.3.23" +#define LPFC_DRIVER_VERSION "8.3.25" #define LPFC_DRIVER_NAME "lpfc" #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c index bf2a1c51629..af3a6af97cc 100644 --- a/drivers/scsi/mac_scsi.c +++ b/drivers/scsi/mac_scsi.c @@ -215,13 +215,6 @@ static int __init mac_scsi_setup(char *str) { __setup("mac5380=", mac_scsi_setup); /* - * If you want to find the instance with (k)gdb ... - */ -#if NDEBUG -static struct Scsi_Host *default_instance; -#endif - -/* * Function : int macscsi_detect(struct scsi_host_template * tpnt) * * Purpose : initializes mac NCR5380 driver based on the @@ -233,7 +226,7 @@ static struct Scsi_Host *default_instance; * */ -int macscsi_detect(struct scsi_host_template * tpnt) +int __init macscsi_detect(struct scsi_host_template * tpnt) { static int called = 0; int flags = 0; @@ -268,10 +261,7 @@ int macscsi_detect(struct scsi_host_template * tpnt) /* Once we support multiple 5380s (e.g. DuoDock) we'll do something different here */ instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata)); -#if NDEBUG - default_instance = instance; -#endif - + if (macintosh_config->ident == MAC_MODEL_IIFX) { mac_scsi_regp = via1+0x8000; mac_scsi_drq = via1+0xE000; diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 7370c084b17..3948a00d81f 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -33,9 +33,9 @@ /* * MegaRAID SAS Driver meta data */ -#define MEGASAS_VERSION "00.00.05.38-rc1" -#define MEGASAS_RELDATE "May. 11, 2011" -#define MEGASAS_EXT_VERSION "Wed. May. 11 17:00:00 PDT 2011" +#define MEGASAS_VERSION "00.00.05.40-rc1" +#define MEGASAS_RELDATE "Jul. 26, 2011" +#define MEGASAS_EXT_VERSION "Tue. Jul. 
26 17:00:00 PDT 2011" /* * Device IDs diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 2d8cdce7b2f..776d0198866 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -18,7 +18,7 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * FILE: megaraid_sas_base.c - * Version : v00.00.05.38-rc1 + * Version : v00.00.05.40-rc1 * * Authors: LSI Corporation * Sreenivas Bagalkote @@ -54,6 +54,7 @@ #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> +#include <scsi/scsi_tcq.h> #include "megaraid_sas_fusion.h" #include "megaraid_sas.h" @@ -2057,6 +2058,20 @@ megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd) } } +static int megasas_change_queue_depth(struct scsi_device *sdev, + int queue_depth, int reason) +{ + if (reason != SCSI_QDEPTH_DEFAULT) + return -EOPNOTSUPP; + + if (queue_depth > sdev->host->can_queue) + queue_depth = sdev->host->can_queue; + scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), + queue_depth); + + return queue_depth; +} + /* * Scsi host template for megaraid_sas driver */ @@ -2074,6 +2089,7 @@ static struct scsi_host_template megasas_template = { .eh_timed_out = megasas_reset_timer, .bios_param = megasas_bios_param, .use_clustering = ENABLE_CLUSTERING, + .change_queue_depth = megasas_change_queue_depth, }; /** diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index 8fe3a45794f..5a5af1fe758 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -288,7 +288,6 @@ u8 MR_GetPhyParams(u32 ld, u64 stripRow, u16 stripRef, u64 *pdBlock, /* Get dev handle from Pd */ *pDevHandle = MR_PdDevHandleGet(pd, map); } - retval = FALSE; } *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk; diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h index a3e60385787..3105d5e8d90 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2.h @@ -8,7 +8,7 @@ * scatter/gather formats. * Creation Date: June 21, 2006 * - * mpi2.h Version: 02.00.17 + * mpi2.h Version: 02.00.18 * * Version History * --------------- @@ -64,6 +64,8 @@ * 05-12-10 02.00.16 Bumped MPI2_HEADER_VERSION_UNIT. * Added alternative defines for the SGE Direction bit. * 08-11-10 02.00.17 Bumped MPI2_HEADER_VERSION_UNIT. + * 11-10-10 02.00.18 Bumped MPI2_HEADER_VERSION_UNIT. + * Added MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR define. 
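/*
 * Aside - the new megasas_change_queue_depth() above simply clamps the
 * requested depth to the host's can_queue before applying it via
 * scsi_adjust_queue_depth().  The clamp as a tiny pure function; the name is
 * illustrative only.
 */
#include <assert.h>

static int clamp_queue_depth(int requested, int can_queue)
{
        return requested > can_queue ? can_queue : requested;
}

int main(void)
{
        assert(clamp_queue_depth(64, 32) == 32);        /* capped at can_queue */
        assert(clamp_queue_depth(16, 32) == 16);        /* smaller requests pass through */
        return 0;
}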
* -------------------------------------------------------------------------- */ @@ -89,7 +91,7 @@ #define MPI2_VERSION_02_00 (0x0200) /* versioning for this MPI header set */ -#define MPI2_HEADER_VERSION_UNIT (0x11) +#define MPI2_HEADER_VERSION_UNIT (0x12) #define MPI2_HEADER_VERSION_DEV (0x00) #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) #define MPI2_HEADER_VERSION_UNIT_SHIFT (8) @@ -1060,10 +1062,14 @@ typedef struct _MPI2_IEEE_SGE_UNION #define MPI2_IEEE_SGE_FLAGS_ADDR_MASK (0x03) #define MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00) + /* IEEE Simple Element only */ #define MPI2_IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01) + /* IEEE Simple Element only */ #define MPI2_IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02) #define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03) - + /* IEEE Simple Element only */ +#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR (0x03) + /* IEEE Chain Element only */ /**************************************************************************** * IEEE SGE operation Macros diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h index f5b9c766e28..61475a6480e 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h @@ -6,7 +6,7 @@ * Title: MPI Configuration messages and pages * Creation Date: November 10, 2006 * - * mpi2_cnfg.h Version: 02.00.16 + * mpi2_cnfg.h Version: 02.00.17 * * Version History * --------------- @@ -127,6 +127,13 @@ * Added MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY define. * 08-11-10 02.00.16 Removed IO Unit Page 1 device path (multi-pathing) * defines. + * 11-10-10 02.00.17 Added ReceptacleID field (replacing Reserved1) to + * MPI2_MANPAGE7_CONNECTOR_INFO and reworked defines for + * the Pinout field. + * Added BoardTemperature and BoardTemperatureUnits fields + * to MPI2_CONFIG_PAGE_IO_UNIT_7. + * Added MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING define + * and MPI2_CONFIG_PAGE_EXT_MAN_PS structure. 
* -------------------------------------------------------------------------- */ @@ -210,6 +217,7 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION #define MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING (0x17) #define MPI2_CONFIG_EXTPAGETYPE_SAS_PORT (0x18) #define MPI2_CONFIG_EXTPAGETYPE_ETHERNET (0x19) +#define MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING (0x1A) /***************************************************************************** @@ -612,23 +620,31 @@ typedef struct _MPI2_MANPAGE7_CONNECTOR_INFO U32 Pinout; /* 0x00 */ U8 Connector[16]; /* 0x04 */ U8 Location; /* 0x14 */ - U8 Reserved1; /* 0x15 */ + U8 ReceptacleID; /* 0x15 */ U16 Slot; /* 0x16 */ U32 Reserved2; /* 0x18 */ } MPI2_MANPAGE7_CONNECTOR_INFO, MPI2_POINTER PTR_MPI2_MANPAGE7_CONNECTOR_INFO, Mpi2ManPage7ConnectorInfo_t, MPI2_POINTER pMpi2ManPage7ConnectorInfo_t; /* defines for the Pinout field */ -#define MPI2_MANPAGE7_PINOUT_SFF_8484_L4 (0x00080000) -#define MPI2_MANPAGE7_PINOUT_SFF_8484_L3 (0x00040000) -#define MPI2_MANPAGE7_PINOUT_SFF_8484_L2 (0x00020000) -#define MPI2_MANPAGE7_PINOUT_SFF_8484_L1 (0x00010000) -#define MPI2_MANPAGE7_PINOUT_SFF_8470_L4 (0x00000800) -#define MPI2_MANPAGE7_PINOUT_SFF_8470_L3 (0x00000400) -#define MPI2_MANPAGE7_PINOUT_SFF_8470_L2 (0x00000200) -#define MPI2_MANPAGE7_PINOUT_SFF_8470_L1 (0x00000100) -#define MPI2_MANPAGE7_PINOUT_SFF_8482 (0x00000002) -#define MPI2_MANPAGE7_PINOUT_CONNECTION_UNKNOWN (0x00000001) +#define MPI2_MANPAGE7_PINOUT_LANE_MASK (0x0000FF00) +#define MPI2_MANPAGE7_PINOUT_LANE_SHIFT (8) + +#define MPI2_MANPAGE7_PINOUT_TYPE_MASK (0x000000FF) +#define MPI2_MANPAGE7_PINOUT_TYPE_UNKNOWN (0x00) +#define MPI2_MANPAGE7_PINOUT_SATA_SINGLE (0x01) +#define MPI2_MANPAGE7_PINOUT_SFF_8482 (0x02) +#define MPI2_MANPAGE7_PINOUT_SFF_8486 (0x03) +#define MPI2_MANPAGE7_PINOUT_SFF_8484 (0x04) +#define MPI2_MANPAGE7_PINOUT_SFF_8087 (0x05) +#define MPI2_MANPAGE7_PINOUT_SFF_8643_4I (0x06) +#define MPI2_MANPAGE7_PINOUT_SFF_8643_8I (0x07) +#define MPI2_MANPAGE7_PINOUT_SFF_8470 (0x08) +#define MPI2_MANPAGE7_PINOUT_SFF_8088 (0x09) +#define MPI2_MANPAGE7_PINOUT_SFF_8644_4X (0x0A) +#define MPI2_MANPAGE7_PINOUT_SFF_8644_8X (0x0B) +#define MPI2_MANPAGE7_PINOUT_SFF_8644_16X (0x0C) +#define MPI2_MANPAGE7_PINOUT_SFF_8436 (0x0D) /* defines for the Location field */ #define MPI2_MANPAGE7_LOCATION_UNKNOWN (0x01) @@ -662,7 +678,7 @@ typedef struct _MPI2_CONFIG_PAGE_MAN_7 MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_7, Mpi2ManufacturingPage7_t, MPI2_POINTER pMpi2ManufacturingPage7_t; -#define MPI2_MANUFACTURING7_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING7_PAGEVERSION (0x01) /* defines for the Flags field */ #define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001) @@ -849,11 +865,13 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 { U16 IOCTemperature; /* 0x10 */ U8 IOCTemperatureUnits; /* 0x12 */ U8 IOCSpeed; /* 0x13 */ - U32 Reserved3; /* 0x14 */ + U16 BoardTemperature; /* 0x14 */ + U8 BoardTemperatureUnits; /* 0x16 */ + U8 Reserved3; /* 0x17 */ } MPI2_CONFIG_PAGE_IO_UNIT_7, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_7, Mpi2IOUnitPage7_t, MPI2_POINTER pMpi2IOUnitPage7_t; -#define MPI2_IOUNITPAGE7_PAGEVERSION (0x01) +#define MPI2_IOUNITPAGE7_PAGEVERSION (0x02) /* defines for IO Unit Page 7 PCIeWidth field */ #define MPI2_IOUNITPAGE7_PCIE_WIDTH_X1 (0x01) @@ -881,7 +899,6 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 { #define MPI2_IOUNITPAGE7_PMCAP_PCIE_WIDTH_CHANGE (0x00000008) #define MPI2_IOUNITPAGE7_PMCAP_PCIE_SPEED_CHANGE (0x00000004) - /* defines for IO Unit Page 7 IOCTemperatureUnits field */ #define 
MPI2_IOUNITPAGE7_IOC_TEMP_NOT_PRESENT (0x00) #define MPI2_IOUNITPAGE7_IOC_TEMP_FAHRENHEIT (0x01) @@ -893,6 +910,11 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 { #define MPI2_IOUNITPAGE7_IOC_SPEED_QUARTER (0x04) #define MPI2_IOUNITPAGE7_IOC_SPEED_EIGHTH (0x08) +/* defines for IO Unit Page 7 BoardTemperatureUnits field */ +#define MPI2_IOUNITPAGE7_BOARD_TEMP_NOT_PRESENT (0x00) +#define MPI2_IOUNITPAGE7_BOARD_TEMP_FAHRENHEIT (0x01) +#define MPI2_IOUNITPAGE7_BOARD_TEMP_CELSIUS (0x02) + /**************************************************************************** @@ -2799,5 +2821,25 @@ typedef struct _MPI2_CONFIG_PAGE_ETHERNET_1 { #define MPI2_ETHPG1_MS_DATA_RATE_1GBIT (0x03) +/**************************************************************************** +* Extended Manufacturing Config Pages +****************************************************************************/ + +/* + * Generic structure to use for product-specific extended manufacturing pages + * (currently Extended Manufacturing Page 40 through Extended Manufacturing + * Page 60). + */ + +typedef struct _MPI2_CONFIG_PAGE_EXT_MAN_PS { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U32 ProductSpecificInfo; /* 0x08 */ +} MPI2_CONFIG_PAGE_EXT_MAN_PS, + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_EXT_MAN_PS, + Mpi2ExtManufacturingPagePS_t, + MPI2_POINTER pMpi2ExtManufacturingPagePS_t; + +/* PageVersion should be provided by product-specific code */ + #endif diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_init.h b/drivers/scsi/mpt2sas/mpi/mpi2_init.h index 165454d5259..de90162413c 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_init.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_init.h @@ -6,7 +6,7 @@ * Title: MPI SCSI initiator mode messages and structures * Creation Date: June 23, 2006 * - * mpi2_init.h Version: 02.00.10 + * mpi2_init.h Version: 02.00.11 * * Version History * --------------- @@ -33,6 +33,7 @@ * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define. * 02-10-10 02.00.09 Removed unused structure that had "#if 0" around it. * 05-12-10 02.00.10 Added optional vendor-unique region to SCSI IO Request. + * 11-10-10 02.00.11 Added MPI2_SCSIIO_NUM_SGLOFFSETS define. * -------------------------------------------------------------------------- */ @@ -139,6 +140,9 @@ typedef struct _MPI2_SCSI_IO_REQUEST #define MPI2_SCSIIO_SGLFLAGS_SGL1_SHIFT (4) #define MPI2_SCSIIO_SGLFLAGS_SGL0_SHIFT (0) +/* number of SGLOffset fields */ +#define MPI2_SCSIIO_NUM_SGLOFFSETS (4) + /* SCSI IO IoFlags bits */ /* Large CDB Address Space */ diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h index 761cbdb8a03..1f0c190d336 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h @@ -6,7 +6,7 @@ * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages * Creation Date: October 11, 2006 * - * mpi2_ioc.h Version: 02.00.15 + * mpi2_ioc.h Version: 02.00.16 * * Version History * --------------- @@ -103,6 +103,7 @@ * defines. * 05-12-10 02.00.15 Marked Task Set Full Event as obsolete. * Added MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY define. + * 11-10-10 02.00.16 Added MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC. 
* -------------------------------------------------------------------------- */ @@ -1032,6 +1033,7 @@ typedef struct _MPI2_FW_DOWNLOAD_REQUEST #define MPI2_FW_DOWNLOAD_ITYPE_MEGARAID (0x09) #define MPI2_FW_DOWNLOAD_ITYPE_COMPLETE (0x0A) #define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B) +#define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0) /* FWDownload TransactionContext Element */ typedef struct _MPI2_FW_DOWNLOAD_TCSGE diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index efa0255491c..83035bd1c48 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -94,7 +94,7 @@ module_param(diag_buffer_enable, int, 0); MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers " "(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)"); -int mpt2sas_fwfault_debug; +static int mpt2sas_fwfault_debug; MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault " "and halt firmware - (default=0)"); @@ -857,7 +857,7 @@ _base_interrupt(int irq, void *bus_id) completed_cmds = 0; cb_idx = 0xFF; do { - rd.word = rpf->Words; + rd.word = le64_to_cpu(rpf->Words); if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX) goto out; reply = 0; @@ -906,7 +906,7 @@ _base_interrupt(int irq, void *bus_id) next: - rpf->Words = ULLONG_MAX; + rpf->Words = cpu_to_le64(ULLONG_MAX); ioc->reply_post_host_index = (ioc->reply_post_host_index == (ioc->reply_post_queue_depth - 1)) ? 0 : ioc->reply_post_host_index + 1; @@ -1740,9 +1740,11 @@ _base_display_dell_branding(struct MPT2SAS_ADAPTER *ioc) static void _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc) { - if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_INTEL && - ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008) { + if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL) + return; + switch (ioc->pdev->device) { + case MPI2_MFGPAGE_DEVID_SAS2008: switch (ioc->pdev->subsystem_device) { case MPT2SAS_INTEL_RMS2LL080_SSDID: printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, @@ -1752,7 +1754,20 @@ _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc) printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, MPT2SAS_INTEL_RMS2LL040_BRANDING); break; + default: + break; + } + case MPI2_MFGPAGE_DEVID_SAS2308_2: + switch (ioc->pdev->subsystem_device) { + case MPT2SAS_INTEL_RS25GB008_SSDID: + printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, + MPT2SAS_INTEL_RS25GB008_BRANDING); + break; + default: + break; } + default: + break; } } @@ -1817,7 +1832,9 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc) char desc[16]; u8 revision; u32 iounit_pg1_flags; + u32 bios_version; + bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion); pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision); strncpy(desc, ioc->manu_pg0.ChipName, 16); printk(MPT2SAS_INFO_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), " @@ -1828,10 +1845,10 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc) (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8, ioc->facts.FWVersion.Word & 0x000000FF, revision, - (ioc->bios_pg3.BiosVersion & 0xFF000000) >> 24, - (ioc->bios_pg3.BiosVersion & 0x00FF0000) >> 16, - (ioc->bios_pg3.BiosVersion & 0x0000FF00) >> 8, - ioc->bios_pg3.BiosVersion & 0x000000FF); + (bios_version & 0xFF000000) >> 24, + (bios_version & 0x00FF0000) >> 16, + (bios_version & 0x0000FF00) >> 8, + bios_version & 0x000000FF); _base_display_dell_branding(ioc); _base_display_intel_branding(ioc); @@ -2150,7 +2167,7 @@ _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc) static int _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, 
int sleep_flag) { - Mpi2IOCFactsReply_t *facts; + struct mpt2sas_facts *facts; u32 queue_size, queue_diff; u16 max_sge_elements; u16 num_of_reply_frames; @@ -2783,7 +2800,7 @@ _base_handshake_req_reply_wait(struct MPT2SAS_ADAPTER *ioc, int request_bytes, int i; u8 failed; u16 dummy; - u32 *mfp; + __le32 *mfp; /* make sure doorbell is not in use */ if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) { @@ -2871,7 +2888,7 @@ _base_handshake_req_reply_wait(struct MPT2SAS_ADAPTER *ioc, int request_bytes, writel(0, &ioc->chip->HostInterruptStatus); if (ioc->logging_level & MPT_DEBUG_INIT) { - mfp = (u32 *)reply; + mfp = (__le32 *)reply; printk(KERN_INFO "\toffset:data\n"); for (i = 0; i < reply_bytes/4; i++) printk(KERN_INFO "\t[0x%02x]:%08x\n", i*4, @@ -3097,7 +3114,8 @@ static int _base_get_port_facts(struct MPT2SAS_ADAPTER *ioc, int port, int sleep_flag) { Mpi2PortFactsRequest_t mpi_request; - Mpi2PortFactsReply_t mpi_reply, *pfacts; + Mpi2PortFactsReply_t mpi_reply; + struct mpt2sas_port_facts *pfacts; int mpi_reply_sz, mpi_request_sz, r; dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, @@ -3139,7 +3157,8 @@ static int _base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) { Mpi2IOCFactsRequest_t mpi_request; - Mpi2IOCFactsReply_t mpi_reply, *facts; + Mpi2IOCFactsReply_t mpi_reply; + struct mpt2sas_facts *facts; int mpi_reply_sz, mpi_request_sz, r; dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, @@ -3225,17 +3244,6 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION); mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION); - /* In MPI Revision I (0xA), the SystemReplyFrameSize(offset 0x18) was - * removed and made reserved. For those with older firmware will need - * this fix. It was decided that the Reply and Request frame sizes are - * the same. - */ - if ((ioc->facts.HeaderVersion >> 8) < 0xA) { - mpi_request.Reserved7 = cpu_to_le16(ioc->reply_sz); -/* mpi_request.SystemReplyFrameSize = - * cpu_to_le16(ioc->reply_sz); - */ - } mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4); mpi_request.ReplyDescriptorPostQueueDepth = @@ -3243,25 +3251,17 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) mpi_request.ReplyFreeQueueDepth = cpu_to_le16(ioc->reply_free_queue_depth); -#if BITS_PER_LONG > 32 mpi_request.SenseBufferAddressHigh = - cpu_to_le32(ioc->sense_dma >> 32); + cpu_to_le32((u64)ioc->sense_dma >> 32); mpi_request.SystemReplyAddressHigh = - cpu_to_le32(ioc->reply_dma >> 32); + cpu_to_le32((u64)ioc->reply_dma >> 32); mpi_request.SystemRequestFrameBaseAddress = - cpu_to_le64(ioc->request_dma); + cpu_to_le64((u64)ioc->request_dma); mpi_request.ReplyFreeQueueAddress = - cpu_to_le64(ioc->reply_free_dma); + cpu_to_le64((u64)ioc->reply_free_dma); mpi_request.ReplyDescriptorPostQueueAddress = - cpu_to_le64(ioc->reply_post_free_dma); -#else - mpi_request.SystemRequestFrameBaseAddress = - cpu_to_le32(ioc->request_dma); - mpi_request.ReplyFreeQueueAddress = - cpu_to_le32(ioc->reply_free_dma); - mpi_request.ReplyDescriptorPostQueueAddress = - cpu_to_le32(ioc->reply_post_free_dma); -#endif + cpu_to_le64((u64)ioc->reply_post_free_dma); + /* This time stamp specifies number of milliseconds * since epoch ~ midnight January 1, 1970. 
@@ -3271,10 +3271,10 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) (current_time.tv_usec / 1000)); if (ioc->logging_level & MPT_DEBUG_INIT) { - u32 *mfp; + __le32 *mfp; int i; - mfp = (u32 *)&mpi_request; + mfp = (__le32 *)&mpi_request; printk(KERN_INFO "\toffset:data\n"); for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++) printk(KERN_INFO "\t[0x%02x]:%08x\n", i*4, @@ -3759,7 +3759,7 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) /* initialize Reply Post Free Queue */ for (i = 0; i < ioc->reply_post_queue_depth; i++) - ioc->reply_post_free[i].Words = ULLONG_MAX; + ioc->reply_post_free[i].Words = cpu_to_le64(ULLONG_MAX); r = _base_send_ioc_init(ioc, sleep_flag); if (r) diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index dcc289c2545..8d5be2120c6 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -69,11 +69,11 @@ #define MPT2SAS_DRIVER_NAME "mpt2sas" #define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" #define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" -#define MPT2SAS_DRIVER_VERSION "08.100.00.02" -#define MPT2SAS_MAJOR_VERSION 08 +#define MPT2SAS_DRIVER_VERSION "09.100.00.00" +#define MPT2SAS_MAJOR_VERSION 09 #define MPT2SAS_MINOR_VERSION 100 #define MPT2SAS_BUILD_VERSION 00 -#define MPT2SAS_RELEASE_VERSION 02 +#define MPT2SAS_RELEASE_VERSION 00 /* * Set MPT2SAS_SG_DEPTH value based on user input. @@ -161,12 +161,15 @@ "Intel Integrated RAID Module RMS2LL080" #define MPT2SAS_INTEL_RMS2LL040_BRANDING \ "Intel Integrated RAID Module RMS2LL040" +#define MPT2SAS_INTEL_RS25GB008_BRANDING \ + "Intel(R) RAID Controller RS25GB008" /* * Intel HBA SSDIDs */ #define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E #define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F +#define MPT2SAS_INTEL_RS25GB008_SSDID 0x3000 /* @@ -541,6 +544,63 @@ struct _tr_list { typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr); +/* IOC Facts and Port Facts converted from little endian to cpu */ +union mpi2_version_union { + MPI2_VERSION_STRUCT Struct; + u32 Word; +}; + +struct mpt2sas_facts { + u16 MsgVersion; + u16 HeaderVersion; + u8 IOCNumber; + u8 VP_ID; + u8 VF_ID; + u16 IOCExceptions; + u16 IOCStatus; + u32 IOCLogInfo; + u8 MaxChainDepth; + u8 WhoInit; + u8 NumberOfPorts; + u8 MaxMSIxVectors; + u16 RequestCredit; + u16 ProductID; + u32 IOCCapabilities; + union mpi2_version_union FWVersion; + u16 IOCRequestFrameSize; + u16 Reserved3; + u16 MaxInitiators; + u16 MaxTargets; + u16 MaxSasExpanders; + u16 MaxEnclosures; + u16 ProtocolFlags; + u16 HighPriorityCredit; + u16 MaxReplyDescriptorPostQueueDepth; + u8 ReplyFrameSize; + u8 MaxVolumes; + u16 MaxDevHandle; + u16 MaxPersistentEntries; + u16 MinDevHandle; +}; + +struct mpt2sas_port_facts { + u8 PortNumber; + u8 VP_ID; + u8 VF_ID; + u8 PortType; + u16 MaxPostedCmdBuffers; +}; + +/** + * enum mutex_type - task management mutex type + * @TM_MUTEX_OFF: mutex is not required becuase calling function is acquiring it + * @TM_MUTEX_ON: mutex is required + */ +enum mutex_type { + TM_MUTEX_OFF = 0, + TM_MUTEX_ON = 1, +}; + /** * struct MPT2SAS_ADAPTER - per adapter struct * @list: ioc_list @@ -703,6 +763,7 @@ struct MPT2SAS_ADAPTER { /* misc flags */ int aen_event_read_flag; u8 broadcast_aen_busy; + u16 broadcast_aen_pending; u8 shost_recovery; struct mutex reset_in_progress_mutex; @@ -749,8 +810,8 @@ struct MPT2SAS_ADAPTER { u32 event_masks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS]; /* static config pages */ - 
Mpi2IOCFactsReply_t facts; - Mpi2PortFactsReply_t *pfacts; + struct mpt2sas_facts facts; + struct mpt2sas_port_facts *pfacts; Mpi2ManufacturingPage0_t manu_pg0; Mpi2BiosPage2_t bios_pg2; Mpi2BiosPage3_t bios_pg3; @@ -840,7 +901,7 @@ struct MPT2SAS_ADAPTER { /* reply free queue */ u16 reply_free_queue_depth; - u32 *reply_free; + __le32 *reply_free; dma_addr_t reply_free_dma; struct dma_pool *reply_free_dma_pool; u32 reply_free_host_index; @@ -932,8 +993,8 @@ void mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc); u8 mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply); int mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, - uint channel, uint id, uint lun, u8 type, u16 smid_task, - ulong timeout, struct scsi_cmnd *scmd); + uint channel, uint id, uint lun, u8 type, u16 smid_task, + ulong timeout, unsigned long serial_number, enum mutex_type m_type); void mpt2sas_scsih_set_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle); void mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle); void mpt2sas_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address); diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c index 437c2d94c45..38ed0260959 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c @@ -994,7 +994,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc, mpt2sas_scsih_issue_tm(ioc, le16_to_cpu(mpi_request->FunctionDependent1), 0, 0, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 10, - NULL); + 0, TM_MUTEX_ON); ioc->tm_cmds.status = MPT2_CMD_NOT_USED; } else mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, @@ -2706,13 +2706,13 @@ static DEVICE_ATTR(ioc_reset_count, S_IRUGO, _ctl_ioc_reset_count_show, NULL); struct DIAG_BUFFER_START { - u32 Size; - u32 DiagVersion; + __le32 Size; + __le32 DiagVersion; u8 BufferType; u8 Reserved[3]; - u32 Reserved1; - u32 Reserved2; - u32 Reserved3; + __le32 Reserved1; + __le32 Reserved2; + __le32 Reserved3; }; /** * _ctl_host_trace_buffer_size_show - host buffer size (trace only) diff --git a/drivers/scsi/mpt2sas/mpt2sas_debug.h b/drivers/scsi/mpt2sas/mpt2sas_debug.h index 3dcddfeb6f4..9731f8e661b 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_debug.h +++ b/drivers/scsi/mpt2sas/mpt2sas_debug.h @@ -164,7 +164,7 @@ static inline void _debug_dump_mf(void *mpi_request, int sz) { int i; - u32 *mfp = (u32 *)mpi_request; + __le32 *mfp = (__le32 *)mpi_request; printk(KERN_INFO "mf:\n\t"); for (i = 0; i < sz; i++) { diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index a7dbc6825f5..6abd2fcc43e 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -94,6 +94,10 @@ static u32 logging_level; MODULE_PARM_DESC(logging_level, " bits for enabling additional logging info " "(default=0)"); +static ushort max_sectors = 0xFFFF; +module_param(max_sectors, ushort, 0); +MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 8192 default=8192"); + /* scsi-mid layer global parmeter is max_report_luns, which is 511 */ #define MPT2SAS_MAX_LUN (16895) static int max_lun = MPT2SAS_MAX_LUN; @@ -1956,7 +1960,7 @@ _scsih_slave_configure(struct scsi_device *sdev) case MPI2_RAID_VOL_TYPE_RAID1E: qdepth = MPT2SAS_RAID_QUEUE_DEPTH; if (ioc->manu_pg10.OEMIdentifier && - (ioc->manu_pg10.GenericFlags0 & + (le32_to_cpu(ioc->manu_pg10.GenericFlags0) & MFG10_GF0_R10_DISPLAY) && !(raid_device->num_pds % 2)) r_level = "RAID10"; @@ -2236,6 +2240,8 @@ mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER 
*ioc, u16 handle) * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h) * @smid_task: smid assigned to the task * @timeout: timeout in seconds + * @serial_number: the serial_number from scmd + * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF * Context: user * * A generic API for sending task management requests to firmware. @@ -2247,17 +2253,18 @@ mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle) int mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel, uint id, uint lun, u8 type, u16 smid_task, ulong timeout, - struct scsi_cmnd *scmd) + unsigned long serial_number, enum mutex_type m_type) { Mpi2SCSITaskManagementRequest_t *mpi_request; Mpi2SCSITaskManagementReply_t *mpi_reply; u16 smid = 0; u32 ioc_state; unsigned long timeleft; - struct scsi_cmnd *scmd_lookup; + struct scsiio_tracker *scsi_lookup = NULL; int rc; - mutex_lock(&ioc->tm_cmds.mutex); + if (m_type == TM_MUTEX_ON) + mutex_lock(&ioc->tm_cmds.mutex); if (ioc->tm_cmds.status != MPT2_CMD_NOT_USED) { printk(MPT2SAS_INFO_FMT "%s: tm_cmd busy!!!\n", __func__, ioc->name); @@ -2277,18 +2284,18 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel, if (ioc_state & MPI2_DOORBELL_USED) { dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "unexpected doorbell " "active!\n", ioc->name)); - mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, + rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, FORCE_BIG_HAMMER); - rc = SUCCESS; + rc = (!rc) ? SUCCESS : FAILED; goto err_out; } if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { mpt2sas_base_fault_info(ioc, ioc_state & MPI2_DOORBELL_DATA_MASK); - mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, + rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, FORCE_BIG_HAMMER); - rc = SUCCESS; + rc = (!rc) ? SUCCESS : FAILED; goto err_out; } @@ -2300,6 +2307,9 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel, goto err_out; } + if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) + scsi_lookup = &ioc->scsi_lookup[smid_task - 1]; + dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "sending tm: handle(0x%04x)," " task_type(0x%02x), smid(%d)\n", ioc->name, handle, type, smid_task)); @@ -2307,6 +2317,7 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel, mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); ioc->tm_cmds.smid = smid; memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t)); + memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t)); mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; mpi_request->DevHandle = cpu_to_le16(handle); mpi_request->TaskType = type; @@ -2322,9 +2333,9 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel, _debug_dump_mf(mpi_request, sizeof(Mpi2SCSITaskManagementRequest_t)/4); if (!(ioc->tm_cmds.status & MPT2_CMD_RESET)) { - mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, + rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, FORCE_BIG_HAMMER); - rc = SUCCESS; + rc = (!rc) ? SUCCESS : FAILED; ioc->tm_cmds.status = MPT2_CMD_NOT_USED; mpt2sas_scsih_clear_tm_flag(ioc, handle); goto err_out; @@ -2346,20 +2357,12 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel, } } - /* sanity check: - * Check to see the commands were terminated. - * This is only needed for eh callbacks, hence the scmd check. 
- */ - rc = FAILED; - if (scmd == NULL) - goto bypass_sanity_checks; switch (type) { case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK: - scmd_lookup = _scsih_scsi_lookup_get(ioc, smid_task); - if (scmd_lookup) - rc = FAILED; - else - rc = SUCCESS; + rc = SUCCESS; + if (scsi_lookup->scmd == NULL) + break; + rc = FAILED; break; case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET: @@ -2369,24 +2372,31 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel, rc = SUCCESS; break; + case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET: case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: if (_scsih_scsi_lookup_find_by_lun(ioc, id, lun, channel)) rc = FAILED; else rc = SUCCESS; break; + case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK: + rc = SUCCESS; + break; + default: + rc = FAILED; + break; } - bypass_sanity_checks: - mpt2sas_scsih_clear_tm_flag(ioc, handle); ioc->tm_cmds.status = MPT2_CMD_NOT_USED; - mutex_unlock(&ioc->tm_cmds.mutex); + if (m_type == TM_MUTEX_ON) + mutex_unlock(&ioc->tm_cmds.mutex); return rc; err_out: - mutex_unlock(&ioc->tm_cmds.mutex); + if (m_type == TM_MUTEX_ON) + mutex_unlock(&ioc->tm_cmds.mutex); return rc; } @@ -2496,7 +2506,8 @@ _scsih_abort(struct scsi_cmnd *scmd) handle = sas_device_priv_data->sas_target->handle; r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel, scmd->device->id, scmd->device->lun, - MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, scmd); + MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, + scmd->serial_number, TM_MUTEX_ON); out: sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n", @@ -2557,7 +2568,8 @@ _scsih_dev_reset(struct scsi_cmnd *scmd) r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel, scmd->device->id, scmd->device->lun, - MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, scmd); + MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, 0, + TM_MUTEX_ON); out: sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n", @@ -2617,7 +2629,7 @@ _scsih_target_reset(struct scsi_cmnd *scmd) r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel, scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, - 30, scmd); + 30, 0, TM_MUTEX_ON); out: starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n", @@ -2750,6 +2762,31 @@ _scsih_fw_event_cleanup_queue(struct MPT2SAS_ADAPTER *ioc) } /** + * _scsih_ublock_io_all_device - unblock every device + * @ioc: per adapter object + * + * change the device state from block to running + */ +static void +_scsih_ublock_io_all_device(struct MPT2SAS_ADAPTER *ioc) +{ + struct MPT2SAS_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; + + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + if (!sas_device_priv_data->block) + continue; + sas_device_priv_data->block = 0; + dewtprintk(ioc, sdev_printk(KERN_INFO, sdev, "device_running, " + "handle(0x%04x)\n", + sas_device_priv_data->sas_target->handle)); + scsi_internal_device_unblock(sdev); + } +} +/** * _scsih_ublock_io_device - set the device state to SDEV_RUNNING * @ioc: per adapter object * @handle: device handle @@ -2779,6 +2816,34 @@ _scsih_ublock_io_device(struct MPT2SAS_ADAPTER *ioc, u16 handle) } /** + * _scsih_block_io_all_device - set the device state to SDEV_BLOCK + * @ioc: per adapter object + * @handle: device handle + * + * During device pull we need to appropiately set the sdev state. 
+ */ +static void +_scsih_block_io_all_device(struct MPT2SAS_ADAPTER *ioc) +{ + struct MPT2SAS_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; + + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + if (sas_device_priv_data->block) + continue; + sas_device_priv_data->block = 1; + dewtprintk(ioc, sdev_printk(KERN_INFO, sdev, "device_blocked, " + "handle(0x%04x)\n", + sas_device_priv_data->sas_target->handle)); + scsi_internal_device_block(sdev); + } +} + + +/** * _scsih_block_io_device - set the device state to SDEV_BLOCK * @ioc: per adapter object * @handle: device handle @@ -3698,7 +3763,7 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) return 0; } - if (ioc->pci_error_recovery) { + if (ioc->pci_error_recovery || ioc->remove_host) { scmd->result = DID_NO_CONNECT << 16; scmd->scsi_done(scmd); return 0; @@ -4193,6 +4258,7 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) u32 log_info; struct MPT2SAS_DEVICE *sas_device_priv_data; u32 response_code = 0; + unsigned long flags; mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); scmd = _scsih_scsi_lookup_get_clear(ioc, smid); @@ -4217,6 +4283,9 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) * the failed direct I/O should be redirected to volume */ if (_scsih_scsi_direct_io_get(ioc, smid)) { + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + ioc->scsi_lookup[smid - 1].scmd = scmd; + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); _scsih_scsi_direct_io_set(ioc, smid, 0); memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len); mpi_request->DevHandle = @@ -4598,7 +4667,7 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle) Mpi2SasEnclosurePage0_t enclosure_pg0; u32 ioc_status; u16 parent_handle; - __le64 sas_address, sas_address_parent = 0; + u64 sas_address, sas_address_parent = 0; int i; unsigned long flags; struct _sas_port *mpt2sas_port = NULL; @@ -5380,9 +5449,10 @@ _scsih_sas_device_status_change_event_debug(struct MPT2SAS_ADAPTER *ioc, break; } printk(MPT2SAS_INFO_FMT "device status change: (%s)\n" - "\thandle(0x%04x), sas address(0x%016llx)", ioc->name, - reason_str, le16_to_cpu(event_data->DevHandle), - (unsigned long long)le64_to_cpu(event_data->SASAddress)); + "\thandle(0x%04x), sas address(0x%016llx), tag(%d)", + ioc->name, reason_str, le16_to_cpu(event_data->DevHandle), + (unsigned long long)le64_to_cpu(event_data->SASAddress), + le16_to_cpu(event_data->TaskTag)); if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA) printk(MPT2SAS_INFO_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name, event_data->ASC, event_data->ASCQ); @@ -5404,7 +5474,7 @@ _scsih_sas_device_status_change_event(struct MPT2SAS_ADAPTER *ioc, { struct MPT2SAS_TARGET *target_priv_data; struct _sas_device *sas_device; - __le64 sas_address; + u64 sas_address; unsigned long flags; Mpi2EventDataSasDeviceStatusChange_t *event_data = fw_event->event_data; @@ -5522,25 +5592,38 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc, u32 termination_count; u32 query_count; Mpi2SCSITaskManagementReply_t *mpi_reply; -#ifdef CONFIG_SCSI_MPT2SAS_LOGGING Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data; -#endif u16 ioc_status; unsigned long flags; int r; + u8 max_retries = 0; + u8 task_abort_retries; + + mutex_lock(&ioc->tm_cmds.mutex); + dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter: phy number(%d), " + "width(%d)\n", 
ioc->name, __func__, event_data->PhyNum, + event_data->PortWidth)); - dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "broadcast primitive: " - "phy number(%d), width(%d)\n", ioc->name, event_data->PhyNum, - event_data->PortWidth)); - dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name, - __func__)); + _scsih_block_io_all_device(ioc); spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); - ioc->broadcast_aen_busy = 0; + mpi_reply = ioc->tm_cmds.reply; +broadcast_aen_retry: + + /* sanity checks for retrying this loop */ + if (max_retries++ == 5) { + dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: giving up\n", + ioc->name, __func__)); + goto out; + } else if (max_retries > 1) + dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: %d retry\n", + ioc->name, __func__, max_retries - 1)); + termination_count = 0; query_count = 0; - mpi_reply = ioc->tm_cmds.reply; for (smid = 1; smid <= ioc->scsiio_depth; smid++) { + if (ioc->ioc_reset_in_progress_status) + goto out; scmd = _scsih_scsi_lookup_get(ioc, smid); if (!scmd) continue; @@ -5561,34 +5644,90 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc, lun = sas_device_priv_data->lun; query_count++; + if (ioc->ioc_reset_in_progress_status) + goto out; + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); - mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun, - MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, NULL); - ioc->tm_cmds.status = MPT2_CMD_NOT_USED; + r = mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun, + MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, 0, + TM_MUTEX_OFF); + if (r == FAILED) { + sdev_printk(KERN_WARNING, sdev, + "mpt2sas_scsih_issue_tm: FAILED when sending " + "QUERY_TASK: scmd(%p)\n", scmd); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + goto broadcast_aen_retry; + } ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; - if ((ioc_status == MPI2_IOCSTATUS_SUCCESS) && - (mpi_reply->ResponseCode == + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + sdev_printk(KERN_WARNING, sdev, "query task: FAILED " + "with IOCSTATUS(0x%04x), scmd(%p)\n", ioc_status, + scmd); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + goto broadcast_aen_retry; + } + + /* see if IO is still owned by IOC and target */ + if (mpi_reply->ResponseCode == MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED || mpi_reply->ResponseCode == - MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC)) { + MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) { spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); continue; } + task_abort_retries = 0; + tm_retry: + if (task_abort_retries++ == 60) { + dewtprintk(ioc, printk(MPT2SAS_INFO_FMT + "%s: ABORT_TASK: giving up\n", ioc->name, + __func__)); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + goto broadcast_aen_retry; + } + + if (ioc->ioc_reset_in_progress_status) + goto out_no_lock; + r = mpt2sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id, sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, - scmd); - if (r == FAILED) - sdev_printk(KERN_WARNING, sdev, "task abort: FAILED " + scmd->serial_number, TM_MUTEX_OFF); + if (r == FAILED) { + sdev_printk(KERN_WARNING, sdev, + "mpt2sas_scsih_issue_tm: ABORT_TASK: FAILED : " "scmd(%p)\n", scmd); + goto tm_retry; + } + + if (task_abort_retries > 1) + sdev_printk(KERN_WARNING, sdev, + "mpt2sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):" + " scmd(%p)\n", + task_abort_retries - 1, scmd); + termination_count += le32_to_cpu(mpi_reply->TerminationCount); spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); } + + if (ioc->broadcast_aen_pending) { + dewtprintk(ioc, printk(MPT2SAS_INFO_FMT 
"%s: loop back due to" + " pending AEN\n", ioc->name, __func__)); + ioc->broadcast_aen_pending = 0; + goto broadcast_aen_retry; + } + + out: spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + out_no_lock: - dtmprintk(ioc, printk(MPT2SAS_INFO_FMT + dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s - exit, query_count = %d termination_count = %d\n", ioc->name, __func__, query_count, termination_count)); + + ioc->broadcast_aen_busy = 0; + if (!ioc->ioc_reset_in_progress_status) + _scsih_ublock_io_all_device(ioc); + mutex_unlock(&ioc->tm_cmds.mutex); } /** @@ -6566,7 +6705,7 @@ _scsih_search_responding_expanders(struct MPT2SAS_ADAPTER *ioc) Mpi2ExpanderPage0_t expander_pg0; Mpi2ConfigReply_t mpi_reply; u16 ioc_status; - __le64 sas_address; + u64 sas_address; u16 handle; printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__); @@ -6862,10 +7001,14 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, mpi_reply->EventData; if (baen_data->Primitive != - MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT || - ioc->broadcast_aen_busy) + MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT) return 1; - ioc->broadcast_aen_busy = 1; + + if (ioc->broadcast_aen_busy) { + ioc->broadcast_aen_pending++; + return 1; + } else + ioc->broadcast_aen_busy = 1; break; } @@ -7211,7 +7354,6 @@ _scsih_remove(struct pci_dev *pdev) } sas_remove_host(shost); - _scsih_shutdown(pdev); list_del(&ioc->list); scsi_remove_host(shost); scsi_host_put(shost); @@ -7436,6 +7578,25 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) shost->transportt = mpt2sas_transport_template; shost->unique_id = ioc->id; + if (max_sectors != 0xFFFF) { + if (max_sectors < 64) { + shost->max_sectors = 64; + printk(MPT2SAS_WARN_FMT "Invalid value %d passed " + "for max_sectors, range is 64 to 8192. Assigning " + "value of 64.\n", ioc->name, max_sectors); + } else if (max_sectors > 8192) { + shost->max_sectors = 8192; + printk(MPT2SAS_WARN_FMT "Invalid value %d passed " + "for max_sectors, range is 64 to 8192. 
Assigning " + "default value of 8192.\n", ioc->name, + max_sectors); + } else { + shost->max_sectors = max_sectors & 0xFFFE; + printk(MPT2SAS_INFO_FMT "The max_sectors value is " + "set to %d\n", ioc->name, shost->max_sectors); + } + } + if ((scsi_add_host(shost, &pdev->dev))) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); @@ -7505,7 +7666,7 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); - u32 device_state; + pci_power_t device_state; mpt2sas_base_stop_watchdog(ioc); scsi_block_requests(shost); @@ -7532,7 +7693,7 @@ _scsih_resume(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); - u32 device_state = pdev->current_state; + pci_power_t device_state = pdev->current_state; int r; printk(MPT2SAS_INFO_FMT "pdev=0x%p, slot=%s, previous " diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c index cb1cdecbe0f..15c79802621 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_transport.c +++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c @@ -299,7 +299,6 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc, void *data_out = NULL; dma_addr_t data_out_dma; u32 sz; - u64 *sas_address_le; u16 wait_state_count; if (ioc->shost_recovery || ioc->pci_error_recovery) { @@ -372,8 +371,7 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc, mpi_request->PhysicalPort = 0xFF; mpi_request->VF_ID = 0; /* TODO */ mpi_request->VP_ID = 0; - sas_address_le = (u64 *)&mpi_request->SASAddress; - *sas_address_le = cpu_to_le64(sas_address); + mpi_request->SASAddress = cpu_to_le64(sas_address); mpi_request->RequestDataLength = cpu_to_le16(sizeof(struct rep_manu_request)); psge = &mpi_request->SGL; @@ -1049,14 +1047,14 @@ struct phy_error_log_reply{ u8 function; /* 0x11 */ u8 function_result; u8 response_length; - u16 expander_change_count; + __be16 expander_change_count; u8 reserved_1[3]; u8 phy_identifier; u8 reserved_2[2]; - u32 invalid_dword; - u32 running_disparity_error; - u32 loss_of_dword_sync; - u32 phy_reset_problem; + __be32 invalid_dword; + __be32 running_disparity_error; + __be32 loss_of_dword_sync; + __be32 phy_reset_problem; }; /** @@ -1085,7 +1083,6 @@ _transport_get_expander_phy_error_log(struct MPT2SAS_ADAPTER *ioc, void *data_out = NULL; dma_addr_t data_out_dma; u32 sz; - u64 *sas_address_le; u16 wait_state_count; if (ioc->shost_recovery || ioc->pci_error_recovery) { @@ -1160,8 +1157,7 @@ _transport_get_expander_phy_error_log(struct MPT2SAS_ADAPTER *ioc, mpi_request->PhysicalPort = 0xFF; mpi_request->VF_ID = 0; /* TODO */ mpi_request->VP_ID = 0; - sas_address_le = (u64 *)&mpi_request->SASAddress; - *sas_address_le = cpu_to_le64(phy->identify.sas_address); + mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address); mpi_request->RequestDataLength = cpu_to_le16(sizeof(struct phy_error_log_request)); psge = &mpi_request->SGL; @@ -1406,7 +1402,6 @@ _transport_expander_phy_control(struct MPT2SAS_ADAPTER *ioc, void *data_out = NULL; dma_addr_t data_out_dma; u32 sz; - u64 *sas_address_le; u16 wait_state_count; if (ioc->shost_recovery) { @@ -1486,8 +1481,7 @@ _transport_expander_phy_control(struct MPT2SAS_ADAPTER *ioc, mpi_request->PhysicalPort = 0xFF; mpi_request->VF_ID = 0; /* TODO */ mpi_request->VP_ID = 0; - sas_address_le = (u64 *)&mpi_request->SASAddress; - *sas_address_le = 
cpu_to_le64(phy->identify.sas_address); + mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address); mpi_request->RequestDataLength = cpu_to_le16(sizeof(struct phy_error_log_request)); psge = &mpi_request->SGL; @@ -1914,7 +1908,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, mpi_request->PhysicalPort = 0xFF; mpi_request->VF_ID = 0; /* TODO */ mpi_request->VP_ID = 0; - *((u64 *)&mpi_request->SASAddress) = (rphy) ? + mpi_request->SASAddress = (rphy) ? cpu_to_le64(rphy->identify.sas_address) : cpu_to_le64(ioc->sas_hba.sas_address); mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4); diff --git a/drivers/scsi/mvsas/Kconfig b/drivers/scsi/mvsas/Kconfig index c82b012aba3..78f7e20a0c1 100644 --- a/drivers/scsi/mvsas/Kconfig +++ b/drivers/scsi/mvsas/Kconfig @@ -3,7 +3,7 @@ # # Copyright 2007 Red Hat, Inc. # Copyright 2008 Marvell. <kewei@marvell.com> -# Copyright 2009-20011 Marvell. <yuxiangl@marvell.com> +# Copyright 2009-2011 Marvell. <yuxiangl@marvell.com> # # This file is licensed under GPLv2. # @@ -41,3 +41,10 @@ config SCSI_MVSAS_DEBUG help Compiles the 88SE64XX/88SE94XX driver in debug mode. In debug mode, the driver prints some messages to the console. +config SCSI_MVSAS_TASKLET + bool "Support for interrupt tasklet" + default n + depends on SCSI_MVSAS + help + Compiles the 88SE64xx/88SE94xx driver in interrupt tasklet mode.In this mode, + the interrupt will schedule a tasklet. diff --git a/drivers/scsi/mvsas/mv_64xx.c b/drivers/scsi/mvsas/mv_64xx.c index 13c96048139..8ba47229049 100644 --- a/drivers/scsi/mvsas/mv_64xx.c +++ b/drivers/scsi/mvsas/mv_64xx.c @@ -33,7 +33,6 @@ static void mvs_64xx_detect_porttype(struct mvs_info *mvi, int i) u32 reg; struct mvs_phy *phy = &mvi->phy[i]; - /* TODO check & save device type */ reg = mr32(MVS_GBL_PORT_TYPE); phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); if (reg & MODE_SAS_SATA & (1 << i)) @@ -48,7 +47,7 @@ static void __devinit mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id) u32 tmp; tmp = mr32(MVS_PCS); - if (mvi->chip->n_phy <= 4) + if (mvi->chip->n_phy <= MVS_SOC_PORTS) tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT); else tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2); @@ -58,24 +57,16 @@ static void __devinit mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id) static void __devinit mvs_64xx_phy_hacks(struct mvs_info *mvi) { void __iomem *regs = mvi->regs; + int i; mvs_phy_hacks(mvi); if (!(mvi->flags & MVF_FLAG_SOC)) { - /* TEST - for phy decoding error, adjust voltage levels */ - mw32(MVS_P0_VSR_ADDR + 0, 0x8); - mw32(MVS_P0_VSR_DATA + 0, 0x2F0); - - mw32(MVS_P0_VSR_ADDR + 8, 0x8); - mw32(MVS_P0_VSR_DATA + 8, 0x2F0); - - mw32(MVS_P0_VSR_ADDR + 16, 0x8); - mw32(MVS_P0_VSR_DATA + 16, 0x2F0); - - mw32(MVS_P0_VSR_ADDR + 24, 0x8); - mw32(MVS_P0_VSR_DATA + 24, 0x2F0); + for (i = 0; i < MVS_SOC_PORTS; i++) { + mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE8); + mvs_write_port_vsr_data(mvi, i, 0x2F0); + } } else { - int i; /* disable auto port detection */ mw32(MVS_GBL_PORT_TYPE, 0); for (i = 0; i < mvi->chip->n_phy; i++) { @@ -95,7 +86,7 @@ static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id) u32 reg, tmp; if (!(mvi->flags & MVF_FLAG_SOC)) { - if (phy_id < 4) + if (phy_id < MVS_SOC_PORTS) pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, ®); else pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, ®); @@ -104,13 +95,13 @@ static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id) reg = mr32(MVS_PHY_CTL); tmp = reg; - if (phy_id < 4) + if (phy_id < MVS_SOC_PORTS) tmp |= (1U << 
phy_id) << PCTL_LINK_OFFS; else - tmp |= (1U << (phy_id - 4)) << PCTL_LINK_OFFS; + tmp |= (1U << (phy_id - MVS_SOC_PORTS)) << PCTL_LINK_OFFS; if (!(mvi->flags & MVF_FLAG_SOC)) { - if (phy_id < 4) { + if (phy_id < MVS_SOC_PORTS) { pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); mdelay(10); pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, reg); @@ -133,9 +124,9 @@ static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard) tmp &= ~PHYEV_RDY_CH; mvs_write_port_irq_stat(mvi, phy_id, tmp); tmp = mvs_read_phy_ctl(mvi, phy_id); - if (hard == 1) + if (hard == MVS_HARD_RESET) tmp |= PHY_RST_HARD; - else if (hard == 0) + else if (hard == MVS_SOFT_RESET) tmp |= PHY_RST; mvs_write_phy_ctl(mvi, phy_id, tmp); if (hard) { @@ -321,6 +312,11 @@ static int __devinit mvs_64xx_init(struct mvs_info *mvi) /* init phys */ mvs_64xx_phy_hacks(mvi); + tmp = mvs_cr32(mvi, CMD_PHY_MODE_21); + tmp &= 0x0000ffff; + tmp |= 0x00fa0000; + mvs_cw32(mvi, CMD_PHY_MODE_21, tmp); + /* enable auto port detection */ mw32(MVS_GBL_PORT_TYPE, MODE_AUTO_DET_EN); @@ -346,7 +342,7 @@ static int __devinit mvs_64xx_init(struct mvs_info *mvi) mvs_64xx_enable_xmt(mvi, i); - mvs_64xx_phy_reset(mvi, i, 1); + mvs_64xx_phy_reset(mvi, i, MVS_HARD_RESET); msleep(500); mvs_64xx_detect_porttype(mvi, i); } @@ -377,13 +373,7 @@ static int __devinit mvs_64xx_init(struct mvs_info *mvi) mvs_update_phyinfo(mvi, i, 1); } - /* FIXME: update wide port bitmaps */ - /* little endian for open address and command table, etc. */ - /* - * it seems that ( from the spec ) turning on big-endian won't - * do us any good on big-endian machines, need further confirmation - */ cctl = mr32(MVS_CTL); cctl |= CCTL_ENDIAN_CMD; cctl |= CCTL_ENDIAN_DATA; @@ -394,15 +384,19 @@ static int __devinit mvs_64xx_init(struct mvs_info *mvi) /* reset CMD queue */ tmp = mr32(MVS_PCS); tmp |= PCS_CMD_RST; + tmp &= ~PCS_SELF_CLEAR; mw32(MVS_PCS, tmp); - /* interrupt coalescing may cause missing HW interrput in some case, - * and the max count is 0x1ff, while our max slot is 0x200, + /* + * the max count is 0x1ff, while our max slot is 0x200, * it will make count 0. 
*/ tmp = 0; - mw32(MVS_INT_COAL, tmp); + if (MVS_CHIP_SLOT_SZ > 0x1ff) + mw32(MVS_INT_COAL, 0x1ff | COAL_EN); + else + mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ | COAL_EN); - tmp = 0x100; + tmp = 0x10000 | interrupt_coalescing; mw32(MVS_INT_COAL_TMOUT, tmp); /* ladies and gentlemen, start your engines */ @@ -477,13 +471,11 @@ static irqreturn_t mvs_64xx_isr(struct mvs_info *mvi, int irq, u32 stat) /* clear CMD_CMPLT ASAP */ mw32_f(MVS_INT_STAT, CINT_DONE); -#ifndef MVS_USE_TASKLET + spin_lock(&mvi->lock); -#endif mvs_int_full(mvi); -#ifndef MVS_USE_TASKLET spin_unlock(&mvi->lock); -#endif + return IRQ_HANDLED; } @@ -630,7 +622,6 @@ static void mvs_64xx_phy_work_around(struct mvs_info *mvi, int i) { u32 tmp; struct mvs_phy *phy = &mvi->phy[i]; - /* workaround for HW phy decoding error on 1.5g disk drive */ mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6); tmp = mvs_read_port_vsr_data(mvi, i); if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> @@ -661,7 +652,7 @@ void mvs_64xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id, tmp |= lrmax; } mvs_write_phy_ctl(mvi, phy_id, tmp); - mvs_64xx_phy_reset(mvi, phy_id, 1); + mvs_64xx_phy_reset(mvi, phy_id, MVS_HARD_RESET); } static void mvs_64xx_clear_active_cmds(struct mvs_info *mvi) @@ -744,11 +735,13 @@ int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout) return -1; } -#ifndef DISABLE_HOTPLUG_DMA_FIX -void mvs_64xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd) +void mvs_64xx_fix_dma(struct mvs_info *mvi, u32 phy_mask, + int buf_len, int from, void *prd) { int i; struct mvs_prd *buf_prd = prd; + dma_addr_t buf_dma = mvi->bulk_buffer_dma; + buf_prd += from; for (i = 0; i < MAX_SG_ENTRY - from; i++) { buf_prd->addr = cpu_to_le64(buf_dma); @@ -756,7 +749,28 @@ void mvs_64xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd) ++buf_prd; } } -#endif + +static void mvs_64xx_tune_interrupt(struct mvs_info *mvi, u32 time) +{ + void __iomem *regs = mvi->regs; + u32 tmp = 0; + /* + * the max count is 0x1ff, while our max slot is 0x200, + * it will make count 0. 
+ */ + if (time == 0) { + mw32(MVS_INT_COAL, 0); + mw32(MVS_INT_COAL_TMOUT, 0x10000); + } else { + if (MVS_CHIP_SLOT_SZ > 0x1ff) + mw32(MVS_INT_COAL, 0x1ff|COAL_EN); + else + mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ|COAL_EN); + + tmp = 0x10000 | time; + mw32(MVS_INT_COAL_TMOUT, tmp); + } +} const struct mvs_dispatch mvs_64xx_dispatch = { "mv64xx", @@ -780,7 +794,6 @@ const struct mvs_dispatch mvs_64xx_dispatch = { mvs_write_port_irq_stat, mvs_read_port_irq_mask, mvs_write_port_irq_mask, - mvs_get_sas_addr, mvs_64xx_command_active, mvs_64xx_clear_srs_irq, mvs_64xx_issue_stop, @@ -808,8 +821,8 @@ const struct mvs_dispatch mvs_64xx_dispatch = { mvs_64xx_spi_buildcmd, mvs_64xx_spi_issuecmd, mvs_64xx_spi_waitdataready, -#ifndef DISABLE_HOTPLUG_DMA_FIX mvs_64xx_fix_dma, -#endif + mvs_64xx_tune_interrupt, + NULL, }; diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c index 78162c3c36e..3501291618f 100644 --- a/drivers/scsi/mvsas/mv_94xx.c +++ b/drivers/scsi/mvsas/mv_94xx.c @@ -48,6 +48,216 @@ static void mvs_94xx_detect_porttype(struct mvs_info *mvi, int i) } } +void set_phy_tuning(struct mvs_info *mvi, int phy_id, + struct phy_tuning phy_tuning) +{ + u32 tmp, setting_0 = 0, setting_1 = 0; + u8 i; + + /* Remap information for B0 chip: + * + * R0Ch -> R118h[15:0] (Adapted DFE F3 - F5 coefficient) + * R0Dh -> R118h[31:16] (Generation 1 Setting 0) + * R0Eh -> R11Ch[15:0] (Generation 1 Setting 1) + * R0Fh -> R11Ch[31:16] (Generation 2 Setting 0) + * R10h -> R120h[15:0] (Generation 2 Setting 1) + * R11h -> R120h[31:16] (Generation 3 Setting 0) + * R12h -> R124h[15:0] (Generation 3 Setting 1) + * R13h -> R124h[31:16] (Generation 4 Setting 0 (Reserved)) + */ + + /* A0 has a different set of registers */ + if (mvi->pdev->revision == VANIR_A0_REV) + return; + + for (i = 0; i < 3; i++) { + /* loop 3 times, set Gen 1, Gen 2, Gen 3 */ + switch (i) { + case 0: + setting_0 = GENERATION_1_SETTING; + setting_1 = GENERATION_1_2_SETTING; + break; + case 1: + setting_0 = GENERATION_1_2_SETTING; + setting_1 = GENERATION_2_3_SETTING; + break; + case 2: + setting_0 = GENERATION_2_3_SETTING; + setting_1 = GENERATION_3_4_SETTING; + break; + } + + /* Set: + * + * Transmitter Emphasis Enable + * Transmitter Emphasis Amplitude + * Transmitter Amplitude + */ + mvs_write_port_vsr_addr(mvi, phy_id, setting_0); + tmp = mvs_read_port_vsr_data(mvi, phy_id); + tmp &= ~(0xFBE << 16); + tmp |= (((phy_tuning.trans_emp_en << 11) | + (phy_tuning.trans_emp_amp << 7) | + (phy_tuning.trans_amp << 1)) << 16); + mvs_write_port_vsr_data(mvi, phy_id, tmp); + + /* Set Transmitter Amplitude Adjust */ + mvs_write_port_vsr_addr(mvi, phy_id, setting_1); + tmp = mvs_read_port_vsr_data(mvi, phy_id); + tmp &= ~(0xC000); + tmp |= (phy_tuning.trans_amp_adj << 14); + mvs_write_port_vsr_data(mvi, phy_id, tmp); + } +} + +void set_phy_ffe_tuning(struct mvs_info *mvi, int phy_id, + struct ffe_control ffe) +{ + u32 tmp; + + /* Don't run this if A0/B0 */ + if ((mvi->pdev->revision == VANIR_A0_REV) + || (mvi->pdev->revision == VANIR_B0_REV)) + return; + + /* FFE Resistor and Capacitor */ + /* R10Ch DFE Resolution Control/Squelch and FFE Setting + * + * FFE_FORCE [7] + * FFE_RES_SEL [6:4] + * FFE_CAP_SEL [3:0] + */ + mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_FFE_CONTROL); + tmp = mvs_read_port_vsr_data(mvi, phy_id); + tmp &= ~0xFF; + + /* Read from HBA_Info_Page */ + tmp |= ((0x1 << 7) | + (ffe.ffe_rss_sel << 4) | + (ffe.ffe_cap_sel << 0)); + + mvs_write_port_vsr_data(mvi, phy_id, tmp); + + /* R064h PHY Mode Register 1 + * + * DFE_DIS 18 + 
*/ + mvs_write_port_vsr_addr(mvi, phy_id, VSR_REF_CLOCK_CRTL); + tmp = mvs_read_port_vsr_data(mvi, phy_id); + tmp &= ~0x40001; + /* Hard coding */ + /* No defines in HBA_Info_Page */ + tmp |= (0 << 18); + mvs_write_port_vsr_data(mvi, phy_id, tmp); + + /* R110h DFE F0-F1 Coefficient Control/DFE Update Control + * + * DFE_UPDATE_EN [11:6] + * DFE_FX_FORCE [5:0] + */ + mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_DFE_UPDATE_CRTL); + tmp = mvs_read_port_vsr_data(mvi, phy_id); + tmp &= ~0xFFF; + /* Hard coding */ + /* No defines in HBA_Info_Page */ + tmp |= ((0x3F << 6) | (0x0 << 0)); + mvs_write_port_vsr_data(mvi, phy_id, tmp); + + /* R1A0h Interface and Digital Reference Clock Control/Reserved_50h + * + * FFE_TRAIN_EN 3 + */ + mvs_write_port_vsr_addr(mvi, phy_id, VSR_REF_CLOCK_CRTL); + tmp = mvs_read_port_vsr_data(mvi, phy_id); + tmp &= ~0x8; + /* Hard coding */ + /* No defines in HBA_Info_Page */ + tmp |= (0 << 3); + mvs_write_port_vsr_data(mvi, phy_id, tmp); +} + +/*Notice: this function must be called when phy is disabled*/ +void set_phy_rate(struct mvs_info *mvi, int phy_id, u8 rate) +{ + union reg_phy_cfg phy_cfg, phy_cfg_tmp; + mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2); + phy_cfg_tmp.v = mvs_read_port_vsr_data(mvi, phy_id); + phy_cfg.v = 0; + phy_cfg.u.disable_phy = phy_cfg_tmp.u.disable_phy; + phy_cfg.u.sas_support = 1; + phy_cfg.u.sata_support = 1; + phy_cfg.u.sata_host_mode = 1; + + switch (rate) { + case 0x0: + /* support 1.5 Gbps */ + phy_cfg.u.speed_support = 1; + phy_cfg.u.snw_3_support = 0; + phy_cfg.u.tx_lnk_parity = 1; + phy_cfg.u.tx_spt_phs_lnk_rate = 0x30; + break; + case 0x1: + + /* support 1.5, 3.0 Gbps */ + phy_cfg.u.speed_support = 3; + phy_cfg.u.tx_spt_phs_lnk_rate = 0x3c; + phy_cfg.u.tx_lgcl_lnk_rate = 0x08; + break; + case 0x2: + default: + /* support 1.5, 3.0, 6.0 Gbps */ + phy_cfg.u.speed_support = 7; + phy_cfg.u.snw_3_support = 1; + phy_cfg.u.tx_lnk_parity = 1; + phy_cfg.u.tx_spt_phs_lnk_rate = 0x3f; + phy_cfg.u.tx_lgcl_lnk_rate = 0x09; + break; + } + mvs_write_port_vsr_data(mvi, phy_id, phy_cfg.v); +} + +static void __devinit +mvs_94xx_config_reg_from_hba(struct mvs_info *mvi, int phy_id) +{ + u32 temp; + temp = (u32)(*(u32 *)&mvi->hba_info_param.phy_tuning[phy_id]); + if (temp == 0xFFFFFFFFL) { + mvi->hba_info_param.phy_tuning[phy_id].trans_emp_amp = 0x6; + mvi->hba_info_param.phy_tuning[phy_id].trans_amp = 0x1A; + mvi->hba_info_param.phy_tuning[phy_id].trans_amp_adj = 0x3; + } + + temp = (u8)(*(u8 *)&mvi->hba_info_param.ffe_ctl[phy_id]); + if (temp == 0xFFL) { + switch (mvi->pdev->revision) { + case VANIR_A0_REV: + case VANIR_B0_REV: + mvi->hba_info_param.ffe_ctl[phy_id].ffe_rss_sel = 0x7; + mvi->hba_info_param.ffe_ctl[phy_id].ffe_cap_sel = 0x7; + break; + case VANIR_C0_REV: + case VANIR_C1_REV: + case VANIR_C2_REV: + default: + mvi->hba_info_param.ffe_ctl[phy_id].ffe_rss_sel = 0x7; + mvi->hba_info_param.ffe_ctl[phy_id].ffe_cap_sel = 0xC; + break; + } + } + + temp = (u8)(*(u8 *)&mvi->hba_info_param.phy_rate[phy_id]); + if (temp == 0xFFL) + /*set default phy_rate = 6Gbps*/ + mvi->hba_info_param.phy_rate[phy_id] = 0x2; + + set_phy_tuning(mvi, phy_id, + mvi->hba_info_param.phy_tuning[phy_id]); + set_phy_ffe_tuning(mvi, phy_id, + mvi->hba_info_param.ffe_ctl[phy_id]); + set_phy_rate(mvi, phy_id, + mvi->hba_info_param.phy_rate[phy_id]); +} + static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id) { void __iomem *regs = mvi->regs; @@ -61,7 +271,14 @@ static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id) static void 
mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard) { u32 tmp; - + u32 delay = 5000; + if (hard == MVS_PHY_TUNE) { + mvs_write_port_cfg_addr(mvi, phy_id, PHYR_SATA_CTL); + tmp = mvs_read_port_cfg_data(mvi, phy_id); + mvs_write_port_cfg_data(mvi, phy_id, tmp|0x20000000); + mvs_write_port_cfg_data(mvi, phy_id, tmp|0x100000); + return; + } tmp = mvs_read_port_irq_stat(mvi, phy_id); tmp &= ~PHYEV_RDY_CH; mvs_write_port_irq_stat(mvi, phy_id, tmp); @@ -71,12 +288,15 @@ static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard) mvs_write_phy_ctl(mvi, phy_id, tmp); do { tmp = mvs_read_phy_ctl(mvi, phy_id); - } while (tmp & PHY_RST_HARD); + udelay(10); + delay--; + } while ((tmp & PHY_RST_HARD) && delay); + if (!delay) + mv_dprintk("phy hard reset failed.\n"); } else { - mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_STAT); - tmp = mvs_read_port_vsr_data(mvi, phy_id); + tmp = mvs_read_phy_ctl(mvi, phy_id); tmp |= PHY_RST; - mvs_write_port_vsr_data(mvi, phy_id, tmp); + mvs_write_phy_ctl(mvi, phy_id, tmp); } } @@ -90,12 +310,25 @@ static void mvs_94xx_phy_disable(struct mvs_info *mvi, u32 phy_id) static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id) { - mvs_write_port_vsr_addr(mvi, phy_id, 0x1B4); - mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1); - mvs_write_port_vsr_addr(mvi, phy_id, 0x104); - mvs_write_port_vsr_data(mvi, phy_id, 0x00018080); + u32 tmp; + u8 revision = 0; + + revision = mvi->pdev->revision; + if (revision == VANIR_A0_REV) { + mvs_write_port_vsr_addr(mvi, phy_id, CMD_HOST_RD_DATA); + mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1); + } + if (revision == VANIR_B0_REV) { + mvs_write_port_vsr_addr(mvi, phy_id, CMD_APP_MEM_CTL); + mvs_write_port_vsr_data(mvi, phy_id, 0x08001006); + mvs_write_port_vsr_addr(mvi, phy_id, CMD_HOST_RD_DATA); + mvs_write_port_vsr_data(mvi, phy_id, 0x0000705f); + } + mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2); - mvs_write_port_vsr_data(mvi, phy_id, 0x00207fff); + tmp = mvs_read_port_vsr_data(mvi, phy_id); + tmp |= bit(0); + mvs_write_port_vsr_data(mvi, phy_id, tmp & 0xfd7fffff); } static int __devinit mvs_94xx_init(struct mvs_info *mvi) @@ -103,7 +336,9 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi) void __iomem *regs = mvi->regs; int i; u32 tmp, cctl; + u8 revision; + revision = mvi->pdev->revision; mvs_show_pcie_usage(mvi); if (mvi->flags & MVF_FLAG_SOC) { tmp = mr32(MVS_PHY_CTL); @@ -133,6 +368,28 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi) msleep(100); } + /* disable Multiplexing, enable phy implemented */ + mw32(MVS_PORTS_IMP, 0xFF); + + if (revision == VANIR_A0_REV) { + mw32(MVS_PA_VSR_ADDR, CMD_CMWK_OOB_DET); + mw32(MVS_PA_VSR_PORT, 0x00018080); + } + mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE2); + if (revision == VANIR_A0_REV || revision == VANIR_B0_REV) + /* set 6G/3G/1.5G, multiplexing, without SSC */ + mw32(MVS_PA_VSR_PORT, 0x0084d4fe); + else + /* set 6G/3G/1.5G, multiplexing, with and without SSC */ + mw32(MVS_PA_VSR_PORT, 0x0084fffe); + + if (revision == VANIR_B0_REV) { + mw32(MVS_PA_VSR_ADDR, CMD_APP_MEM_CTL); + mw32(MVS_PA_VSR_PORT, 0x08001006); + mw32(MVS_PA_VSR_ADDR, CMD_HOST_RD_DATA); + mw32(MVS_PA_VSR_PORT, 0x0000705f); + } + /* reset control */ mw32(MVS_PCS, 0); /* MVS_PCS */ mw32(MVS_STP_REG_SET_0, 0); @@ -141,17 +398,8 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi) /* init phys */ mvs_phy_hacks(mvi); - /* disable Multiplexing, enable phy implemented */ - mw32(MVS_PORTS_IMP, 0xFF); - - - mw32(MVS_PA_VSR_ADDR, 0x00000104); - mw32(MVS_PA_VSR_PORT, 0x00018080); 
- mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE8); - mw32(MVS_PA_VSR_PORT, 0x0084ffff); - /* set LED blink when IO*/ - mw32(MVS_PA_VSR_ADDR, 0x00000030); + mw32(MVS_PA_VSR_ADDR, VSR_PHY_ACT_LED); tmp = mr32(MVS_PA_VSR_PORT); tmp &= 0xFFFF00FF; tmp |= 0x00003300; @@ -175,12 +423,13 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi) mvs_94xx_phy_disable(mvi, i); /* set phy local SAS address */ mvs_set_sas_addr(mvi, i, CONFIG_ID_FRAME3, CONFIG_ID_FRAME4, - (mvi->phy[i].dev_sas_addr)); + cpu_to_le64(mvi->phy[i].dev_sas_addr)); mvs_94xx_enable_xmt(mvi, i); + mvs_94xx_config_reg_from_hba(mvi, i); mvs_94xx_phy_enable(mvi, i); - mvs_94xx_phy_reset(mvi, i, 1); + mvs_94xx_phy_reset(mvi, i, PHY_RST_HARD); msleep(500); mvs_94xx_detect_porttype(mvi, i); } @@ -211,16 +460,9 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi) mvs_update_phyinfo(mvi, i, 1); } - /* FIXME: update wide port bitmaps */ - /* little endian for open address and command table, etc. */ - /* - * it seems that ( from the spec ) turning on big-endian won't - * do us any good on big-endian machines, need further confirmation - */ cctl = mr32(MVS_CTL); cctl |= CCTL_ENDIAN_CMD; - cctl |= CCTL_ENDIAN_DATA; cctl &= ~CCTL_ENDIAN_OPEN; cctl |= CCTL_ENDIAN_RSP; mw32_f(MVS_CTL, cctl); @@ -228,15 +470,20 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi) /* reset CMD queue */ tmp = mr32(MVS_PCS); tmp |= PCS_CMD_RST; + tmp &= ~PCS_SELF_CLEAR; mw32(MVS_PCS, tmp); - /* interrupt coalescing may cause missing HW interrput in some case, - * and the max count is 0x1ff, while our max slot is 0x200, + /* + * the max count is 0x1ff, while our max slot is 0x200, * it will make count 0. */ tmp = 0; - mw32(MVS_INT_COAL, tmp); + if (MVS_CHIP_SLOT_SZ > 0x1ff) + mw32(MVS_INT_COAL, 0x1ff | COAL_EN); + else + mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ | COAL_EN); - tmp = 0x100; + /* default interrupt coalescing time is 128us */ + tmp = 0x10000 | interrupt_coalescing; mw32(MVS_INT_COAL_TMOUT, tmp); /* ladies and gentlemen, start your engines */ @@ -249,7 +496,7 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi) /* enable completion queue interrupt */ tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP | - CINT_DMA_PCIE); + CINT_DMA_PCIE | CINT_NON_SPEC_NCQ_ERROR); tmp |= CINT_PHY_MASK; mw32(MVS_INT_MASK, tmp); @@ -332,13 +579,10 @@ static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat) if (((stat & IRQ_SAS_A) && mvi->id == 0) || ((stat & IRQ_SAS_B) && mvi->id == 1)) { mw32_f(MVS_INT_STAT, CINT_DONE); - #ifndef MVS_USE_TASKLET + spin_lock(&mvi->lock); - #endif mvs_int_full(mvi); - #ifndef MVS_USE_TASKLET spin_unlock(&mvi->lock); - #endif } return IRQ_HANDLED; } @@ -346,10 +590,48 @@ static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat) static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx) { u32 tmp; - mvs_cw32(mvi, 0x300 + (slot_idx >> 3), 1 << (slot_idx % 32)); - do { - tmp = mvs_cr32(mvi, 0x300 + (slot_idx >> 3)); - } while (tmp & 1 << (slot_idx % 32)); + tmp = mvs_cr32(mvi, MVS_COMMAND_ACTIVE+(slot_idx >> 3)); + if (tmp && 1 << (slot_idx % 32)) { + mv_printk("command active %08X, slot [%x].\n", tmp, slot_idx); + mvs_cw32(mvi, MVS_COMMAND_ACTIVE + (slot_idx >> 3), + 1 << (slot_idx % 32)); + do { + tmp = mvs_cr32(mvi, + MVS_COMMAND_ACTIVE + (slot_idx >> 3)); + } while (tmp & 1 << (slot_idx % 32)); + } +} + +void mvs_94xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all) +{ + void __iomem *regs = mvi->regs; + u32 tmp; + + if (clear_all) { + tmp = 
mr32(MVS_INT_STAT_SRS_0); + if (tmp) { + mv_dprintk("check SRS 0 %08X.\n", tmp); + mw32(MVS_INT_STAT_SRS_0, tmp); + } + tmp = mr32(MVS_INT_STAT_SRS_1); + if (tmp) { + mv_dprintk("check SRS 1 %08X.\n", tmp); + mw32(MVS_INT_STAT_SRS_1, tmp); + } + } else { + if (reg_set > 31) + tmp = mr32(MVS_INT_STAT_SRS_1); + else + tmp = mr32(MVS_INT_STAT_SRS_0); + + if (tmp & (1 << (reg_set % 32))) { + mv_dprintk("register set 0x%x was stopped.\n", reg_set); + if (reg_set > 31) + mw32(MVS_INT_STAT_SRS_1, 1 << (reg_set % 32)); + else + mw32(MVS_INT_STAT_SRS_0, 1 << (reg_set % 32)); + } + } } static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type, @@ -357,37 +639,56 @@ static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type, { void __iomem *regs = mvi->regs; u32 tmp; + mvs_94xx_clear_srs_irq(mvi, 0, 1); - if (type == PORT_TYPE_SATA) { - tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs); - mw32(MVS_INT_STAT_SRS_0, tmp); - } - mw32(MVS_INT_STAT, CINT_CI_STOP); + tmp = mr32(MVS_INT_STAT); + mw32(MVS_INT_STAT, tmp | CINT_CI_STOP); tmp = mr32(MVS_PCS) | 0xFF00; mw32(MVS_PCS, tmp); } +static void mvs_94xx_non_spec_ncq_error(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs; + u32 err_0, err_1; + u8 i; + struct mvs_device *device; + + err_0 = mr32(MVS_NON_NCQ_ERR_0); + err_1 = mr32(MVS_NON_NCQ_ERR_1); + + mv_dprintk("non specific ncq error err_0:%x,err_1:%x.\n", + err_0, err_1); + for (i = 0; i < 32; i++) { + if (err_0 & bit(i)) { + device = mvs_find_dev_by_reg_set(mvi, i); + if (device) + mvs_release_task(mvi, device->sas_device); + } + if (err_1 & bit(i)) { + device = mvs_find_dev_by_reg_set(mvi, i+32); + if (device) + mvs_release_task(mvi, device->sas_device); + } + } + + mw32(MVS_NON_NCQ_ERR_0, err_0); + mw32(MVS_NON_NCQ_ERR_1, err_1); +} + static void mvs_94xx_free_reg_set(struct mvs_info *mvi, u8 *tfs) { void __iomem *regs = mvi->regs; - u32 tmp; u8 reg_set = *tfs; if (*tfs == MVS_ID_NOT_MAPPED) return; mvi->sata_reg_set &= ~bit(reg_set); - if (reg_set < 32) { + if (reg_set < 32) w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set); - tmp = mr32(MVS_INT_STAT_SRS_0) & (u32)mvi->sata_reg_set; - if (tmp) - mw32(MVS_INT_STAT_SRS_0, tmp); - } else { - w_reg_set_enable(reg_set, mvi->sata_reg_set); - tmp = mr32(MVS_INT_STAT_SRS_1) & mvi->sata_reg_set; - if (tmp) - mw32(MVS_INT_STAT_SRS_1, tmp); - } + else + w_reg_set_enable(reg_set, (u32)(mvi->sata_reg_set >> 32)); *tfs = MVS_ID_NOT_MAPPED; @@ -403,7 +704,7 @@ static u8 mvs_94xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs) return 0; i = mv_ffc64(mvi->sata_reg_set); - if (i > 32) { + if (i >= 32) { mvi->sata_reg_set |= bit(i); w_reg_set_enable(i, (u32)(mvi->sata_reg_set >> 32)); *tfs = i; @@ -422,9 +723,12 @@ static void mvs_94xx_make_prd(struct scatterlist *scatter, int nr, void *prd) int i; struct scatterlist *sg; struct mvs_prd *buf_prd = prd; + struct mvs_prd_imt im_len; + *(u32 *)&im_len = 0; for_each_sg(scatter, sg, nr, i) { buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); - buf_prd->im_len.len = cpu_to_le32(sg_dma_len(sg)); + im_len.len = sg_dma_len(sg); + buf_prd->im_len = cpu_to_le32(*(u32 *)&im_len); buf_prd++; } } @@ -433,7 +737,7 @@ static int mvs_94xx_oob_done(struct mvs_info *mvi, int i) { u32 phy_st; phy_st = mvs_read_phy_ctl(mvi, i); - if (phy_st & PHY_READY_MASK) /* phy ready */ + if (phy_st & PHY_READY_MASK) return 1; return 0; } @@ -447,7 +751,7 @@ static void mvs_94xx_get_dev_identify_frame(struct mvs_info *mvi, int port_id, for (i = 0; i < 7; i++) { mvs_write_port_cfg_addr(mvi, port_id, 
CONFIG_ID_FRAME0 + i * 4); - id_frame[i] = mvs_read_port_cfg_data(mvi, port_id); + id_frame[i] = cpu_to_le32(mvs_read_port_cfg_data(mvi, port_id)); } memcpy(id, id_frame, 28); } @@ -458,15 +762,13 @@ static void mvs_94xx_get_att_identify_frame(struct mvs_info *mvi, int port_id, int i; u32 id_frame[7]; - /* mvs_hexdump(28, (u8 *)id_frame, 0); */ for (i = 0; i < 7; i++) { mvs_write_port_cfg_addr(mvi, port_id, CONFIG_ATT_ID_FRAME0 + i * 4); - id_frame[i] = mvs_read_port_cfg_data(mvi, port_id); + id_frame[i] = cpu_to_le32(mvs_read_port_cfg_data(mvi, port_id)); mv_dprintk("94xx phy %d atta frame %d %x.\n", port_id + mvi->id * mvi->chip->n_phy, i, id_frame[i]); } - /* mvs_hexdump(28, (u8 *)id_frame, 0); */ memcpy(id, id_frame, 28); } @@ -526,7 +828,18 @@ static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i, void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id, struct sas_phy_linkrates *rates) { - /* TODO */ + u32 lrmax = 0; + u32 tmp; + + tmp = mvs_read_phy_ctl(mvi, phy_id); + lrmax = (rates->maximum_linkrate - SAS_LINK_RATE_1_5_GBPS) << 12; + + if (lrmax) { + tmp &= ~(0x3 << 12); + tmp |= lrmax; + } + mvs_write_phy_ctl(mvi, phy_id, tmp); + mvs_94xx_phy_reset(mvi, phy_id, PHY_RST_HARD); } static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi) @@ -603,27 +916,59 @@ int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout) return -1; } -#ifndef DISABLE_HOTPLUG_DMA_FIX -void mvs_94xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd) +void mvs_94xx_fix_dma(struct mvs_info *mvi, u32 phy_mask, + int buf_len, int from, void *prd) { int i; struct mvs_prd *buf_prd = prd; + dma_addr_t buf_dma; + struct mvs_prd_imt im_len; + + *(u32 *)&im_len = 0; buf_prd += from; - for (i = 0; i < MAX_SG_ENTRY - from; i++) { - buf_prd->addr = cpu_to_le64(buf_dma); - buf_prd->im_len.len = cpu_to_le32(buf_len); - ++buf_prd; + +#define PRD_CHAINED_ENTRY 0x01 + if ((mvi->pdev->revision == VANIR_A0_REV) || + (mvi->pdev->revision == VANIR_B0_REV)) + buf_dma = (phy_mask <= 0x08) ? + mvi->bulk_buffer_dma : mvi->bulk_buffer_dma1; + else + return; + + for (i = from; i < MAX_SG_ENTRY; i++, ++buf_prd) { + if (i == MAX_SG_ENTRY - 1) { + buf_prd->addr = cpu_to_le64(virt_to_phys(buf_prd - 1)); + im_len.len = 2; + im_len.misc_ctl = PRD_CHAINED_ENTRY; + } else { + buf_prd->addr = cpu_to_le64(buf_dma); + im_len.len = buf_len; + } + buf_prd->im_len = cpu_to_le32(*(u32 *)&im_len); } } -#endif -/* - * FIXME JEJB: temporary nop clear_srs_irq to make 94xx still work - * with 64xx fixes - */ -static void mvs_94xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, - u8 clear_all) +static void mvs_94xx_tune_interrupt(struct mvs_info *mvi, u32 time) { + void __iomem *regs = mvi->regs; + u32 tmp = 0; + /* + * the max count is 0x1ff, while our max slot is 0x200, + * it will make count 0. 
+ */ + if (time == 0) { + mw32(MVS_INT_COAL, 0); + mw32(MVS_INT_COAL_TMOUT, 0x10000); + } else { + if (MVS_CHIP_SLOT_SZ > 0x1ff) + mw32(MVS_INT_COAL, 0x1ff|COAL_EN); + else + mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ|COAL_EN); + + tmp = 0x10000 | time; + mw32(MVS_INT_COAL_TMOUT, tmp); + } + } const struct mvs_dispatch mvs_94xx_dispatch = { @@ -648,7 +993,6 @@ const struct mvs_dispatch mvs_94xx_dispatch = { mvs_write_port_irq_stat, mvs_read_port_irq_mask, mvs_write_port_irq_mask, - mvs_get_sas_addr, mvs_94xx_command_active, mvs_94xx_clear_srs_irq, mvs_94xx_issue_stop, @@ -676,8 +1020,8 @@ const struct mvs_dispatch mvs_94xx_dispatch = { mvs_94xx_spi_buildcmd, mvs_94xx_spi_issuecmd, mvs_94xx_spi_waitdataready, -#ifndef DISABLE_HOTPLUG_DMA_FIX mvs_94xx_fix_dma, -#endif + mvs_94xx_tune_interrupt, + mvs_94xx_non_spec_ncq_error, }; diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h index 8835befe2c0..8f7eb4f2114 100644 --- a/drivers/scsi/mvsas/mv_94xx.h +++ b/drivers/scsi/mvsas/mv_94xx.h @@ -30,6 +30,14 @@ #define MAX_LINK_RATE SAS_LINK_RATE_6_0_GBPS +enum VANIR_REVISION_ID { + VANIR_A0_REV = 0xA0, + VANIR_B0_REV = 0x01, + VANIR_C0_REV = 0x02, + VANIR_C1_REV = 0x03, + VANIR_C2_REV = 0xC2, +}; + enum hw_registers { MVS_GBL_CTL = 0x04, /* global control */ MVS_GBL_INT_STAT = 0x00, /* global irq status */ @@ -101,6 +109,7 @@ enum hw_registers { MVS_P4_VSR_DATA = 0x254, /* phy4 VSR data */ MVS_PA_VSR_ADDR = 0x290, /* All port VSR addr */ MVS_PA_VSR_PORT = 0x294, /* All port VSR data */ + MVS_COMMAND_ACTIVE = 0x300, }; enum pci_cfg_registers { @@ -112,26 +121,29 @@ enum pci_cfg_registers { /* SAS/SATA Vendor Specific Port Registers */ enum sas_sata_vsp_regs { - VSR_PHY_STAT = 0x00 * 4, /* Phy Status */ - VSR_PHY_MODE1 = 0x01 * 4, /* phy tx */ - VSR_PHY_MODE2 = 0x02 * 4, /* tx scc */ - VSR_PHY_MODE3 = 0x03 * 4, /* pll */ - VSR_PHY_MODE4 = 0x04 * 4, /* VCO */ - VSR_PHY_MODE5 = 0x05 * 4, /* Rx */ - VSR_PHY_MODE6 = 0x06 * 4, /* CDR */ - VSR_PHY_MODE7 = 0x07 * 4, /* Impedance */ - VSR_PHY_MODE8 = 0x08 * 4, /* Voltage */ - VSR_PHY_MODE9 = 0x09 * 4, /* Test */ - VSR_PHY_MODE10 = 0x0A * 4, /* Power */ - VSR_PHY_MODE11 = 0x0B * 4, /* Phy Mode */ - VSR_PHY_VS0 = 0x0C * 4, /* Vednor Specific 0 */ - VSR_PHY_VS1 = 0x0D * 4, /* Vednor Specific 1 */ + VSR_PHY_STAT = 0x00 * 4, /* Phy Interrupt Status */ + VSR_PHY_MODE1 = 0x01 * 4, /* phy Interrupt Enable */ + VSR_PHY_MODE2 = 0x02 * 4, /* Phy Configuration */ + VSR_PHY_MODE3 = 0x03 * 4, /* Phy Status */ + VSR_PHY_MODE4 = 0x04 * 4, /* Phy Counter 0 */ + VSR_PHY_MODE5 = 0x05 * 4, /* Phy Counter 1 */ + VSR_PHY_MODE6 = 0x06 * 4, /* Event Counter Control */ + VSR_PHY_MODE7 = 0x07 * 4, /* Event Counter Select */ + VSR_PHY_MODE8 = 0x08 * 4, /* Event Counter 0 */ + VSR_PHY_MODE9 = 0x09 * 4, /* Event Counter 1 */ + VSR_PHY_MODE10 = 0x0A * 4, /* Event Counter 2 */ + VSR_PHY_MODE11 = 0x0B * 4, /* Event Counter 3 */ + VSR_PHY_ACT_LED = 0x0C * 4, /* Activity LED control */ + + VSR_PHY_FFE_CONTROL = 0x10C, + VSR_PHY_DFE_UPDATE_CRTL = 0x110, + VSR_REF_CLOCK_CRTL = 0x1A0, }; enum chip_register_bits { PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8), - PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8), - PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (12), + PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0x7 << 12), + PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16), PHY_NEG_SPP_PHYS_LINK_RATE_MASK = (0x3 << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET), }; @@ -169,22 +181,75 @@ enum pci_interrupt_cause { IRQ_PCIE_ERR = (1 << 31), }; +union reg_phy_cfg { + u32 v; + struct { + u32 phy_reset:1; + u32 
sas_support:1; + u32 sata_support:1; + u32 sata_host_mode:1; + /* + * bit 2: 6Gbps support + * bit 1: 3Gbps support + * bit 0: 1.5Gbps support + */ + u32 speed_support:3; + u32 snw_3_support:1; + u32 tx_lnk_parity:1; + /* + * bit 5: G1 (1.5Gbps) Without SSC + * bit 4: G1 (1.5Gbps) with SSC + * bit 3: G2 (3.0Gbps) Without SSC + * bit 2: G2 (3.0Gbps) with SSC + * bit 1: G3 (6.0Gbps) without SSC + * bit 0: G3 (6.0Gbps) with SSC + */ + u32 tx_spt_phs_lnk_rate:6; + /* 8h: 1.5Gbps 9h: 3Gbps Ah: 6Gbps */ + u32 tx_lgcl_lnk_rate:4; + u32 tx_ssc_type:1; + u32 sata_spin_up_spt:1; + u32 sata_spin_up_en:1; + u32 bypass_oob:1; + u32 disable_phy:1; + u32 rsvd:8; + } u; +}; + #define MAX_SG_ENTRY 255 struct mvs_prd_imt { +#ifndef __BIG_ENDIAN __le32 len:22; u8 _r_a:2; u8 misc_ctl:4; u8 inter_sel:4; +#else + u32 inter_sel:4; + u32 misc_ctl:4; + u32 _r_a:2; + u32 len:22; +#endif }; struct mvs_prd { /* 64-bit buffer address */ __le64 addr; /* 22-bit length */ - struct mvs_prd_imt im_len; + __le32 im_len; } __attribute__ ((packed)); +/* + * these registers are accessed through port vendor + * specific address/data registers + */ +enum sas_sata_phy_regs { + GENERATION_1_SETTING = 0x118, + GENERATION_1_2_SETTING = 0x11C, + GENERATION_2_3_SETTING = 0x120, + GENERATION_3_4_SETTING = 0x124, +}; + #define SPI_CTRL_REG_94XX 0xc800 #define SPI_ADDR_REG_94XX 0xc804 #define SPI_WR_DATA_REG_94XX 0xc808 diff --git a/drivers/scsi/mvsas/mv_chips.h b/drivers/scsi/mvsas/mv_chips.h index 1753a6fc42d..bcc408042ce 100644 --- a/drivers/scsi/mvsas/mv_chips.h +++ b/drivers/scsi/mvsas/mv_chips.h @@ -164,7 +164,6 @@ static inline void __devinit mvs_phy_hacks(struct mvs_info *mvi) { u32 tmp; - /* workaround for SATA R-ERR, to ignore phy glitch */ tmp = mvs_cr32(mvi, CMD_PHY_TIMER); tmp &= ~(1 << 9); tmp |= (1 << 10); @@ -179,23 +178,10 @@ static inline void __devinit mvs_phy_hacks(struct mvs_info *mvi) tmp |= 0x3fff; mvs_cw32(mvi, CMD_SAS_CTL0, tmp); - /* workaround for WDTIMEOUT , set to 550 ms */ mvs_cw32(mvi, CMD_WD_TIMER, 0x7a0000); /* not to halt for different port op during wideport link change */ mvs_cw32(mvi, CMD_APP_ERR_CONFIG, 0xffefbf7d); - - /* workaround for Seagate disk not-found OOB sequence, recv - * COMINIT before sending out COMWAKE */ - tmp = mvs_cr32(mvi, CMD_PHY_MODE_21); - tmp &= 0x0000ffff; - tmp |= 0x00fa0000; - mvs_cw32(mvi, CMD_PHY_MODE_21, tmp); - - tmp = mvs_cr32(mvi, CMD_PHY_TIMER); - tmp &= 0x1fffffff; - tmp |= (2U << 29); /* 8 ms retry */ - mvs_cw32(mvi, CMD_PHY_TIMER, tmp); } static inline void mvs_int_sata(struct mvs_info *mvi) @@ -223,6 +209,9 @@ static inline void mvs_int_full(struct mvs_info *mvi) mvs_int_port(mvi, i, tmp); } + if (stat & CINT_NON_SPEC_NCQ_ERROR) + MVS_CHIP_DISP->non_spec_ncq_error(mvi); + if (stat & CINT_SRS) mvs_int_sata(mvi); diff --git a/drivers/scsi/mvsas/mv_defs.h b/drivers/scsi/mvsas/mv_defs.h index bc00c940743..dec7cadb748 100644 --- a/drivers/scsi/mvsas/mv_defs.h +++ b/drivers/scsi/mvsas/mv_defs.h @@ -43,7 +43,6 @@ enum chip_flavors { /* driver compile-time configuration */ enum driver_configuration { - MVS_SLOTS = 512, /* command slots */ MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */ MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */ /* software requires power-of-2 @@ -56,8 +55,7 @@ enum driver_configuration { MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */ MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */ MVS_OAF_SZ = 64, /* Open address frame buffer size */ - MVS_QUEUE_SIZE = 32, /* Support Queue depth */ - MVS_CAN_QUEUE = MVS_SLOTS - 2, /* SCSI 
Queue depth */ + MVS_QUEUE_SIZE = 64, /* Support Queue depth */ MVS_SOC_CAN_QUEUE = MVS_SOC_SLOTS - 2, }; @@ -144,6 +142,7 @@ enum hw_register_bits { CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */ CINT_MEM = (1U << 26), /* int mem parity err */ CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */ + CINT_NON_SPEC_NCQ_ERROR = (1U << 25), /* Non specific NCQ error */ CINT_SRS = (1U << 3), /* SRS event */ CINT_CI_STOP = (1U << 1), /* cmd issue stopped */ CINT_DONE = (1U << 0), /* cmd completion */ @@ -161,7 +160,7 @@ enum hw_register_bits { TXQ_CMD_SSP = 1, /* SSP protocol */ TXQ_CMD_SMP = 2, /* SMP protocol */ TXQ_CMD_STP = 3, /* STP/SATA protocol */ - TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */ + TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP target free list */ TXQ_CMD_SLOT_RESET = 7, /* reset command slot */ TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */ TXQ_MODE_TARGET = 0, @@ -391,15 +390,15 @@ enum sas_cmd_port_registers { }; enum mvs_info_flags { - MVF_MSI = (1U << 0), /* MSI is enabled */ MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */ MVF_FLAG_SOC = (1U << 2), /* SoC integrated controllers */ }; enum mvs_event_flags { - PHY_PLUG_EVENT = (3U), + PHY_PLUG_EVENT = (3U), PHY_PLUG_IN = (1U << 0), /* phy plug in */ PHY_PLUG_OUT = (1U << 1), /* phy plug out */ + EXP_BRCT_CHG = (1U << 2), /* broadcast change */ }; enum mvs_port_type { diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index 90b636611cd..4e9af66fd1d 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c @@ -34,22 +34,25 @@ MODULE_PARM_DESC(collector, "\n" "\tThe mvsas SAS LLDD supports both modes.\n" "\tDefault: 1 (Direct Mode).\n"); +int interrupt_coalescing = 0x80; + static struct scsi_transport_template *mvs_stt; struct kmem_cache *mvs_task_list_cache; static const struct mvs_chip_info mvs_chips[] = { - [chip_6320] = { 1, 2, 0x400, 17, 16, 9, &mvs_64xx_dispatch, }, - [chip_6440] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, }, - [chip_6485] = { 1, 8, 0x800, 33, 32, 10, &mvs_64xx_dispatch, }, - [chip_9180] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, }, - [chip_9480] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, }, - [chip_9445] = { 1, 4, 0x800, 17, 64, 11, &mvs_94xx_dispatch, }, - [chip_9485] = { 2, 4, 0x800, 17, 64, 11, &mvs_94xx_dispatch, }, - [chip_1300] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, }, - [chip_1320] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, }, + [chip_6320] = { 1, 2, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, }, + [chip_6440] = { 1, 4, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, }, + [chip_6485] = { 1, 8, 0x800, 33, 32, 6, 10, &mvs_64xx_dispatch, }, + [chip_9180] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, }, + [chip_9480] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, }, + [chip_9445] = { 1, 4, 0x800, 17, 64, 8, 11, &mvs_94xx_dispatch, }, + [chip_9485] = { 2, 4, 0x800, 17, 64, 8, 11, &mvs_94xx_dispatch, }, + [chip_1300] = { 1, 4, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, }, + [chip_1320] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, }, }; +struct device_attribute *mvst_host_attrs[]; + #define SOC_SAS_NUM 2 -#define SG_MX 64 static struct scsi_host_template mvs_sht = { .module = THIS_MODULE, @@ -66,7 +69,7 @@ static struct scsi_host_template mvs_sht = { .can_queue = 1, .cmd_per_lun = 1, .this_id = -1, - .sg_tablesize = SG_MX, + .sg_tablesize = SG_ALL, .max_sectors = SCSI_DEFAULT_MAX_SECTORS, .use_clustering = ENABLE_CLUSTERING, .eh_device_reset_handler = sas_eh_device_reset_handler, @@ -74,6 +77,7 @@ static 
struct scsi_host_template mvs_sht = { .slave_alloc = mvs_slave_alloc, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, + .shost_attrs = mvst_host_attrs, }; static struct sas_domain_function_template mvs_transport_ops = { @@ -100,6 +104,7 @@ static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id) struct asd_sas_phy *sas_phy = &phy->sas_phy; phy->mvi = mvi; + phy->port = NULL; init_timer(&phy->timer); sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0; sas_phy->class = SAS; @@ -128,7 +133,7 @@ static void mvs_free(struct mvs_info *mvi) if (mvi->flags & MVF_FLAG_SOC) slot_nr = MVS_SOC_SLOTS; else - slot_nr = MVS_SLOTS; + slot_nr = MVS_CHIP_SLOT_SZ; if (mvi->dma_pool) pci_pool_destroy(mvi->dma_pool); @@ -148,25 +153,26 @@ static void mvs_free(struct mvs_info *mvi) dma_free_coherent(mvi->dev, sizeof(*mvi->slot) * slot_nr, mvi->slot, mvi->slot_dma); -#ifndef DISABLE_HOTPLUG_DMA_FIX + if (mvi->bulk_buffer) dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE, mvi->bulk_buffer, mvi->bulk_buffer_dma); -#endif + if (mvi->bulk_buffer1) + dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE, + mvi->bulk_buffer1, mvi->bulk_buffer_dma1); MVS_CHIP_DISP->chip_iounmap(mvi); if (mvi->shost) scsi_host_put(mvi->shost); list_for_each_entry(mwq, &mvi->wq_list, entry) cancel_delayed_work(&mwq->work_q); + kfree(mvi->tags); kfree(mvi); } -#ifdef MVS_USE_TASKLET -struct tasklet_struct mv_tasklet; +#ifdef CONFIG_SCSI_MVSAS_TASKLET static void mvs_tasklet(unsigned long opaque) { - unsigned long flags; u32 stat; u16 core_nr, i = 0; @@ -179,35 +185,49 @@ static void mvs_tasklet(unsigned long opaque) if (unlikely(!mvi)) BUG_ON(1); + stat = MVS_CHIP_DISP->isr_status(mvi, mvi->pdev->irq); + if (!stat) + goto out; + for (i = 0; i < core_nr; i++) { mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; - stat = MVS_CHIP_DISP->isr_status(mvi, mvi->irq); - if (stat) - MVS_CHIP_DISP->isr(mvi, mvi->irq, stat); + MVS_CHIP_DISP->isr(mvi, mvi->pdev->irq, stat); } +out: + MVS_CHIP_DISP->interrupt_enable(mvi); } #endif static irqreturn_t mvs_interrupt(int irq, void *opaque) { - u32 core_nr, i = 0; + u32 core_nr; u32 stat; struct mvs_info *mvi; struct sas_ha_struct *sha = opaque; +#ifndef CONFIG_SCSI_MVSAS_TASKLET + u32 i; +#endif core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0]; if (unlikely(!mvi)) return IRQ_NONE; +#ifdef CONFIG_SCSI_MVSAS_TASKLET + MVS_CHIP_DISP->interrupt_disable(mvi); +#endif stat = MVS_CHIP_DISP->isr_status(mvi, irq); - if (!stat) + if (!stat) { + #ifdef CONFIG_SCSI_MVSAS_TASKLET + MVS_CHIP_DISP->interrupt_enable(mvi); + #endif return IRQ_NONE; + } -#ifdef MVS_USE_TASKLET - tasklet_schedule(&mv_tasklet); +#ifdef CONFIG_SCSI_MVSAS_TASKLET + tasklet_schedule(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet); #else for (i = 0; i < core_nr; i++) { mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; @@ -225,7 +245,7 @@ static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost) if (mvi->flags & MVF_FLAG_SOC) slot_nr = MVS_SOC_SLOTS; else - slot_nr = MVS_SLOTS; + slot_nr = MVS_CHIP_SLOT_SZ; spin_lock_init(&mvi->lock); for (i = 0; i < mvi->chip->n_phy; i++) { @@ -273,13 +293,18 @@ static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost) goto err_out; memset(mvi->slot, 0, sizeof(*mvi->slot) * slot_nr); -#ifndef DISABLE_HOTPLUG_DMA_FIX mvi->bulk_buffer = dma_alloc_coherent(mvi->dev, TRASH_BUCKET_SIZE, &mvi->bulk_buffer_dma, GFP_KERNEL); if (!mvi->bulk_buffer) goto err_out; -#endif + + mvi->bulk_buffer1 = 
dma_alloc_coherent(mvi->dev, + TRASH_BUCKET_SIZE, + &mvi->bulk_buffer_dma1, GFP_KERNEL); + if (!mvi->bulk_buffer1) + goto err_out; + sprintf(pool_name, "%s%d", "mvs_dma_pool", mvi->id); mvi->dma_pool = pci_pool_create(pool_name, mvi->pdev, MVS_SLOT_BUF_SZ, 16, 0); if (!mvi->dma_pool) { @@ -354,11 +379,12 @@ static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev, const struct pci_device_id *ent, struct Scsi_Host *shost, unsigned int id) { - struct mvs_info *mvi; + struct mvs_info *mvi = NULL; struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); - mvi = kzalloc(sizeof(*mvi) + MVS_SLOTS * sizeof(struct mvs_slot_info), - GFP_KERNEL); + mvi = kzalloc(sizeof(*mvi) + + (1L << mvs_chips[ent->driver_data].slot_width) * + sizeof(struct mvs_slot_info), GFP_KERNEL); if (!mvi) return NULL; @@ -367,7 +393,6 @@ static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev, mvi->chip_id = ent->driver_data; mvi->chip = &mvs_chips[mvi->chip_id]; INIT_LIST_HEAD(&mvi->wq_list); - mvi->irq = pdev->irq; ((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi; ((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy; @@ -375,9 +400,10 @@ static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev, mvi->id = id; mvi->sas = sha; mvi->shost = shost; -#ifdef MVS_USE_TASKLET - tasklet_init(&mv_tasklet, mvs_tasklet, (unsigned long)sha); -#endif + + mvi->tags = kzalloc(MVS_CHIP_SLOT_SZ>>3, GFP_KERNEL); + if (!mvi->tags) + goto err_out; if (MVS_CHIP_DISP->chip_ioremap(mvi)) goto err_out; @@ -388,7 +414,6 @@ err_out: return NULL; } -/* move to PCI layer or libata core? */ static int pci_go_64(struct pci_dev *pdev) { int rc; @@ -450,7 +475,7 @@ static int __devinit mvs_prep_sas_ha_init(struct Scsi_Host *shost, ((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr; shost->transportt = mvs_stt; - shost->max_id = 128; + shost->max_id = MVS_MAX_DEVICES; shost->max_lun = ~0; shost->max_channel = 1; shost->max_cmd_len = 16; @@ -493,11 +518,12 @@ static void __devinit mvs_post_sas_ha_init(struct Scsi_Host *shost, if (mvi->flags & MVF_FLAG_SOC) can_queue = MVS_SOC_CAN_QUEUE; else - can_queue = MVS_CAN_QUEUE; + can_queue = MVS_CHIP_SLOT_SZ; sha->lldd_queue_size = can_queue; + shost->sg_tablesize = min_t(u16, SG_ALL, MVS_MAX_SG); shost->can_queue = can_queue; - mvi->shost->cmd_per_lun = MVS_SLOTS/sha->num_phys; + mvi->shost->cmd_per_lun = MVS_QUEUE_SIZE; sha->core.shost = mvi->shost; } @@ -518,6 +544,7 @@ static int __devinit mvs_pci_init(struct pci_dev *pdev, { unsigned int rc, nhost = 0; struct mvs_info *mvi; + struct mvs_prv_info *mpi; irq_handler_t irq_handler = mvs_interrupt; struct Scsi_Host *shost = NULL; const struct mvs_chip_info *chip; @@ -569,6 +596,9 @@ static int __devinit mvs_pci_init(struct pci_dev *pdev, goto err_out_regions; } + memset(&mvi->hba_info_param, 0xFF, + sizeof(struct hba_info_page)); + mvs_init_sas_add(mvi); mvi->instance = nhost; @@ -579,8 +609,9 @@ static int __devinit mvs_pci_init(struct pci_dev *pdev, } nhost++; } while (nhost < chip->n_host); -#ifdef MVS_USE_TASKLET - tasklet_init(&mv_tasklet, mvs_tasklet, + mpi = (struct mvs_prv_info *)(SHOST_TO_SAS_HA(shost)->lldd_ha); +#ifdef CONFIG_SCSI_MVSAS_TASKLET + tasklet_init(&(mpi->mv_tasklet), mvs_tasklet, (unsigned long)SHOST_TO_SAS_HA(shost)); #endif @@ -625,8 +656,8 @@ static void __devexit mvs_pci_remove(struct pci_dev *pdev) core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0]; -#ifdef MVS_USE_TASKLET - tasklet_kill(&mv_tasklet); +#ifdef 
CONFIG_SCSI_MVSAS_TASKLET + tasklet_kill(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet); #endif pci_set_drvdata(pdev, NULL); @@ -635,7 +666,7 @@ static void __devexit mvs_pci_remove(struct pci_dev *pdev) scsi_remove_host(mvi->shost); MVS_CHIP_DISP->interrupt_disable(mvi); - free_irq(mvi->irq, sha); + free_irq(mvi->pdev->irq, sha); for (i = 0; i < core_nr; i++) { mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; mvs_free(mvi); @@ -703,6 +734,70 @@ static struct pci_driver mvs_pci_driver = { .remove = __devexit_p(mvs_pci_remove), }; +static ssize_t +mvs_show_driver_version(struct device *cdev, + struct device_attribute *attr, char *buffer) +{ + return snprintf(buffer, PAGE_SIZE, "%s\n", DRV_VERSION); +} + +static DEVICE_ATTR(driver_version, + S_IRUGO, + mvs_show_driver_version, + NULL); + +static ssize_t +mvs_store_interrupt_coalescing(struct device *cdev, + struct device_attribute *attr, + const char *buffer, size_t size) +{ + int val = 0; + struct mvs_info *mvi = NULL; + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + u8 i, core_nr; + if (buffer == NULL) + return size; + + if (sscanf(buffer, "%d", &val) != 1) + return -EINVAL; + + if (val >= 0x10000) { + mv_dprintk("interrupt coalescing timer %d us is" + "too long\n", val); + return strlen(buffer); + } + + interrupt_coalescing = val; + + core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0]; + + if (unlikely(!mvi)) + return -EINVAL; + + for (i = 0; i < core_nr; i++) { + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; + if (MVS_CHIP_DISP->tune_interrupt) + MVS_CHIP_DISP->tune_interrupt(mvi, + interrupt_coalescing); + } + mv_dprintk("set interrupt coalescing time to %d us\n", + interrupt_coalescing); + return strlen(buffer); +} + +static ssize_t mvs_show_interrupt_coalescing(struct device *cdev, + struct device_attribute *attr, char *buffer) +{ + return snprintf(buffer, PAGE_SIZE, "%d\n", interrupt_coalescing); +} + +static DEVICE_ATTR(interrupt_coalescing, + S_IRUGO|S_IWUSR, + mvs_show_interrupt_coalescing, + mvs_store_interrupt_coalescing); + /* task handler */ struct task_struct *mvs_th; static int __init mvs_init(void) @@ -739,6 +834,12 @@ static void __exit mvs_exit(void) kmem_cache_destroy(mvs_task_list_cache); } +struct device_attribute *mvst_host_attrs[] = { + &dev_attr_driver_version, + &dev_attr_interrupt_coalescing, + NULL, +}; + module_init(mvs_init); module_exit(mvs_exit); diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index 0ef27425c44..4958fefff36 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -38,7 +38,7 @@ static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag) void mvs_tag_clear(struct mvs_info *mvi, u32 tag) { - void *bitmap = &mvi->tags; + void *bitmap = mvi->tags; clear_bit(tag, bitmap); } @@ -49,14 +49,14 @@ void mvs_tag_free(struct mvs_info *mvi, u32 tag) void mvs_tag_set(struct mvs_info *mvi, unsigned int tag) { - void *bitmap = &mvi->tags; + void *bitmap = mvi->tags; set_bit(tag, bitmap); } inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) { unsigned int index, tag; - void *bitmap = &mvi->tags; + void *bitmap = mvi->tags; index = find_first_zero_bit(bitmap, mvi->tags_num); tag = index; @@ -74,126 +74,6 @@ void mvs_tag_init(struct mvs_info *mvi) mvs_tag_clear(mvi, i); } -void mvs_hexdump(u32 size, u8 *data, u32 baseaddr) -{ - u32 i; - u32 run; - u32 offset; - - offset = 0; - while (size) { - 
printk(KERN_DEBUG"%08X : ", baseaddr + offset); - if (size >= 16) - run = 16; - else - run = size; - size -= run; - for (i = 0; i < 16; i++) { - if (i < run) - printk(KERN_DEBUG"%02X ", (u32)data[i]); - else - printk(KERN_DEBUG" "); - } - printk(KERN_DEBUG": "); - for (i = 0; i < run; i++) - printk(KERN_DEBUG"%c", - isalnum(data[i]) ? data[i] : '.'); - printk(KERN_DEBUG"\n"); - data = &data[16]; - offset += run; - } - printk(KERN_DEBUG"\n"); -} - -#if (_MV_DUMP > 1) -static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag, - enum sas_protocol proto) -{ - u32 offset; - struct mvs_slot_info *slot = &mvi->slot_info[tag]; - - offset = slot->cmd_size + MVS_OAF_SZ + - MVS_CHIP_DISP->prd_size() * slot->n_elem; - dev_printk(KERN_DEBUG, mvi->dev, "+---->Status buffer[%d] :\n", - tag); - mvs_hexdump(32, (u8 *) slot->response, - (u32) slot->buf_dma + offset); -} -#endif - -static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag, - enum sas_protocol proto) -{ -#if (_MV_DUMP > 1) - u32 sz, w_ptr; - u64 addr; - struct mvs_slot_info *slot = &mvi->slot_info[tag]; - - /*Delivery Queue */ - sz = MVS_CHIP_SLOT_SZ; - w_ptr = slot->tx; - addr = mvi->tx_dma; - dev_printk(KERN_DEBUG, mvi->dev, - "Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr); - dev_printk(KERN_DEBUG, mvi->dev, - "Delivery Queue Base Address=0x%llX (PA)" - "(tx_dma=0x%llX), Entry=%04d\n", - addr, (unsigned long long)mvi->tx_dma, w_ptr); - mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]), - (u32) mvi->tx_dma + sizeof(u32) * w_ptr); - /*Command List */ - addr = mvi->slot_dma; - dev_printk(KERN_DEBUG, mvi->dev, - "Command List Base Address=0x%llX (PA)" - "(slot_dma=0x%llX), Header=%03d\n", - addr, (unsigned long long)slot->buf_dma, tag); - dev_printk(KERN_DEBUG, mvi->dev, "Command Header[%03d]:\n", tag); - /*mvs_cmd_hdr */ - mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]), - (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr)); - /*1.command table area */ - dev_printk(KERN_DEBUG, mvi->dev, "+---->Command Table :\n"); - mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma); - /*2.open address frame area */ - dev_printk(KERN_DEBUG, mvi->dev, "+---->Open Address Frame :\n"); - mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size, - (u32) slot->buf_dma + slot->cmd_size); - /*3.status buffer */ - mvs_hba_sb_dump(mvi, tag, proto); - /*4.PRD table */ - dev_printk(KERN_DEBUG, mvi->dev, "+---->PRD table :\n"); - mvs_hexdump(MVS_CHIP_DISP->prd_size() * slot->n_elem, - (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ, - (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ); -#endif -} - -static void mvs_hba_cq_dump(struct mvs_info *mvi) -{ -#if (_MV_DUMP > 2) - u64 addr; - void __iomem *regs = mvi->regs; - u32 entry = mvi->rx_cons + 1; - u32 rx_desc = le32_to_cpu(mvi->rx[entry]); - - /*Completion Queue */ - addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO); - dev_printk(KERN_DEBUG, mvi->dev, "Completion Task = 0x%p\n", - mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task); - dev_printk(KERN_DEBUG, mvi->dev, - "Completion List Base Address=0x%llX (PA), " - "CQ_Entry=%04d, CQ_WP=0x%08X\n", - addr, entry - 1, mvi->rx[0]); - mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc), - mvi->rx_dma + sizeof(u32) * entry); -#endif -} - -void mvs_get_sas_addr(void *buf, u32 buflen) -{ - /*memcpy(buf, "\x50\x05\x04\x30\x11\xab\x64\x40", 8);*/ -} - struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev) { unsigned long i = 0, j = 0, hi = 0; @@ -222,7 +102,6 @@ struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev) } -/* FIXME */ 
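The mv_sas.c hunks above replace the fixed DECLARE_BITMAP tag array embedded in struct mvs_info with a kzalloc'd bitmap sized from MVS_CHIP_SLOT_SZ, while keeping the find_first_zero_bit/set_bit allocation scheme. The sketch below is a minimal userspace model of that scheme, assuming plain C word arithmetic in place of the kernel bitmap helpers; the struct and function names here are illustrative only.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* toy model of the per-host tag bitmap: one bit per command slot */
struct tag_pool {
        unsigned long *bits;
        unsigned int   nbits;
};

static int tag_pool_init(struct tag_pool *p, unsigned int slots)
{
        size_t words = (slots + 8 * sizeof(unsigned long) - 1) /
                       (8 * sizeof(unsigned long));

        p->bits  = calloc(words, sizeof(unsigned long));
        p->nbits = slots;
        return p->bits ? 0 : -1;
}

/* find-first-zero-bit equivalent, then mark the slot busy */
static int tag_alloc(struct tag_pool *p, unsigned int *tag_out)
{
        for (unsigned int i = 0; i < p->nbits; i++) {
                unsigned long *word = &p->bits[i / (8 * sizeof(unsigned long))];
                unsigned long  mask = 1UL << (i % (8 * sizeof(unsigned long)));

                if (!(*word & mask)) {
                        *word |= mask;
                        *tag_out = i;
                        return 0;
                }
        }
        return -1;      /* all slots busy */
}

static void tag_free(struct tag_pool *p, unsigned int tag)
{
        p->bits[tag / (8 * sizeof(unsigned long))] &=
                ~(1UL << (tag % (8 * sizeof(unsigned long))));
}

int main(void)
{
        struct tag_pool pool;
        unsigned int tag;

        tag_pool_init(&pool, 512);      /* pool sized like a 94xx slot ring */
        if (tag_alloc(&pool, &tag) == 0) {
                printf("allocated tag %u\n", tag);
                tag_free(&pool, tag);
        }
        free(pool.bits);
        return 0;
}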
int mvs_find_dev_phyno(struct domain_device *dev, int *phyno) { unsigned long i = 0, j = 0, n = 0, num = 0; @@ -253,6 +132,20 @@ int mvs_find_dev_phyno(struct domain_device *dev, int *phyno) return num; } +struct mvs_device *mvs_find_dev_by_reg_set(struct mvs_info *mvi, + u8 reg_set) +{ + u32 dev_no; + for (dev_no = 0; dev_no < MVS_MAX_DEVICES; dev_no++) { + if (mvi->devices[dev_no].taskfileset == MVS_ID_NOT_MAPPED) + continue; + + if (mvi->devices[dev_no].taskfileset == reg_set) + return &mvi->devices[dev_no]; + } + return NULL; +} + static inline void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_device *dev) { @@ -283,7 +176,6 @@ void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard) } } -/* FIXME: locking? */ int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, void *funcdata) { @@ -309,12 +201,12 @@ int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id); if (tmp & PHY_RST_HARD) break; - MVS_CHIP_DISP->phy_reset(mvi, phy_id, 1); + MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_HARD_RESET); break; case PHY_FUNC_LINK_RESET: MVS_CHIP_DISP->phy_enable(mvi, phy_id); - MVS_CHIP_DISP->phy_reset(mvi, phy_id, 0); + MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_SOFT_RESET); break; case PHY_FUNC_DISABLE: @@ -406,14 +298,10 @@ int mvs_slave_configure(struct scsi_device *sdev) if (ret) return ret; - if (dev_is_sata(dev)) { - /* may set PIO mode */ - #if MV_DISABLE_NCQ - struct ata_port *ap = dev->sata_dev.ap; - struct ata_device *adev = ap->link.device; - adev->flags |= ATA_DFLAG_NCQ_OFF; - scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1); - #endif + if (!dev_is_sata(dev)) { + sas_change_queue_depth(sdev, + MVS_QUEUE_SIZE, + SCSI_QDEPTH_DEFAULT); } return 0; } @@ -424,6 +312,7 @@ void mvs_scan_start(struct Scsi_Host *shost) unsigned short core_nr; struct mvs_info *mvi; struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct mvs_prv_info *mvs_prv = sha->lldd_ha; core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; @@ -432,15 +321,17 @@ void mvs_scan_start(struct Scsi_Host *shost) for (i = 0; i < mvi->chip->n_phy; ++i) mvs_bytes_dmaed(mvi, i); } + mvs_prv->scan_finished = 1; } int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time) { - /* give the phy enabling interrupt event time to come in (1s - * is empirically about all it takes) */ - if (time < HZ) + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct mvs_prv_info *mvs_prv = sha->lldd_ha; + + if (mvs_prv->scan_finished == 0) return 0; - /* Wait for discovery to finish */ + scsi_flush_work(shost); return 1; } @@ -461,10 +352,7 @@ static int mvs_task_prep_smp(struct mvs_info *mvi, void *buf_prd; struct mvs_slot_info *slot = &mvi->slot_info[tag]; u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); -#if _MV_DUMP - u8 *buf_cmd; - void *from; -#endif + /* * DMA-map SMP request, response buffers */ @@ -496,15 +384,7 @@ static int mvs_task_prep_smp(struct mvs_info *mvi, buf_tmp = slot->buf; buf_tmp_dma = slot->buf_dma; -#if _MV_DUMP - buf_cmd = buf_tmp; - hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma); - buf_tmp += req_len; - buf_tmp_dma += req_len; - slot->cmd_size = req_len; -#else hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req)); -#endif /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ buf_oaf = buf_tmp; @@ -553,12 +433,6 @@ static int mvs_task_prep_smp(struct mvs_info *mvi, /* fill in PRD (scatter/gather) table, if any */ MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); -#if _MV_DUMP - /* copy cmd table */ - from = 
kmap_atomic(sg_page(sg_req), KM_IRQ0); - memcpy(buf_cmd, from + sg_req->offset, req_len); - kunmap_atomic(from, KM_IRQ0); -#endif return 0; err_out_2: @@ -616,14 +490,11 @@ static int mvs_task_prep_ata(struct mvs_info *mvi, (mvi_dev->taskfileset << TXQ_SRS_SHIFT); mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q); -#ifndef DISABLE_HOTPLUG_DMA_FIX if (task->data_dir == DMA_FROM_DEVICE) flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT); else flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); -#else - flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); -#endif + if (task->ata_task.use_ncq) flags |= MCH_FPDMA; if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) { @@ -631,11 +502,8 @@ static int mvs_task_prep_ata(struct mvs_info *mvi, flags |= MCH_ATAPI; } - /* FIXME: fill in port multiplier number */ - hdr->flags = cpu_to_le32(flags); - /* FIXME: the low order order 5 bits for the TAG if enable NCQ */ if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag)) task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); else @@ -657,9 +525,6 @@ static int mvs_task_prep_ata(struct mvs_info *mvi, buf_tmp += MVS_ATA_CMD_SZ; buf_tmp_dma += MVS_ATA_CMD_SZ; -#if _MV_DUMP - slot->cmd_size = MVS_ATA_CMD_SZ; -#endif /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ /* used for STP. unused for SATA? */ @@ -682,9 +547,6 @@ static int mvs_task_prep_ata(struct mvs_info *mvi, buf_tmp_dma += i; /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ - /* FIXME: probably unused, for SATA. kept here just in case - * we get a STP/SATA error information record - */ slot->response = buf_tmp; hdr->status_buf = cpu_to_le64(buf_tmp_dma); if (mvi->flags & MVF_FLAG_SOC) @@ -715,11 +577,11 @@ static int mvs_task_prep_ata(struct mvs_info *mvi, /* fill in PRD (scatter/gather) table, if any */ MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); -#ifndef DISABLE_HOTPLUG_DMA_FIX + if (task->data_dir == DMA_FROM_DEVICE) - MVS_CHIP_DISP->dma_fix(mvi->bulk_buffer_dma, + MVS_CHIP_DISP->dma_fix(mvi, sas_port->phy_mask, TRASH_BUCKET_SIZE, tei->n_elem, buf_prd); -#endif + return 0; } @@ -761,6 +623,9 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi, } if (is_tmf) flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT); + else + flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT); + hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT)); hdr->tags = cpu_to_le32(tag); hdr->data_len = cpu_to_le32(task->total_xfer_len); @@ -777,9 +642,6 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi, buf_tmp += MVS_SSP_CMD_SZ; buf_tmp_dma += MVS_SSP_CMD_SZ; -#if _MV_DUMP - slot->cmd_size = MVS_SSP_CMD_SZ; -#endif /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ buf_oaf = buf_tmp; @@ -986,7 +848,6 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf task->task_state_flags |= SAS_TASK_AT_INITIATOR; spin_unlock(&task->task_state_lock); - mvs_hba_memory_dump(mvi, tag, task->task_proto); mvi_dev->running_req++; ++(*pass); mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); @@ -1189,9 +1050,9 @@ static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task, mvs_slot_free(mvi, slot_idx); } -static void mvs_update_wideport(struct mvs_info *mvi, int i) +static void mvs_update_wideport(struct mvs_info *mvi, int phy_no) { - struct mvs_phy *phy = &mvi->phy[i]; + struct mvs_phy *phy = &mvi->phy[phy_no]; struct mvs_port *port = phy->port; int j, no; @@ -1246,18 +1107,17 @@ static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf) return 
NULL; MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3); - s[3] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); + s[3] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i)); MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2); - s[2] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); + s[2] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i)); MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1); - s[1] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); + s[1] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i)); MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0); - s[0] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); + s[0] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i)); - /* Workaround: take some ATAPI devices for ATA */ if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01)) s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10); @@ -1269,6 +1129,13 @@ static u32 mvs_is_sig_fis_received(u32 irq_status) return irq_status & PHYEV_SIG_FIS; } +static void mvs_sig_remove_timer(struct mvs_phy *phy) +{ + if (phy->timer.function) + del_timer(&phy->timer); + phy->timer.function = NULL; +} + void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st) { struct mvs_phy *phy = &mvi->phy[i]; @@ -1291,6 +1158,7 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st) if (phy->phy_type & PORT_TYPE_SATA) { phy->identify.target_port_protocols = SAS_PROTOCOL_STP; if (mvs_is_sig_fis_received(phy->irq_status)) { + mvs_sig_remove_timer(phy); phy->phy_attached = 1; phy->att_dev_sas_addr = i + mvi->id * mvi->chip->n_phy; @@ -1308,7 +1176,6 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st) tmp | PHYEV_SIG_FIS); phy->phy_attached = 0; phy->phy_type &= ~PORT_TYPE_SATA; - MVS_CHIP_DISP->phy_reset(mvi, i, 0); goto out_done; } } else if (phy->phy_type & PORT_TYPE_SAS @@ -1334,9 +1201,9 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st) if (MVS_CHIP_DISP->phy_work_around) MVS_CHIP_DISP->phy_work_around(mvi, i); } - mv_dprintk("port %d attach dev info is %x\n", + mv_dprintk("phy %d attach dev info is %x\n", i + mvi->id * mvi->chip->n_phy, phy->att_dev_info); - mv_dprintk("port %d attach sas addr is %llx\n", + mv_dprintk("phy %d attach sas addr is %llx\n", i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr); out_done: if (get_st) @@ -1361,10 +1228,10 @@ static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock) } hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy; mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi]; - if (sas_port->id >= mvi->chip->n_phy) - port = &mvi->port[sas_port->id - mvi->chip->n_phy]; + if (i >= mvi->chip->n_phy) + port = &mvi->port[i - mvi->chip->n_phy]; else - port = &mvi->port[sas_port->id]; + port = &mvi->port[i]; if (lock) spin_lock_irqsave(&mvi->lock, flags); port->port_attached = 1; @@ -1393,7 +1260,7 @@ static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock) return; } list_for_each_entry(dev, &port->dev_list, dev_list_node) - mvs_do_release_task(phy->mvi, phy_no, NULL); + mvs_do_release_task(phy->mvi, phy_no, dev); } @@ -1457,6 +1324,7 @@ int mvs_dev_found_notify(struct domain_device *dev, int lock) mvi_device->dev_status = MVS_DEV_NORMAL; mvi_device->dev_type = dev->dev_type; mvi_device->mvi_info = mvi; + mvi_device->sas_device = dev; if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) { int phy_id; u8 phy_num = parent_dev->ex_dev.num_phys; @@ -1508,6 +1376,7 @@ void mvs_dev_gone_notify(struct domain_device *dev) mv_dprintk("found dev has gone.\n"); 
} dev->lldd_dev = NULL; + mvi_dev->sas_device = NULL; spin_unlock_irqrestore(&mvi->lock, flags); } @@ -1555,7 +1424,6 @@ static void mvs_tmf_timedout(unsigned long data) complete(&task->completion); } -/* XXX */ #define MVS_TASK_TIMEOUT 20 static int mvs_exec_internal_tmf_task(struct domain_device *dev, void *parameter, u32 para_len, struct mvs_tmf_task *tmf) @@ -1588,7 +1456,7 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev, } wait_for_completion(&task->completion); - res = -TMF_RESP_FUNC_FAILED; + res = TMF_RESP_FUNC_FAILED; /* Even TMF timed out, return direct. */ if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { @@ -1638,11 +1506,10 @@ static int mvs_debug_issue_ssp_tmf(struct domain_device *dev, u8 *lun, struct mvs_tmf_task *tmf) { struct sas_ssp_task ssp_task; - DECLARE_COMPLETION_ONSTACK(completion); if (!(dev->tproto & SAS_PROTOCOL_SSP)) return TMF_RESP_FUNC_ESUPP; - strncpy((u8 *)&ssp_task.LUN, lun, 8); + memcpy(ssp_task.LUN, lun, 8); return mvs_exec_internal_tmf_task(dev, &ssp_task, sizeof(ssp_task), tmf); @@ -1666,7 +1533,7 @@ static int mvs_debug_I_T_nexus_reset(struct domain_device *dev) int mvs_lu_reset(struct domain_device *dev, u8 *lun) { unsigned long flags; - int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED; + int rc = TMF_RESP_FUNC_FAILED; struct mvs_tmf_task tmf_task; struct mvs_device * mvi_dev = dev->lldd_dev; struct mvs_info *mvi = mvi_dev->mvi_info; @@ -1675,10 +1542,8 @@ int mvs_lu_reset(struct domain_device *dev, u8 *lun) mvi_dev->dev_status = MVS_DEV_EH; rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task); if (rc == TMF_RESP_FUNC_COMPLETE) { - num = mvs_find_dev_phyno(dev, phyno); spin_lock_irqsave(&mvi->lock, flags); - for (i = 0; i < num; i++) - mvs_release_task(mvi, dev); + mvs_release_task(mvi, dev); spin_unlock_irqrestore(&mvi->lock, flags); } /* If failed, fall-through I_T_Nexus reset */ @@ -1696,11 +1561,12 @@ int mvs_I_T_nexus_reset(struct domain_device *dev) if (mvi_dev->dev_status != MVS_DEV_EH) return TMF_RESP_FUNC_COMPLETE; + else + mvi_dev->dev_status = MVS_DEV_NORMAL; rc = mvs_debug_I_T_nexus_reset(dev); mv_printk("%s for device[%x]:rc= %d\n", __func__, mvi_dev->device_id, rc); - /* housekeeper */ spin_lock_irqsave(&mvi->lock, flags); mvs_release_task(mvi, dev); spin_unlock_irqrestore(&mvi->lock, flags); @@ -1739,9 +1605,6 @@ int mvs_query_task(struct sas_task *task) case TMF_RESP_FUNC_FAILED: case TMF_RESP_FUNC_COMPLETE: break; - default: - rc = TMF_RESP_FUNC_COMPLETE; - break; } } mv_printk("%s:rc= %d\n", __func__, rc); @@ -1761,8 +1624,8 @@ int mvs_abort_task(struct sas_task *task) u32 tag; if (!mvi_dev) { - mv_printk("%s:%d TMF_RESP_FUNC_FAILED\n", __func__, __LINE__); - rc = TMF_RESP_FUNC_FAILED; + mv_printk("Device has removed\n"); + return TMF_RESP_FUNC_FAILED; } mvi = mvi_dev->mvi_info; @@ -1807,25 +1670,17 @@ int mvs_abort_task(struct sas_task *task) } else if (task->task_proto & SAS_PROTOCOL_SATA || task->task_proto & SAS_PROTOCOL_STP) { - /* to do free register_set */ if (SATA_DEV == dev->dev_type) { struct mvs_slot_info *slot = task->lldd_task; - struct task_status_struct *tstat; u32 slot_idx = (u32)(slot - mvi->slot_info); - tstat = &task->task_status; - mv_dprintk(KERN_DEBUG "mv_abort_task() mvi=%p task=%p " + mv_dprintk("mvs_abort_task() mvi=%p task=%p " "slot=%p slot_idx=x%x\n", mvi, task, slot, slot_idx); - tstat->stat = SAS_ABORTED_TASK; - if (mvi_dev && mvi_dev->running_req) - mvi_dev->running_req--; - if 
(sas_protocol_ata(task->task_proto)) - mvs_free_reg_set(mvi, mvi_dev); + mvs_tmf_timedout((unsigned long)task); mvs_slot_task_free(mvi, task, slot, slot_idx); - return -1; + rc = TMF_RESP_FUNC_COMPLETE; + goto out; } - } else { - /* SMP */ } out: @@ -1891,12 +1746,63 @@ static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task, return stat; } +void mvs_set_sense(u8 *buffer, int len, int d_sense, + int key, int asc, int ascq) +{ + memset(buffer, 0, len); + + if (d_sense) { + /* Descriptor format */ + if (len < 4) { + mv_printk("Length %d of sense buffer too small to " + "fit sense %x:%x:%x", len, key, asc, ascq); + } + + buffer[0] = 0x72; /* Response Code */ + if (len > 1) + buffer[1] = key; /* Sense Key */ + if (len > 2) + buffer[2] = asc; /* ASC */ + if (len > 3) + buffer[3] = ascq; /* ASCQ */ + } else { + if (len < 14) { + mv_printk("Length %d of sense buffer too small to " + "fit sense %x:%x:%x", len, key, asc, ascq); + } + + buffer[0] = 0x70; /* Response Code */ + if (len > 2) + buffer[2] = key; /* Sense Key */ + if (len > 7) + buffer[7] = 0x0a; /* Additional Sense Length */ + if (len > 12) + buffer[12] = asc; /* ASC */ + if (len > 13) + buffer[13] = ascq; /* ASCQ */ + } + + return; +} + +void mvs_fill_ssp_resp_iu(struct ssp_response_iu *iu, + u8 key, u8 asc, u8 asc_q) +{ + iu->datapres = 2; + iu->response_data_len = 0; + iu->sense_data_len = 17; + iu->status = 02; + mvs_set_sense(iu->sense_data, 17, 0, + key, asc, asc_q); +} + static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task, u32 slot_idx) { struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; int stat; - u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response)); + u32 err_dw0 = le32_to_cpu(*(u32 *)slot->response); + u32 err_dw1 = le32_to_cpu(*((u32 *)slot->response + 1)); u32 tfs = 0; enum mvs_port_type type = PORT_TYPE_SAS; @@ -1908,8 +1814,19 @@ static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task, stat = SAM_STAT_CHECK_CONDITION; switch (task->task_proto) { case SAS_PROTOCOL_SSP: + { stat = SAS_ABORTED_TASK; + if ((err_dw0 & NO_DEST) || err_dw1 & bit(31)) { + struct ssp_response_iu *iu = slot->response + + sizeof(struct mvs_err_info); + mvs_fill_ssp_resp_iu(iu, NOT_READY, 0x04, 01); + sas_ssp_task_response(mvi->dev, task, iu); + stat = SAM_STAT_CHECK_CONDITION; + } + if (err_dw1 & bit(31)) + mv_printk("reuse same slot, retry command.\n"); break; + } case SAS_PROTOCOL_SMP: stat = SAM_STAT_CHECK_CONDITION; break; @@ -1918,10 +1835,8 @@ static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task, case SAS_PROTOCOL_STP: case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: { - if (err_dw0 == 0x80400002) - mv_printk("find reserved error, why?\n"); - task->ata_task.use_ncq = 0; + stat = SAS_PROTO_RESPONSE; mvs_sata_done(mvi, task, slot_idx, err_dw0); } break; @@ -1945,8 +1860,6 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags) void *to; enum exec_status sts; - if (mvi->exp_req) - mvi->exp_req--; if (unlikely(!task || !task->lldd_task || !task->dev)) return -1; @@ -1954,8 +1867,6 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags) dev = task->dev; mvi_dev = dev->lldd_dev; - mvs_hba_cq_dump(mvi); - spin_lock(&task->task_state_lock); task->task_state_flags &= ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); @@ -1978,6 +1889,7 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags) return -1; } + /* when no device attaching, go ahead and complete by error handling*/ if (unlikely(!mvi_dev || flags)) { if (!mvi_dev) mv_dprintk("port has not 
device.\n"); @@ -1987,6 +1899,9 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags) /* error info record present */ if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) { + mv_dprintk("port %d slot %d rx_desc %X has error info" + "%016llX.\n", slot->port->sas_port.id, slot_idx, + rx_desc, (u64)(*(u64 *)slot->response)); tstat->stat = mvs_slot_err(mvi, task, slot_idx); tstat->resp = SAS_TASK_COMPLETE; goto out; @@ -2048,8 +1963,7 @@ out: spin_unlock(&mvi->lock); if (task->task_done) task->task_done(task); - else - mv_dprintk("why has not task_done.\n"); + spin_lock(&mvi->lock); return sts; @@ -2092,7 +2006,6 @@ void mvs_release_task(struct mvs_info *mvi, struct domain_device *dev) { int i, phyno[WIDE_PORT_MAX_PHY], num; - /* housekeeper */ num = mvs_find_dev_phyno(dev, phyno); for (i = 0; i < num; i++) mvs_do_release_task(mvi, phyno[i], dev); @@ -2111,13 +2024,13 @@ static void mvs_work_queue(struct work_struct *work) struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q); struct mvs_info *mvi = mwq->mvi; unsigned long flags; + u32 phy_no = (unsigned long) mwq->data; + struct sas_ha_struct *sas_ha = mvi->sas; + struct mvs_phy *phy = &mvi->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; spin_lock_irqsave(&mvi->lock, flags); if (mwq->handler & PHY_PLUG_EVENT) { - u32 phy_no = (unsigned long) mwq->data; - struct sas_ha_struct *sas_ha = mvi->sas; - struct mvs_phy *phy = &mvi->phy[phy_no]; - struct asd_sas_phy *sas_phy = &phy->sas_phy; if (phy->phy_event & PHY_PLUG_OUT) { u32 tmp; @@ -2139,6 +2052,11 @@ static void mvs_work_queue(struct work_struct *work) mv_dprintk("phy%d Attached Device\n", phy_no); } } + } else if (mwq->handler & EXP_BRCT_CHG) { + phy->phy_event &= ~EXP_BRCT_CHG; + sas_ha->notify_port_event(sas_phy, + PORTE_BROADCAST_RCVD); + mv_dprintk("phy%d Got Broadcast Change\n", phy_no); } list_del(&mwq->entry); spin_unlock_irqrestore(&mvi->lock, flags); @@ -2174,29 +2092,21 @@ static void mvs_sig_time_out(unsigned long tphy) if (&mvi->phy[phy_no] == phy) { mv_dprintk("Get signature time out, reset phy %d\n", phy_no+mvi->id*mvi->chip->n_phy); - MVS_CHIP_DISP->phy_reset(mvi, phy_no, 1); + MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_HARD_RESET); } } } -static void mvs_sig_remove_timer(struct mvs_phy *phy) -{ - if (phy->timer.function) - del_timer(&phy->timer); - phy->timer.function = NULL; -} - void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events) { u32 tmp; - struct sas_ha_struct *sas_ha = mvi->sas; struct mvs_phy *phy = &mvi->phy[phy_no]; - struct asd_sas_phy *sas_phy = &phy->sas_phy; phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no); - mv_dprintk("port %d ctrl sts=0x%X.\n", phy_no+mvi->id*mvi->chip->n_phy, + MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status); + mv_dprintk("phy %d ctrl sts=0x%08X.\n", phy_no+mvi->id*mvi->chip->n_phy, MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no)); - mv_dprintk("Port %d irq sts = 0x%X\n", phy_no+mvi->id*mvi->chip->n_phy, + mv_dprintk("phy %d irq sts = 0x%08X\n", phy_no+mvi->id*mvi->chip->n_phy, phy->irq_status); /* @@ -2205,11 +2115,12 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events) */ if (phy->irq_status & PHYEV_DCDR_ERR) { - mv_dprintk("port %d STP decoding error.\n", + mv_dprintk("phy %d STP decoding error.\n", phy_no + mvi->id*mvi->chip->n_phy); } if (phy->irq_status & PHYEV_POOF) { + mdelay(500); if (!(phy->phy_event & PHY_PLUG_OUT)) { int dev_sata = phy->phy_type & PORT_TYPE_SATA; int ready; @@ -2220,17 +2131,13 @@ void mvs_int_port(struct mvs_info 
*mvi, int phy_no, u32 events) (void *)(unsigned long)phy_no, PHY_PLUG_EVENT); ready = mvs_is_phy_ready(mvi, phy_no); - if (!ready) - mv_dprintk("phy%d Unplug Notice\n", - phy_no + - mvi->id * mvi->chip->n_phy); if (ready || dev_sata) { if (MVS_CHIP_DISP->stp_reset) MVS_CHIP_DISP->stp_reset(mvi, phy_no); else MVS_CHIP_DISP->phy_reset(mvi, - phy_no, 0); + phy_no, MVS_SOFT_RESET); return; } } @@ -2243,13 +2150,12 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events) if (phy->timer.function == NULL) { phy->timer.data = (unsigned long)phy; phy->timer.function = mvs_sig_time_out; - phy->timer.expires = jiffies + 10*HZ; + phy->timer.expires = jiffies + 5*HZ; add_timer(&phy->timer); } } if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) { phy->phy_status = mvs_is_phy_ready(mvi, phy_no); - mvs_sig_remove_timer(phy); mv_dprintk("notify plug in on phy[%d]\n", phy_no); if (phy->phy_status) { mdelay(10); @@ -2263,14 +2169,14 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events) } mvs_update_phyinfo(mvi, phy_no, 0); if (phy->phy_type & PORT_TYPE_SAS) { - MVS_CHIP_DISP->phy_reset(mvi, phy_no, 2); + MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_PHY_TUNE); mdelay(10); } mvs_bytes_dmaed(mvi, phy_no); /* whether driver is going to handle hot plug */ if (phy->phy_event & PHY_PLUG_OUT) { - mvs_port_notify_formed(sas_phy, 0); + mvs_port_notify_formed(&phy->sas_phy, 0); phy->phy_event &= ~PHY_PLUG_OUT; } } else { @@ -2278,13 +2184,11 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events) phy_no + mvi->id*mvi->chip->n_phy); } } else if (phy->irq_status & PHYEV_BROAD_CH) { - mv_dprintk("port %d broadcast change.\n", + mv_dprintk("phy %d broadcast change.\n", phy_no + mvi->id*mvi->chip->n_phy); - /* exception for Samsung disk drive*/ - mdelay(1000); - sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); + mvs_handle_event(mvi, (void *)(unsigned long)phy_no, + EXP_BRCT_CHG); } - MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status); } int mvs_int_rx(struct mvs_info *mvi, bool self_clear) diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h index 1367d8b9350..44d7885a4a1 100644 --- a/drivers/scsi/mvsas/mv_sas.h +++ b/drivers/scsi/mvsas/mv_sas.h @@ -48,12 +48,8 @@ #define DRV_NAME "mvsas" #define DRV_VERSION "0.8.2" -#define _MV_DUMP 0 #define MVS_ID_NOT_MAPPED 0x7f -/* #define DISABLE_HOTPLUG_DMA_FIX */ -// #define MAX_EXP_RUNNING_REQ 2 #define WIDE_PORT_MAX_PHY 4 -#define MV_DISABLE_NCQ 0 #define mv_printk(fmt, arg ...) 
\ printk(KERN_DEBUG"%s %d:" fmt, __FILE__, __LINE__, ## arg) #ifdef MV_DEBUG @@ -64,6 +60,7 @@ #endif #define MV_MAX_U32 0xffffffff +extern int interrupt_coalescing; extern struct mvs_tgt_initiator mvs_tgt; extern struct mvs_info *tgt_mvi; extern const struct mvs_dispatch mvs_64xx_dispatch; @@ -99,6 +96,11 @@ enum dev_status { MVS_DEV_EH = 0x1, }; +enum dev_reset { + MVS_SOFT_RESET = 0, + MVS_HARD_RESET = 1, + MVS_PHY_TUNE = 2, +}; struct mvs_info; @@ -130,7 +132,6 @@ struct mvs_dispatch { u32 (*read_port_irq_mask)(struct mvs_info *mvi, u32 port); void (*write_port_irq_mask)(struct mvs_info *mvi, u32 port, u32 val); - void (*get_sas_addr)(void *buf, u32 buflen); void (*command_active)(struct mvs_info *mvi, u32 slot_idx); void (*clear_srs_irq)(struct mvs_info *mvi, u8 reg_set, u8 clear_all); void (*issue_stop)(struct mvs_info *mvi, enum mvs_port_type type, @@ -167,9 +168,10 @@ struct mvs_dispatch { ); int (*spi_issuecmd)(struct mvs_info *mvi, u32 cmd); int (*spi_waitdataready)(struct mvs_info *mvi, u32 timeout); -#ifndef DISABLE_HOTPLUG_DMA_FIX - void (*dma_fix)(dma_addr_t buf_dma, int buf_len, int from, void *prd); -#endif + void (*dma_fix)(struct mvs_info *mvi, u32 phy_mask, + int buf_len, int from, void *prd); + void (*tune_interrupt)(struct mvs_info *mvi, u32 time); + void (*non_spec_ncq_error)(struct mvs_info *mvi); }; @@ -179,9 +181,11 @@ struct mvs_chip_info { u32 fis_offs; u32 fis_count; u32 srs_sz; + u32 sg_width; u32 slot_width; const struct mvs_dispatch *dispatch; }; +#define MVS_MAX_SG (1U << mvi->chip->sg_width) #define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width) #define MVS_RX_FISL_SZ \ (mvi->chip->fis_offs + (mvi->chip->fis_count * 0x100)) @@ -249,6 +253,73 @@ struct mvs_device { u16 reserved; }; +/* Generate PHY tunning parameters */ +struct phy_tuning { + /* 1 bit, transmitter emphasis enable */ + u8 trans_emp_en:1; + /* 4 bits, transmitter emphasis amplitude */ + u8 trans_emp_amp:4; + /* 3 bits, reserved space */ + u8 Reserved_2bit_1:3; + /* 5 bits, transmitter amplitude */ + u8 trans_amp:5; + /* 2 bits, transmitter amplitude adjust */ + u8 trans_amp_adj:2; + /* 1 bit, reserved space */ + u8 resv_2bit_2:1; + /* 2 bytes, reserved space */ + u8 reserved[2]; +}; + +struct ffe_control { + /* 4 bits, FFE Capacitor Select (value range 0~F) */ + u8 ffe_cap_sel:4; + /* 3 bits, FFE Resistor Select (value range 0~7) */ + u8 ffe_rss_sel:3; + /* 1 bit reserve*/ + u8 reserved:1; +}; + +/* + * HBA_Info_Page is saved in Flash/NVRAM, total 256 bytes. + * The data area is valid only Signature="MRVL". + * If any member fills with 0xFF, the member is invalid. + */ +struct hba_info_page { + /* Dword 0 */ + /* 4 bytes, structure signature,should be "MRVL" at first initial */ + u8 signature[4]; + + /* Dword 1-13 */ + u32 reserved1[13]; + + /* Dword 14-29 */ + /* 64 bytes, SAS address for each port */ + u64 sas_addr[8]; + + /* Dword 30-31 */ + /* 8 bytes for vanir 8 port PHY FFE seeting + * BIT 0~3 : FFE Capacitor select(value range 0~F) + * BIT 4~6 : FFE Resistor select(value range 0~7) + * BIT 7: reserve. 
+ */ + + struct ffe_control ffe_ctl[8]; + /* Dword 32 -43 */ + u32 reserved2[12]; + + /* Dword 44-45 */ + /* 8 bytes, 0: 1.5G, 1: 3.0G, should be 0x01 at first initial */ + u8 phy_rate[8]; + + /* Dword 46-53 */ + /* 32 bytes, PHY tuning parameters for each PHY*/ + struct phy_tuning phy_tuning[8]; + + /* Dword 54-63 */ + u32 reserved3[10]; +}; /* total 256 bytes */ + struct mvs_slot_info { struct list_head entry; union { @@ -264,9 +335,6 @@ struct mvs_slot_info { */ void *buf; dma_addr_t buf_dma; -#if _MV_DUMP - u32 cmd_size; -#endif void *response; struct mvs_port *port; struct mvs_device *device; @@ -320,12 +388,10 @@ struct mvs_info { const struct mvs_chip_info *chip; int tags_num; - DECLARE_BITMAP(tags, MVS_SLOTS); + unsigned long *tags; /* further per-slot information */ struct mvs_phy phy[MVS_MAX_PHYS]; struct mvs_port port[MVS_MAX_PHYS]; - u32 irq; - u32 exp_req; u32 id; u64 sata_reg_set; struct list_head *hba_list; @@ -337,12 +403,13 @@ struct mvs_info { u32 flashsectSize; void *addon; + struct hba_info_page hba_info_param; struct mvs_device devices[MVS_MAX_DEVICES]; -#ifndef DISABLE_HOTPLUG_DMA_FIX void *bulk_buffer; dma_addr_t bulk_buffer_dma; + void *bulk_buffer1; + dma_addr_t bulk_buffer_dma1; #define TRASH_BUCKET_SIZE 0x20000 -#endif void *dma_pool; struct mvs_slot_info slot_info[0]; }; @@ -350,8 +417,10 @@ struct mvs_info { struct mvs_prv_info{ u8 n_host; u8 n_phy; - u16 reserve; + u8 scan_finished; + u8 reserve; struct mvs_info *mvi[2]; + struct tasklet_struct mv_tasklet; }; struct mvs_wq { @@ -415,6 +484,6 @@ void mvs_do_release_task(struct mvs_info *mvi, int phy_no, void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events); void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st); int mvs_int_rx(struct mvs_info *mvi, bool self_clear); -void mvs_hexdump(u32 size, u8 *data, u32 baseaddr); +struct mvs_device *mvs_find_dev_by_reg_set(struct mvs_info *mvi, u8 reg_set); #endif diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h index aa05e661d11..b97c8ab0c20 100644 --- a/drivers/scsi/pm8001/pm8001_sas.h +++ b/drivers/scsi/pm8001/pm8001_sas.h @@ -54,7 +54,7 @@ #include <scsi/libsas.h> #include <scsi/scsi_tcq.h> #include <scsi/sas_ata.h> -#include <asm/atomic.h> +#include <linux/atomic.h> #include "pm8001_defs.h" #define DRV_NAME "pm8001" diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index fca6a895307..d079f9a3c6b 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -3871,6 +3871,9 @@ static long pmcraid_ioctl_passthrough( pmcraid_err("couldn't build passthrough ioadls\n"); goto out_free_buffer; } + } else if (request_size < 0) { + rc = -EINVAL; + goto out_free_buffer; } /* If data is being written into the device, copy the data from user diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 532313e0725..7836eb01c7f 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -42,8 +42,8 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj, int reading; if (IS_QLA82XX(ha)) { - DEBUG2(qla_printk(KERN_INFO, ha, - "Firmware dump not supported for ISP82xx\n")); + ql_dbg(ql_dbg_user, vha, 0x705b, + "Firmware dump not supported for ISP82xx\n"); return count; } @@ -56,7 +56,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj, if (!ha->fw_dump_reading) break; - qla_printk(KERN_INFO, ha, + ql_log(ql_log_info, vha, 0x705d, "Firmware dump cleared on (%ld).\n", vha->host_no); ha->fw_dump_reading = 0; @@ -66,7 +66,7 @@ 
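
Editor's aside (not part of the patch): the struct hba_info_page added to mv_sas.h above is documented as a 256-byte Flash/NVRAM page whose data area is valid only when the signature reads "MRVL", and whose individual members are invalid when left at 0xFF. A minimal sketch of how a consumer might apply those two rules is shown below; the helper names are hypothetical and exist only to illustrate the layout described in the header comments.

/* Illustrative only -- not part of the commit.  Assumes the
 * struct hba_info_page definition from mv_sas.h above; the helper
 * names are hypothetical. */
static bool hba_info_page_valid(const struct hba_info_page *p)
{
	/* Data area is valid only when the signature reads "MRVL". */
	return memcmp(p->signature, "MRVL", 4) == 0;
}

static bool hba_phy_rate_valid(const struct hba_info_page *p, int phy)
{
	/* A member still at 0xFF has never been programmed in flash. */
	return p->phy_rate[phy] != 0xFF;
}
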
qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj, if (ha->fw_dumped && !ha->fw_dump_reading) { ha->fw_dump_reading = 1; - qla_printk(KERN_INFO, ha, + ql_log(ql_log_info, vha, 0x705e, "Raw firmware dump ready for read on (%ld).\n", vha->host_no); } @@ -148,7 +148,7 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj, } if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { - qla_printk(KERN_WARNING, ha, + ql_log(ql_log_warn, vha, 0x705f, "HBA not online, failing NVRAM update.\n"); return -EAGAIN; } @@ -158,6 +158,8 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj, ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base, count); + ql_dbg(ql_dbg_user, vha, 0x7060, + "Setting ISP_ABORT_NEEDED\n"); /* NVRAM settings take effect immediately. */ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); @@ -255,9 +257,9 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, ha->optrom_state = QLA_SWAITING; - DEBUG2(qla_printk(KERN_INFO, ha, + ql_dbg(ql_dbg_user, vha, 0x7061, "Freeing flash region allocation -- 0x%x bytes.\n", - ha->optrom_region_size)); + ha->optrom_region_size); vfree(ha->optrom_buffer); ha->optrom_buffer = NULL; @@ -273,7 +275,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, ha->optrom_state = QLA_SREADING; ha->optrom_buffer = vmalloc(ha->optrom_region_size); if (ha->optrom_buffer == NULL) { - qla_printk(KERN_WARNING, ha, + ql_log(ql_log_warn, vha, 0x7062, "Unable to allocate memory for optrom retrieval " "(%x).\n", ha->optrom_region_size); @@ -282,14 +284,14 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, } if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { - qla_printk(KERN_WARNING, ha, - "HBA not online, failing NVRAM update.\n"); + ql_log(ql_log_warn, vha, 0x7063, + "HBA not online, failing NVRAM update.\n"); return -EAGAIN; } - DEBUG2(qla_printk(KERN_INFO, ha, + ql_dbg(ql_dbg_user, vha, 0x7064, "Reading flash region -- 0x%x/0x%x.\n", - ha->optrom_region_start, ha->optrom_region_size)); + ha->optrom_region_start, ha->optrom_region_size); memset(ha->optrom_buffer, 0, ha->optrom_region_size); ha->isp_ops->read_optrom(vha, ha->optrom_buffer, @@ -328,7 +330,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, else if (IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha)) valid = 1; if (!valid) { - qla_printk(KERN_WARNING, ha, + ql_log(ql_log_warn, vha, 0x7065, "Invalid start region 0x%x/0x%x.\n", start, size); return -EINVAL; } @@ -340,17 +342,17 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, ha->optrom_state = QLA_SWRITING; ha->optrom_buffer = vmalloc(ha->optrom_region_size); if (ha->optrom_buffer == NULL) { - qla_printk(KERN_WARNING, ha, + ql_log(ql_log_warn, vha, 0x7066, "Unable to allocate memory for optrom update " - "(%x).\n", ha->optrom_region_size); + "(%x)\n", ha->optrom_region_size); ha->optrom_state = QLA_SWAITING; return count; } - DEBUG2(qla_printk(KERN_INFO, ha, + ql_dbg(ql_dbg_user, vha, 0x7067, "Staging flash region write -- 0x%x/0x%x.\n", - ha->optrom_region_start, ha->optrom_region_size)); + ha->optrom_region_start, ha->optrom_region_size); memset(ha->optrom_buffer, 0, ha->optrom_region_size); break; @@ -359,14 +361,14 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, break; if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { - qla_printk(KERN_WARNING, ha, + ql_log(ql_log_warn, vha, 0x7068, "HBA not online, failing flash update.\n"); return 
-EAGAIN; } - DEBUG2(qla_printk(KERN_INFO, ha, + ql_dbg(ql_dbg_user, vha, 0x7069, "Writing flash region -- 0x%x/0x%x.\n", - ha->optrom_region_start, ha->optrom_region_size)); + ha->optrom_region_start, ha->optrom_region_size); ha->isp_ops->write_optrom(vha, ha->optrom_buffer, ha->optrom_region_start, ha->optrom_region_size); @@ -425,7 +427,7 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj, return 0; if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { - qla_printk(KERN_WARNING, ha, + ql_log(ql_log_warn, vha, 0x706a, "HBA not online, failing VPD update.\n"); return -EAGAIN; } @@ -440,7 +442,7 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj, tmp_data = vmalloc(256); if (!tmp_data) { - qla_printk(KERN_WARNING, ha, + ql_log(ql_log_warn, vha, 0x706b, "Unable to allocate memory for VPD information update.\n"); goto done; } @@ -480,7 +482,7 @@ qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj, ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &ha->sfp_data_dma); if (!ha->sfp_data) { - qla_printk(KERN_WARNING, ha, + ql_log(ql_log_warn, vha, 0x706c, "Unable to allocate memory for SFP read-data.\n"); return 0; } @@ -499,9 +501,10 @@ do_read: rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data, addr, offset, SFP_BLOCK_SIZE, 0); if (rval != QLA_SUCCESS) { - qla_printk(KERN_WARNING, ha, + ql_log(ql_log_warn, vha, 0x706d, "Unable to read SFP data (%x/%x/%x).\n", rval, addr, offset); + count = 0; break; } @@ -538,8 +541,8 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, type = simple_strtol(buf, NULL, 10); switch (type) { case 0x2025c: - qla_printk(KERN_INFO, ha, - "Issuing ISP reset on (%ld).\n", vha->host_no); + ql_log(ql_log_info, vha, 0x706e, + "Issuing ISP reset.\n"); scsi_block_requests(vha->host); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); @@ -551,8 +554,8 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, if (!IS_QLA81XX(ha)) break; - qla_printk(KERN_INFO, ha, - "Issuing MPI reset on (%ld).\n", vha->host_no); + ql_log(ql_log_info, vha, 0x706f, + "Issuing MPI reset.\n"); /* Make sure FC side is not in reset */ qla2x00_wait_for_hba_online(vha); @@ -560,20 +563,19 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, /* Issue MPI reset */ scsi_block_requests(vha->host); if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS) - qla_printk(KERN_WARNING, ha, - "MPI reset failed on (%ld).\n", vha->host_no); + ql_log(ql_log_warn, vha, 0x7070, + "MPI reset failed.\n"); scsi_unblock_requests(vha->host); break; case 0x2025e: if (!IS_QLA82XX(ha) || vha != base_vha) { - qla_printk(KERN_INFO, ha, - "FCoE ctx reset not supported for host%ld.\n", - vha->host_no); + ql_log(ql_log_info, vha, 0x7071, + "FCoE ctx reset no supported.\n"); return count; } - qla_printk(KERN_INFO, ha, - "Issuing FCoE CTX reset on host%ld.\n", vha->host_no); + ql_log(ql_log_info, vha, 0x7072, + "Issuing FCoE ctx reset.\n"); set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); qla2x00_wait_for_fcoe_ctx_reset(vha); @@ -611,8 +613,8 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj, ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &ha->edc_data_dma); if (!ha->edc_data) { - DEBUG2(qla_printk(KERN_INFO, ha, - "Unable to allocate memory for EDC write.\n")); + ql_log(ql_log_warn, vha, 0x7073, + "Unable to allocate memory for EDC write.\n"); return 0; } } @@ -631,9 +633,9 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj, rval = qla2x00_write_sfp(vha, 
ha->edc_data_dma, ha->edc_data, dev, adr, len, opt); if (rval != QLA_SUCCESS) { - DEBUG2(qla_printk(KERN_INFO, ha, - "Unable to write EDC (%x) %02x:%02x:%04x:%02x:%02x.\n", - rval, dev, adr, opt, len, buf[8])); + ql_log(ql_log_warn, vha, 0x7074, + "Unable to write EDC (%x) %02x:%04x:%02x:%02x\n", + rval, dev, adr, opt, len, buf[8]); return 0; } @@ -669,8 +671,8 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj, ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &ha->edc_data_dma); if (!ha->edc_data) { - DEBUG2(qla_printk(KERN_INFO, ha, - "Unable to allocate memory for EDC status.\n")); + ql_log(ql_log_warn, vha, 0x708c, + "Unable to allocate memory for EDC status.\n"); return 0; } } @@ -688,9 +690,9 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj, rval = qla2x00_read_sfp(vha, ha->edc_data_dma, ha->edc_data, dev, adr, len, opt); if (rval != QLA_SUCCESS) { - DEBUG2(qla_printk(KERN_INFO, ha, - "Unable to write EDC status (%x) %02x:%02x:%04x:%02x.\n", - rval, dev, adr, opt, len)); + ql_log(ql_log_info, vha, 0x7075, + "Unable to write EDC status (%x) %02x:%04x:%02x.\n", + rval, dev, adr, opt, len); return 0; } @@ -749,7 +751,7 @@ qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj, ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE, &ha->xgmac_data_dma, GFP_KERNEL); if (!ha->xgmac_data) { - qla_printk(KERN_WARNING, ha, + ql_log(ql_log_warn, vha, 0x7076, "Unable to allocate memory for XGMAC read-data.\n"); return 0; } @@ -761,7 +763,7 @@ do_read: rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma, XGMAC_DATA_SIZE, &actual_size); if (rval != QLA_SUCCESS) { - qla_printk(KERN_WARNING, ha, + ql_log(ql_log_warn, vha, 0x7077, "Unable to read XGMAC data (%x).\n", rval); count = 0; } @@ -801,7 +803,7 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj, ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE, &ha->dcbx_tlv_dma, GFP_KERNEL); if (!ha->dcbx_tlv) { - qla_printk(KERN_WARNING, ha, + ql_log(ql_log_warn, vha, 0x7078, "Unable to allocate memory for DCBX TLV read-data.\n"); return 0; } @@ -813,8 +815,8 @@ do_read: rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma, DCBX_TLV_DATA_SIZE); if (rval != QLA_SUCCESS) { - qla_printk(KERN_WARNING, ha, - "Unable to read DCBX TLV data (%x).\n", rval); + ql_log(ql_log_warn, vha, 0x7079, + "Unable to read DCBX TLV (%x).\n", rval); count = 0; } @@ -869,9 +871,13 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha) ret = sysfs_create_bin_file(&host->shost_gendev.kobj, iter->attr); if (ret) - qla_printk(KERN_INFO, vha->hw, - "Unable to create sysfs %s binary attribute " - "(%d).\n", iter->name, ret); + ql_log(ql_log_warn, vha, 0x00f3, + "Unable to create sysfs %s binary attribute (%d).\n", + iter->name, ret); + else + ql_dbg(ql_dbg_init, vha, 0x00f4, + "Successfully created sysfs %s binary attribure.\n", + iter->name); } } @@ -1126,7 +1132,7 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr, return -EPERM; if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { - qla_printk(KERN_WARNING, ha, + ql_log(ql_log_warn, vha, 0x707a, "Abort ISP active -- ignoring beacon request.\n"); return -EBUSY; } @@ -1322,9 +1328,8 @@ qla2x00_thermal_temp_show(struct device *dev, temp = frac = 0; if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) - DEBUG2_3_11(printk(KERN_WARNING - "%s(%ld): isp reset in progress.\n", - __func__, vha->host_no)); + ql_log(ql_log_warn, vha, 0x707b, + "ISP reset 
active.\n"); else if (!vha->hw->flags.eeh_busy) rval = qla2x00_get_thermal_temp(vha, &temp, &frac); if (rval != QLA_SUCCESS) @@ -1343,8 +1348,8 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr, if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) - DEBUG2_3_11(printk("%s(%ld): isp reset in progress.\n", - __func__, vha->host_no)); + ql_log(ql_log_warn, vha, 0x707c, + "ISP reset active.\n"); else if (!vha->hw->flags.eeh_busy) rval = qla2x00_get_firmware_state(vha, state); if (rval != QLA_SUCCESS) @@ -1645,8 +1650,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost) stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma); if (stats == NULL) { - DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n", - __func__, base_vha->host_no)); + ql_log(ql_log_warn, vha, 0x707d, + "Failed to allocate memory for stats.\n"); goto done; } memset(stats, 0, DMA_POOL_SIZE); @@ -1746,15 +1751,14 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) ret = qla24xx_vport_create_req_sanity_check(fc_vport); if (ret) { - DEBUG15(printk("qla24xx_vport_create_req_sanity_check failed, " - "status %x\n", ret)); + ql_log(ql_log_warn, vha, 0x707e, + "Vport sanity check failed, status %x\n", ret); return (ret); } vha = qla24xx_create_vhost(fc_vport); if (vha == NULL) { - DEBUG15(printk ("qla24xx_create_vhost failed, vha = %p\n", - vha)); + ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n"); return FC_VPORT_FAILED; } if (disable) { @@ -1764,8 +1768,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) atomic_set(&vha->vp_state, VP_FAILED); /* ready to create vport */ - qla_printk(KERN_INFO, vha->hw, "VP entry id %d assigned.\n", - vha->vp_idx); + ql_log(ql_log_info, vha, 0x7080, + "VP entry id %d assigned.\n", vha->vp_idx); /* initialized vport states */ atomic_set(&vha->loop_state, LOOP_DOWN); @@ -1775,8 +1779,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) if (atomic_read(&base_vha->loop_state) == LOOP_DOWN || atomic_read(&base_vha->loop_state) == LOOP_DEAD) { /* Don't retry or attempt login of this virtual port */ - DEBUG15(printk ("scsi(%ld): pport loop_state is not UP.\n", - base_vha->host_no)); + ql_dbg(ql_dbg_user, vha, 0x7081, + "Vport loop state is not UP.\n"); atomic_set(&vha->loop_state, LOOP_DEAD); if (!disable) fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN); @@ -1785,9 +1789,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) { if (ha->fw_attributes & BIT_4) { vha->flags.difdix_supported = 1; - DEBUG18(qla_printk(KERN_INFO, ha, - "Registering for DIF/DIX type 1 and 3" - " protection.\n")); + ql_dbg(ql_dbg_user, vha, 0x7082, + "Registered for DIF/DIX type 1 and 3 protection.\n"); scsi_host_set_prot(vha->host, SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION @@ -1802,8 +1805,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) if (scsi_add_host_with_dma(vha->host, &fc_vport->dev, &ha->pdev->dev)) { - DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n", - vha->host_no, vha->vp_idx)); + ql_dbg(ql_dbg_user, vha, 0x7083, + "scsi_add_host failure for VP[%d].\n", vha->vp_idx); goto vport_create_failed_2; } @@ -1820,6 +1823,10 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) if (ha->flags.cpu_affinity_enabled) { req = ha->req_q_map[1]; + ql_dbg(ql_dbg_multiq, vha, 0xc000, + "Request queue %p attached with " + "VP[%d], cpu affinity =%d\n", + req, vha->vp_idx, 
ha->flags.cpu_affinity_enabled); goto vport_queue; } else if (ql2xmaxqueues == 1 || !ha->npiv_info) goto vport_queue; @@ -1836,13 +1843,16 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0, qos); if (!ret) - qla_printk(KERN_WARNING, ha, - "Can't create request queue for vp_idx:%d\n", - vha->vp_idx); + ql_log(ql_log_warn, vha, 0x7084, + "Can't create request queue for VP[%d]\n", + vha->vp_idx); else { - DEBUG2(qla_printk(KERN_INFO, ha, - "Request Que:%d (QoS: %d) created for vp_idx:%d\n", - ret, qos, vha->vp_idx)); + ql_dbg(ql_dbg_multiq, vha, 0xc001, + "Request Que:%d Q0s: %d) created for VP[%d]\n", + ret, qos, vha->vp_idx); + ql_dbg(ql_dbg_user, vha, 0x7085, + "Request Que:%d Q0s: %d) created for VP[%d]\n", + ret, qos, vha->vp_idx); req = ha->req_q_map[ret]; } } @@ -1882,12 +1892,13 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) if (vha->timer_active) { qla2x00_vp_stop_timer(vha); - DEBUG15(printk(KERN_INFO "scsi(%ld): timer for the vport[%d]" - " = %p has stopped\n", vha->host_no, vha->vp_idx, vha)); + ql_dbg(ql_dbg_user, vha, 0x7086, + "Timer for the VP[%d] has stopped\n", vha->vp_idx); } /* No pending activities shall be there on the vha now */ - DEBUG(msleep(random32()%10)); /* Just to see if something falls on + if (ql2xextended_error_logging & ql_dbg_user) + msleep(random32()%10); /* Just to see if something falls on * the net we have placed below */ BUG_ON(atomic_read(&vha->vref_count)); @@ -1901,12 +1912,12 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) if (vha->req->id && !ha->flags.cpu_affinity_enabled) { if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS) - qla_printk(KERN_WARNING, ha, - "Queue delete failed.\n"); + ql_log(ql_log_warn, vha, 0x7087, + "Queue delete failed.\n"); } scsi_host_put(vha->host); - qla_printk(KERN_INFO, ha, "vport %d deleted\n", id); + ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id); return 0; } diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index 8c10e2c4928..07d1767cd26 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -36,7 +36,8 @@ done: } int -qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag) +qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha, + struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag) { int i, ret, num_valid; uint8_t *bcode; @@ -51,18 +52,17 @@ qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag) if (bcode_val == 0xFFFFFFFF) { /* No FCP Priority config data in flash */ - DEBUG2(printk(KERN_INFO - "%s: No FCP priority config data.\n", - __func__)); + ql_dbg(ql_dbg_user, vha, 0x7051, + "No FCP Priority config data.\n"); return 0; } if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' || bcode[3] != 'S') { /* Invalid FCP priority data header*/ - DEBUG2(printk(KERN_ERR - "%s: Invalid FCP Priority data header. bcode=0x%x\n", - __func__, bcode_val)); + ql_dbg(ql_dbg_user, vha, 0x7052, + "Invalid FCP Priority data header. bcode=0x%x.\n", + bcode_val); return 0; } if (flag != 1) @@ -77,15 +77,14 @@ qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag) if (num_valid == 0) { /* No valid FCP priority data entries */ - DEBUG2(printk(KERN_ERR - "%s: No valid FCP Priority data entries.\n", - __func__)); + ql_dbg(ql_dbg_user, vha, 0x7053, + "No valid FCP Priority data entries.\n"); ret = 0; } else { /* FCP priority data is valid */ - DEBUG2(printk(KERN_INFO - "%s: Valid FCP priority data. 
num entries = %d\n", - __func__, num_valid)); + ql_dbg(ql_dbg_user, vha, 0x7054, + "Valid FCP priority data. num entries = %d.\n", + num_valid); } return ret; @@ -182,10 +181,9 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job) if (!ha->fcp_prio_cfg) { ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE); if (!ha->fcp_prio_cfg) { - qla_printk(KERN_WARNING, ha, - "Unable to allocate memory " - "for fcp prio config data (%x).\n", - FCP_PRIO_CFG_SIZE); + ql_log(ql_log_warn, vha, 0x7050, + "Unable to allocate memory for fcp prio " + "config data (%x).\n", FCP_PRIO_CFG_SIZE); bsg_job->reply->result = (DID_ERROR << 16); ret = -ENOMEM; goto exit_fcp_prio_cfg; @@ -198,9 +196,9 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job) FCP_PRIO_CFG_SIZE); /* validate fcp priority data */ - if (!qla24xx_fcp_prio_cfg_valid( - (struct qla_fcp_prio_cfg *) - ha->fcp_prio_cfg, 1)) { + + if (!qla24xx_fcp_prio_cfg_valid(vha, + (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) { bsg_job->reply->result = (DID_ERROR << 16); ret = -EINVAL; /* If buffer was invalidatic int @@ -256,9 +254,8 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job) /* pass through is supported only for ISP 4Gb or higher */ if (!IS_FWI2_CAPABLE(ha)) { - DEBUG2(qla_printk(KERN_INFO, ha, - "scsi(%ld):ELS passthru not supported for ISP23xx based " - "adapters\n", vha->host_no)); + ql_dbg(ql_dbg_user, vha, 0x7001, + "ELS passthru not supported for ISP23xx based adapters.\n"); rval = -EPERM; goto done; } @@ -266,11 +263,11 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job) /* Multiple SG's are not supported for ELS requests */ if (bsg_job->request_payload.sg_cnt > 1 || bsg_job->reply_payload.sg_cnt > 1) { - DEBUG2(printk(KERN_INFO - "multiple SG's are not supported for ELS requests" - " [request_sg_cnt: %x reply_sg_cnt: %x]\n", - bsg_job->request_payload.sg_cnt, - bsg_job->reply_payload.sg_cnt)); + ql_dbg(ql_dbg_user, vha, 0x7002, + "Multiple SG's are not suppored for ELS requests, " + "request_sg_cnt=%x reply_sg_cnt=%x.\n", + bsg_job->request_payload.sg_cnt, + bsg_job->reply_payload.sg_cnt); rval = -EPERM; goto done; } @@ -281,9 +278,9 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job) * if not perform fabric login */ if (qla2x00_fabric_login(vha, fcport, &nextlid)) { - DEBUG2(qla_printk(KERN_WARNING, ha, - "failed to login port %06X for ELS passthru\n", - fcport->d_id.b24)); + ql_dbg(ql_dbg_user, vha, 0x7003, + "Failed to login port %06X for ELS passthru.\n", + fcport->d_id.b24); rval = -EIO; goto done; } @@ -314,8 +311,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job) } if (!vha->flags.online) { - DEBUG2(qla_printk(KERN_WARNING, ha, - "host not online\n")); + ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n"); rval = -EIO; goto done; } @@ -337,12 +333,11 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job) if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) || (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { - DEBUG2(printk(KERN_INFO - "dma mapping resulted in different sg counts \ - [request_sg_cnt: %x dma_request_sg_cnt: %x\ - reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n", - bsg_job->request_payload.sg_cnt, req_sg_cnt, - bsg_job->reply_payload.sg_cnt, rsp_sg_cnt)); + ql_log(ql_log_warn, vha, 0x7008, + "dma mapping resulted in different sg counts, " + "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x " + "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt, + req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt); rval = -EAGAIN; goto done_unmap_sg; } @@ -363,15 +358,16 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job) 
"bsg_els_rpt" : "bsg_els_hst"); els->u.bsg_job = bsg_job; - DEBUG2(qla_printk(KERN_INFO, ha, - "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x " - "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type, - bsg_job->request->rqst_data.h_els.command_code, - fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, - fcport->d_id.b.al_pa)); + ql_dbg(ql_dbg_user, vha, 0x700a, + "bsg rqst type: %s els type: %x - loop-id=%x " + "portid=%-2x%02x%02x.\n", type, + bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id, + fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x700e, + "qla2x00_start_sp failed = %d\n", rval); kfree(sp->ctx); mempool_free(sp, ha->srb_mempool); rval = -EIO; @@ -411,6 +407,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job) dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); if (!req_sg_cnt) { + ql_log(ql_log_warn, vha, 0x700f, + "dma_map_sg return %d for request\n", req_sg_cnt); rval = -ENOMEM; goto done; } @@ -418,24 +416,25 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job) rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); if (!rsp_sg_cnt) { + ql_log(ql_log_warn, vha, 0x7010, + "dma_map_sg return %d for reply\n", rsp_sg_cnt); rval = -ENOMEM; goto done; } if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) || (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { - DEBUG2(qla_printk(KERN_WARNING, ha, - "[request_sg_cnt: %x dma_request_sg_cnt: %x\ - reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n", - bsg_job->request_payload.sg_cnt, req_sg_cnt, - bsg_job->reply_payload.sg_cnt, rsp_sg_cnt)); + ql_log(ql_log_warn, vha, 0x7011, + "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x " + "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt, + req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt); rval = -EAGAIN; goto done_unmap_sg; } if (!vha->flags.online) { - DEBUG2(qla_printk(KERN_WARNING, ha, - "host not online\n")); + ql_log(ql_log_warn, vha, 0x7012, + "Host is not online.\n"); rval = -EIO; goto done_unmap_sg; } @@ -451,8 +450,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job) loop_id = vha->mgmt_svr_loop_id; break; default: - DEBUG2(qla_printk(KERN_INFO, ha, - "Unknown loop id: %x\n", loop_id)); + ql_dbg(ql_dbg_user, vha, 0x7013, + "Unknown loop id: %x.\n", loop_id); rval = -EINVAL; goto done_unmap_sg; } @@ -464,6 +463,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job) */ fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); if (!fcport) { + ql_log(ql_log_warn, vha, 0x7014, + "Failed to allocate fcport.\n"); rval = -ENOMEM; goto done_unmap_sg; } @@ -479,6 +480,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job) /* Alloc SRB structure */ sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx)); if (!sp) { + ql_log(ql_log_warn, vha, 0x7015, + "qla2x00_get_ctx_bsg_sp failed.\n"); rval = -ENOMEM; goto done_free_fcport; } @@ -488,15 +491,17 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job) ct->name = "bsg_ct"; ct->u.bsg_job = bsg_job; - DEBUG2(qla_printk(KERN_INFO, ha, - "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x " - "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type, - (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16), - fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, - fcport->d_id.b.al_pa)); + ql_dbg(ql_dbg_user, vha, 0x7016, + "bsg rqst type: %s else type: %x - " + "loop-id=%x 
portid=%02x%02x%02x.\n", type, + (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16), + fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x7017, + "qla2x00_start_sp failed=%d.\n", rval); kfree(sp->ctx); mempool_free(sp, ha->srb_mempool); rval = -EIO; @@ -535,9 +540,8 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config, ha->notify_dcbx_comp = 1; ret = qla81xx_set_port_config(vha, new_config); if (ret != QLA_SUCCESS) { - DEBUG2(printk(KERN_ERR - "%s(%lu): Set port config failed\n", - __func__, vha->host_no)); + ql_log(ql_log_warn, vha, 0x7021, + "set port config failed.\n"); ha->notify_dcbx_comp = 0; rval = -EINVAL; goto done_set_internal; @@ -545,11 +549,11 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config, /* Wait for DCBX complete event */ if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) { - DEBUG2(qla_printk(KERN_WARNING, ha, - "State change notificaition not received.\n")); + ql_dbg(ql_dbg_user, vha, 0x7022, + "State change notification not received.\n"); } else - DEBUG2(qla_printk(KERN_INFO, ha, - "State change RECEIVED\n")); + ql_dbg(ql_dbg_user, vha, 0x7023, + "State change received.\n"); ha->notify_dcbx_comp = 0; @@ -581,9 +585,8 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config, ha->notify_dcbx_comp = wait; ret = qla81xx_set_port_config(vha, new_config); if (ret != QLA_SUCCESS) { - DEBUG2(printk(KERN_ERR - "%s(%lu): Set port config failed\n", - __func__, vha->host_no)); + ql_log(ql_log_warn, vha, 0x7025, + "Set port config failed.\n"); ha->notify_dcbx_comp = 0; rval = -EINVAL; goto done_reset_internal; @@ -592,14 +595,14 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config, /* Wait for DCBX complete event */ if (wait && !wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) { - DEBUG2(qla_printk(KERN_WARNING, ha, - "State change notificaition not received.\n")); + ql_dbg(ql_dbg_user, vha, 0x7026, + "State change notification not received.\n"); ha->notify_dcbx_comp = 0; rval = -EINVAL; goto done_reset_internal; } else - DEBUG2(qla_printk(KERN_INFO, ha, - "State change RECEIVED\n")); + ql_dbg(ql_dbg_user, vha, 0x7027, + "State change received.\n"); ha->notify_dcbx_comp = 0; } @@ -629,11 +632,13 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job) if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || - test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) + test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { + ql_log(ql_log_warn, vha, 0x7018, "Abort active or needed.\n"); return -EBUSY; + } if (!vha->flags.online) { - DEBUG2(qla_printk(KERN_WARNING, ha, "host not online\n")); + ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n"); return -EIO; } @@ -641,26 +646,31 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job) bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); - if (!elreq.req_sg_cnt) + if (!elreq.req_sg_cnt) { + ql_log(ql_log_warn, vha, 0x701a, + "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt); return -ENOMEM; + } elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); if (!elreq.rsp_sg_cnt) { + ql_log(ql_log_warn, vha, 0x701b, + "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt); rval = -ENOMEM; goto done_unmap_req_sg; } if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) || (elreq.rsp_sg_cnt 
!= bsg_job->reply_payload.sg_cnt)) { - DEBUG2(printk(KERN_INFO - "dma mapping resulted in different sg counts " - "[request_sg_cnt: %x dma_request_sg_cnt: %x " - "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n", - bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt, - bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt)); + ql_log(ql_log_warn, vha, 0x701c, + "dma mapping resulted in different sg counts, " + "request_sg_cnt: %x dma_request_sg_cnt: %x " + "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n", + bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt, + bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt); rval = -EAGAIN; goto done_unmap_sg; } @@ -668,8 +678,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job) req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len, &req_data_dma, GFP_KERNEL); if (!req_data) { - DEBUG2(printk(KERN_ERR "%s: dma alloc for req_data " - "failed for host=%lu\n", __func__, vha->host_no)); + ql_log(ql_log_warn, vha, 0x701d, + "dma alloc failed for req_data.\n"); rval = -ENOMEM; goto done_unmap_sg; } @@ -677,8 +687,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job) rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len, &rsp_data_dma, GFP_KERNEL); if (!rsp_data) { - DEBUG2(printk(KERN_ERR "%s: dma alloc for rsp_data " - "failed for host=%lu\n", __func__, vha->host_no)); + ql_log(ql_log_warn, vha, 0x7004, + "dma alloc failed for rsp_data.\n"); rval = -ENOMEM; goto done_free_dma_req; } @@ -699,8 +709,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job) && req_data_len == MAX_ELS_FRAME_PAYLOAD)) && elreq.options == EXTERNAL_LOOPBACK) { type = "FC_BSG_HST_VENDOR_ECHO_DIAG"; - DEBUG2(qla_printk(KERN_INFO, ha, - "scsi(%ld) bsg rqst type: %s\n", vha->host_no, type)); + ql_dbg(ql_dbg_user, vha, 0x701e, + "BSG request type: %s.\n", type); command_sent = INT_DEF_LB_ECHO_CMD; rval = qla2x00_echo_test(vha, &elreq, response); } else { @@ -708,9 +718,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job) memset(config, 0, sizeof(config)); memset(new_config, 0, sizeof(new_config)); if (qla81xx_get_port_config(vha, config)) { - DEBUG2(printk(KERN_ERR - "%s(%lu): Get port config failed\n", - __func__, vha->host_no)); + ql_log(ql_log_warn, vha, 0x701f, + "Get port config failed.\n"); bsg_job->reply->reply_payload_rcv_len = 0; bsg_job->reply->result = (DID_ERROR << 16); rval = -EPERM; @@ -718,11 +727,13 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job) } if (elreq.options != EXTERNAL_LOOPBACK) { - DEBUG2(qla_printk(KERN_INFO, ha, - "Internal: current port config = %x\n", - config[0])); + ql_dbg(ql_dbg_user, vha, 0x7020, + "Internal: curent port config = %x\n", + config[0]); if (qla81xx_set_internal_loopback(vha, config, new_config)) { + ql_log(ql_log_warn, vha, 0x7024, + "Internal loopback failed.\n"); bsg_job->reply->reply_payload_rcv_len = 0; bsg_job->reply->result = @@ -746,9 +757,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job) } type = "FC_BSG_HST_VENDOR_LOOPBACK"; - DEBUG2(qla_printk(KERN_INFO, ha, - "scsi(%ld) bsg rqst type: %s\n", - vha->host_no, type)); + ql_dbg(ql_dbg_user, vha, 0x7028, + "BSG request type: %s.\n", type); command_sent = INT_DEF_LB_LOOPBACK_CMD; rval = qla2x00_loopback_test(vha, &elreq, response); @@ -763,17 +773,16 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job) if (response[0] == MBS_COMMAND_ERROR && response[1] == MBS_LB_RESET) { - DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing " - "ISP\n", __func__, vha->host_no)); + ql_log(ql_log_warn, vha, 0x7029, + "MBX command error, Aborting ISP.\n"); set_bit(ISP_ABORT_NEEDED, 
&vha->dpc_flags); qla2xxx_wake_dpc(vha); qla2x00_wait_for_chip_reset(vha); /* Also reset the MPI */ if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS) { - qla_printk(KERN_INFO, ha, - "MPI reset failed for host%ld.\n", - vha->host_no); + ql_log(ql_log_warn, vha, 0x702a, + "MPI reset failed.\n"); } bsg_job->reply->reply_payload_rcv_len = 0; @@ -783,17 +792,16 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job) } } else { type = "FC_BSG_HST_VENDOR_LOOPBACK"; - DEBUG2(qla_printk(KERN_INFO, ha, - "scsi(%ld) bsg rqst type: %s\n", - vha->host_no, type)); + ql_dbg(ql_dbg_user, vha, 0x702b, + "BSG request type: %s.\n", type); command_sent = INT_DEF_LB_LOOPBACK_CMD; rval = qla2x00_loopback_test(vha, &elreq, response); } } if (rval) { - DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " - "request %s failed\n", vha->host_no, type)); + ql_log(ql_log_warn, vha, 0x702c, + "Vendor request %s failed.\n", type); fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); @@ -805,8 +813,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job) bsg_job->reply->reply_payload_rcv_len = 0; bsg_job->reply->result = (DID_ERROR << 16); } else { - DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " - "request %s completed\n", vha->host_no, type)); + ql_dbg(ql_dbg_user, vha, 0x702d, + "Vendor request %s completed.\n", type); bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(response) + sizeof(uint8_t); @@ -851,12 +859,13 @@ qla84xx_reset(struct fc_bsg_job *bsg_job) if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || - test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) + test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { + ql_log(ql_log_warn, vha, 0x702e, "Abort active or needed.\n"); return -EBUSY; + } if (!IS_QLA84XX(ha)) { - DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, " - "exiting.\n", vha->host_no)); + ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n"); return -EINVAL; } @@ -865,14 +874,14 @@ qla84xx_reset(struct fc_bsg_job *bsg_job) rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW); if (rval) { - DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " - "request 84xx reset failed\n", vha->host_no)); + ql_log(ql_log_warn, vha, 0x7030, + "Vendor request 84xx reset failed.\n"); rval = bsg_job->reply->reply_payload_rcv_len = 0; bsg_job->reply->result = (DID_ERROR << 16); } else { - DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " - "request 84xx reset completed\n", vha->host_no)); + ql_dbg(ql_dbg_user, vha, 0x7031, + "Vendor request 84xx reset completed.\n"); bsg_job->reply->result = DID_OK; } @@ -902,21 +911,24 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job) return -EBUSY; if (!IS_QLA84XX(ha)) { - DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, " - "exiting.\n", vha->host_no)); + ql_dbg(ql_dbg_user, vha, 0x7032, + "Not 84xx, exiting.\n"); return -EINVAL; } sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); - if (!sg_cnt) + if (!sg_cnt) { + ql_log(ql_log_warn, vha, 0x7033, + "dma_map_sg returned %d for request.\n", sg_cnt); return -ENOMEM; + } if (sg_cnt != bsg_job->request_payload.sg_cnt) { - DEBUG2(printk(KERN_INFO - "dma mapping resulted in different sg counts " - "request_sg_cnt: %x dma_request_sg_cnt: %x ", - bsg_job->request_payload.sg_cnt, sg_cnt)); + ql_log(ql_log_warn, vha, 0x7034, + "DMA mapping resulted in different sg counts, " + "request_sg_cnt: %x dma_request_sg_cnt: %x.\n", + bsg_job->request_payload.sg_cnt, 
sg_cnt); rval = -EAGAIN; goto done_unmap_sg; } @@ -925,8 +937,8 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job) fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len, &fw_dma, GFP_KERNEL); if (!fw_buf) { - DEBUG2(printk(KERN_ERR "%s: dma alloc for fw_buf " - "failed for host=%lu\n", __func__, vha->host_no)); + ql_log(ql_log_warn, vha, 0x7035, + "DMA alloc failed for fw_buf.\n"); rval = -ENOMEM; goto done_unmap_sg; } @@ -936,8 +948,8 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job) mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); if (!mn) { - DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer " - "failed for host=%lu\n", __func__, vha->host_no)); + ql_log(ql_log_warn, vha, 0x7036, + "DMA alloc failed for fw buffer.\n"); rval = -ENOMEM; goto done_free_fw_buf; } @@ -965,15 +977,15 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job) rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120); if (rval) { - DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " - "request 84xx updatefw failed\n", vha->host_no)); + ql_log(ql_log_warn, vha, 0x7037, + "Vendor request 84xx updatefw failed.\n"); rval = bsg_job->reply->reply_payload_rcv_len = 0; bsg_job->reply->result = (DID_ERROR << 16); } else { - DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " - "request 84xx updatefw completed\n", vha->host_no)); + ql_dbg(ql_dbg_user, vha, 0x7038, + "Vendor request 84xx updatefw completed.\n"); bsg_job->reply_len = sizeof(struct fc_bsg_reply); bsg_job->reply->result = DID_OK; @@ -1009,27 +1021,30 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job) if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || - test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) + test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { + ql_log(ql_log_warn, vha, 0x7039, + "Abort active or needed.\n"); return -EBUSY; + } if (!IS_QLA84XX(ha)) { - DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, " - "exiting.\n", vha->host_no)); + ql_log(ql_log_warn, vha, 0x703a, + "Not 84xx, exiting.\n"); return -EINVAL; } ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request + sizeof(struct fc_bsg_request)); if (!ql84_mgmt) { - DEBUG2(printk("%s(%ld): mgmt header not provided, exiting.\n", - __func__, vha->host_no)); + ql_log(ql_log_warn, vha, 0x703b, + "MGMT header not provided, exiting.\n"); return -EINVAL; } mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); if (!mn) { - DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer " - "failed for host=%lu\n", __func__, vha->host_no)); + ql_log(ql_log_warn, vha, 0x703c, + "DMA alloc failed for fw buffer.\n"); return -ENOMEM; } @@ -1044,6 +1059,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job) bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); if (!sg_cnt) { + ql_log(ql_log_warn, vha, 0x703d, + "dma_map_sg returned %d for reply.\n", sg_cnt); rval = -ENOMEM; goto exit_mgmt; } @@ -1051,10 +1068,10 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job) dma_direction = DMA_FROM_DEVICE; if (sg_cnt != bsg_job->reply_payload.sg_cnt) { - DEBUG2(printk(KERN_INFO - "dma mapping resulted in different sg counts " - "reply_sg_cnt: %x dma_reply_sg_cnt: %x\n", - bsg_job->reply_payload.sg_cnt, sg_cnt)); + ql_log(ql_log_warn, vha, 0x703e, + "DMA mapping resulted in different sg counts, " + "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n", + bsg_job->reply_payload.sg_cnt, sg_cnt); rval = -EAGAIN; goto done_unmap_sg; } @@ -1064,9 +1081,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job) mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len, &mgmt_dma, 
GFP_KERNEL); if (!mgmt_b) { - DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b " - "failed for host=%lu\n", - __func__, vha->host_no)); + ql_log(ql_log_warn, vha, 0x703f, + "DMA alloc failed for mgmt_b.\n"); rval = -ENOMEM; goto done_unmap_sg; } @@ -1094,6 +1110,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job) bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); if (!sg_cnt) { + ql_log(ql_log_warn, vha, 0x7040, + "dma_map_sg returned %d.\n", sg_cnt); rval = -ENOMEM; goto exit_mgmt; } @@ -1101,10 +1119,10 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job) dma_direction = DMA_TO_DEVICE; if (sg_cnt != bsg_job->request_payload.sg_cnt) { - DEBUG2(printk(KERN_INFO - "dma mapping resulted in different sg counts " - "request_sg_cnt: %x dma_request_sg_cnt: %x ", - bsg_job->request_payload.sg_cnt, sg_cnt)); + ql_log(ql_log_warn, vha, 0x7041, + "DMA mapping resulted in different sg counts, " + "request_sg_cnt: %x dma_request_sg_cnt: %x.\n", + bsg_job->request_payload.sg_cnt, sg_cnt); rval = -EAGAIN; goto done_unmap_sg; } @@ -1113,9 +1131,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job) mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len, &mgmt_dma, GFP_KERNEL); if (!mgmt_b) { - DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b " - "failed for host=%lu\n", - __func__, vha->host_no)); + ql_log(ql_log_warn, vha, 0x7042, + "DMA alloc failed for mgmt_b.\n"); rval = -ENOMEM; goto done_unmap_sg; } @@ -1156,15 +1173,15 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job) rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0); if (rval) { - DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " - "request 84xx mgmt failed\n", vha->host_no)); + ql_log(ql_log_warn, vha, 0x7043, + "Vendor request 84xx mgmt failed.\n"); rval = bsg_job->reply->reply_payload_rcv_len = 0; bsg_job->reply->result = (DID_ERROR << 16); } else { - DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " - "request 84xx mgmt completed\n", vha->host_no)); + ql_dbg(ql_dbg_user, vha, 0x7044, + "Vendor request 84xx mgmt completed.\n"); bsg_job->reply_len = sizeof(struct fc_bsg_reply); bsg_job->reply->result = DID_OK; @@ -1204,7 +1221,6 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job) { struct Scsi_Host *host = bsg_job->shost; scsi_qla_host_t *vha = shost_priv(host); - struct qla_hw_data *ha = vha->hw; int rval = 0; struct qla_port_param *port_param = NULL; fc_port_t *fcport = NULL; @@ -1215,26 +1231,27 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job) if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || - test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) + test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { + ql_log(ql_log_warn, vha, 0x7045, "abort active or needed.\n"); return -EBUSY; + } if (!IS_IIDMA_CAPABLE(vha->hw)) { - DEBUG2(qla_printk(KERN_WARNING, ha, "%s(%lu): iiDMA not " - "supported\n", __func__, vha->host_no)); + ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n"); return -EINVAL; } port_param = (struct qla_port_param *)((char *)bsg_job->request + sizeof(struct fc_bsg_request)); if (!port_param) { - DEBUG2(printk("%s(%ld): port_param header not provided, " - "exiting.\n", __func__, vha->host_no)); + ql_log(ql_log_warn, vha, 0x7047, + "port_param header not provided.\n"); return -EINVAL; } if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) { - DEBUG2(printk(KERN_ERR "%s(%ld): Invalid destination type\n", - __func__, vha->host_no)); + ql_log(ql_log_warn, vha, 0x7048, + "Invalid destination type.\n"); return -EINVAL; } @@ -1249,21 +1266,20 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job) } if (!fcport) { - 
DEBUG2(printk(KERN_ERR "%s(%ld): Failed to find port\n", - __func__, vha->host_no)); + ql_log(ql_log_warn, vha, 0x7049, + "Failed to find port.\n"); return -EINVAL; } if (atomic_read(&fcport->state) != FCS_ONLINE) { - DEBUG2(printk(KERN_ERR "%s(%ld): Port not online\n", - __func__, vha->host_no)); + ql_log(ql_log_warn, vha, 0x704a, + "Port is not online.\n"); return -EINVAL; } if (fcport->flags & FCF_LOGIN_NEEDED) { - DEBUG2(printk(KERN_ERR "%s(%ld): Remote port not logged in, " - "flags = 0x%x\n", - __func__, vha->host_no, fcport->flags)); + ql_log(ql_log_warn, vha, 0x704b, + "Remote port not logged in flags = 0x%x.\n", fcport->flags); return -EINVAL; } @@ -1275,15 +1291,13 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job) &port_param->speed, mb); if (rval) { - DEBUG16(printk(KERN_ERR "scsi(%ld): iIDMA cmd failed for " - "%02x%02x%02x%02x%02x%02x%02x%02x -- " - "%04x %x %04x %04x.\n", - vha->host_no, fcport->port_name[0], - fcport->port_name[1], - fcport->port_name[2], fcport->port_name[3], - fcport->port_name[4], fcport->port_name[5], - fcport->port_name[6], fcport->port_name[7], rval, - fcport->fp_speed, mb[0], mb[1])); + ql_log(ql_log_warn, vha, 0x704c, + "iIDMA cmd failed for %02x%02x%02x%02x%02x%02x%02x%02x -- " + "%04x %x %04x %04x.\n", fcport->port_name[0], + fcport->port_name[1], fcport->port_name[2], + fcport->port_name[3], fcport->port_name[4], + fcport->port_name[5], fcport->port_name[6], + fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]); rval = 0; bsg_job->reply->result = (DID_ERROR << 16); @@ -1307,11 +1321,12 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job) } static int -qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha, +qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha, uint8_t is_update) { uint32_t start = 0; int valid = 0; + struct qla_hw_data *ha = vha->hw; bsg_job->reply->reply_payload_rcv_len = 0; @@ -1319,14 +1334,20 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha, return -EINVAL; start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; - if (start > ha->optrom_size) + if (start > ha->optrom_size) { + ql_log(ql_log_warn, vha, 0x7055, + "start %d > optrom_size %d.\n", start, ha->optrom_size); return -EINVAL; + } - if (ha->optrom_state != QLA_SWAITING) + if (ha->optrom_state != QLA_SWAITING) { + ql_log(ql_log_info, vha, 0x7056, + "optrom_state %d.\n", ha->optrom_state); return -EBUSY; + } ha->optrom_region_start = start; - + ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update); if (is_update) { if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0) valid = 1; @@ -1337,9 +1358,9 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha, IS_QLA8XXX_TYPE(ha)) valid = 1; if (!valid) { - qla_printk(KERN_WARNING, ha, - "Invalid start region 0x%x/0x%x.\n", - start, bsg_job->request_payload.payload_len); + ql_log(ql_log_warn, vha, 0x7058, + "Invalid start region 0x%x/0x%x.\n", start, + bsg_job->request_payload.payload_len); return -EINVAL; } @@ -1358,9 +1379,9 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha, ha->optrom_buffer = vmalloc(ha->optrom_region_size); if (!ha->optrom_buffer) { - qla_printk(KERN_WARNING, ha, + ql_log(ql_log_warn, vha, 0x7059, "Read: Unable to allocate memory for optrom retrieval " - "(%x).\n", ha->optrom_region_size); + "(%x)\n", ha->optrom_region_size); ha->optrom_state = QLA_SWAITING; return -ENOMEM; @@ -1378,7 +1399,7 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job) struct qla_hw_data *ha = vha->hw; int rval = 0; - rval 
= qla2x00_optrom_setup(bsg_job, ha, 0); + rval = qla2x00_optrom_setup(bsg_job, vha, 0); if (rval) return rval; @@ -1406,7 +1427,7 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job) struct qla_hw_data *ha = vha->hw; int rval = 0; - rval = qla2x00_optrom_setup(bsg_job, ha, 1); + rval = qla2x00_optrom_setup(bsg_job, vha, 1); if (rval) return rval; @@ -1464,6 +1485,23 @@ int qla24xx_bsg_request(struct fc_bsg_job *bsg_job) { int ret = -EINVAL; + struct fc_rport *rport; + fc_port_t *fcport = NULL; + struct Scsi_Host *host; + scsi_qla_host_t *vha; + + if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) { + rport = bsg_job->rport; + fcport = *(fc_port_t **) rport->dd_data; + host = rport_to_shost(rport); + vha = shost_priv(host); + } else { + host = bsg_job->shost; + vha = shost_priv(host); + } + + ql_dbg(ql_dbg_user, vha, 0x7000, + "Entered %s msgcode=%d.\n", __func__, bsg_job->request->msgcode); switch (bsg_job->request->msgcode) { case FC_BSG_RPT_ELS: @@ -1480,7 +1518,7 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job) case FC_BSG_HST_DEL_RPORT: case FC_BSG_RPT_CT: default: - DEBUG2(printk("qla2xxx: unsupported BSG request\n")); + ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n"); break; } return ret; @@ -1514,17 +1552,15 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job) && (sp_bsg->u.bsg_job == bsg_job)) { spin_unlock_irqrestore(&ha->hardware_lock, flags); if (ha->isp_ops->abort_command(sp)) { - DEBUG2(qla_printk(KERN_INFO, ha, - "scsi(%ld): mbx " - "abort_command failed\n", - vha->host_no)); + ql_log(ql_log_warn, vha, 0x7089, + "mbx abort_command " + "failed.\n"); bsg_job->req->errors = bsg_job->reply->result = -EIO; } else { - DEBUG2(qla_printk(KERN_INFO, ha, - "scsi(%ld): mbx " - "abort_command success\n", - vha->host_no)); + ql_dbg(ql_dbg_user, vha, 0x708a, + "mbx abort_command " + "success.\n"); bsg_job->req->errors = bsg_job->reply->result = 0; } @@ -1535,8 +1571,7 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job) } } spin_unlock_irqrestore(&ha->hardware_lock, flags); - DEBUG2(qla_printk(KERN_INFO, ha, - "scsi(%ld) SRB not found to abort\n", vha->host_no)); + ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n"); bsg_job->req->errors = bsg_job->reply->result = -ENXIO; return 0; diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index c53719a9a74..2155071f310 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -4,10 +4,36 @@ * * See LICENSE.qla2xxx for copyright and licensing details. */ + +/* + * Table for showing the current message id in use for particular level + * Change this table for addition of log/debug messages. 
+ * ----------------------------------------------------- + * | Level | Last Value Used | + * ----------------------------------------------------- + * | Module Init and Probe | 0x0116 | + * | Mailbox commands | 0x111e | + * | Device Discovery | 0x2083 | + * | Queue Command and IO tracing | 0x302e | + * | DPC Thread | 0x401c | + * | Async Events | 0x5059 | + * | Timer Routines | 0x600d | + * | User Space Interactions | 0x709c | + * | Task Management | 0x8043 | + * | AER/EEH | 0x900f | + * | Virtual Port | 0xa007 | + * | ISP82XX Specific | 0xb027 | + * | MultiQ | 0xc00b | + * | Misc | 0xd00b | + * ----------------------------------------------------- + */ + #include "qla_def.h" #include <linux/delay.h> +static uint32_t ql_dbg_offset = 0x800; + static inline void qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump) { @@ -383,11 +409,11 @@ qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval) struct qla_hw_data *ha = vha->hw; if (rval != QLA_SUCCESS) { - qla_printk(KERN_WARNING, ha, - "Failed to dump firmware (%x)!!!\n", rval); + ql_log(ql_log_warn, vha, 0xd000, + "Failed to dump firmware (%x).\n", rval); ha->fw_dumped = 0; } else { - qla_printk(KERN_INFO, ha, + ql_log(ql_log_info, vha, 0xd001, "Firmware dump saved to temp buffer (%ld/%p).\n", vha->host_no, ha->fw_dump); ha->fw_dumped = 1; @@ -419,15 +445,16 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked) spin_lock_irqsave(&ha->hardware_lock, flags); if (!ha->fw_dump) { - qla_printk(KERN_WARNING, ha, - "No buffer available for dump!!!\n"); + ql_log(ql_log_warn, vha, 0xd002, + "No buffer available for dump.\n"); goto qla2300_fw_dump_failed; } if (ha->fw_dumped) { - qla_printk(KERN_WARNING, ha, - "Firmware has been previously dumped (%p) -- ignoring " - "request...\n", ha->fw_dump); + ql_log(ql_log_warn, vha, 0xd003, + "Firmware has been previously dumped (%p) " + "-- ignoring request.\n", + ha->fw_dump); goto qla2300_fw_dump_failed; } fw = &ha->fw_dump->isp.isp23; @@ -582,15 +609,16 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked) spin_lock_irqsave(&ha->hardware_lock, flags); if (!ha->fw_dump) { - qla_printk(KERN_WARNING, ha, - "No buffer available for dump!!!\n"); + ql_log(ql_log_warn, vha, 0xd004, + "No buffer available for dump.\n"); goto qla2100_fw_dump_failed; } if (ha->fw_dumped) { - qla_printk(KERN_WARNING, ha, - "Firmware has been previously dumped (%p) -- ignoring " - "request...\n", ha->fw_dump); + ql_log(ql_log_warn, vha, 0xd005, + "Firmware has been previously dumped (%p) " + "-- ignoring request.\n", + ha->fw_dump); goto qla2100_fw_dump_failed; } fw = &ha->fw_dump->isp.isp21; @@ -779,15 +807,16 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) spin_lock_irqsave(&ha->hardware_lock, flags); if (!ha->fw_dump) { - qla_printk(KERN_WARNING, ha, - "No buffer available for dump!!!\n"); + ql_log(ql_log_warn, vha, 0xd006, + "No buffer available for dump.\n"); goto qla24xx_fw_dump_failed; } if (ha->fw_dumped) { - qla_printk(KERN_WARNING, ha, - "Firmware has been previously dumped (%p) -- ignoring " - "request...\n", ha->fw_dump); + ql_log(ql_log_warn, vha, 0xd007, + "Firmware has been previously dumped (%p) " + "-- ignoring request.\n", + ha->fw_dump); goto qla24xx_fw_dump_failed; } fw = &ha->fw_dump->isp.isp24; @@ -1017,15 +1046,16 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) spin_lock_irqsave(&ha->hardware_lock, flags); if (!ha->fw_dump) { - qla_printk(KERN_WARNING, ha, - "No buffer available for dump!!!\n"); + ql_log(ql_log_warn, vha, 0xd008, + "No buffer 
available for dump.\n"); goto qla25xx_fw_dump_failed; } if (ha->fw_dumped) { - qla_printk(KERN_WARNING, ha, - "Firmware has been previously dumped (%p) -- ignoring " - "request...\n", ha->fw_dump); + ql_log(ql_log_warn, vha, 0xd009, + "Firmware has been previously dumped (%p) " + "-- ignoring request.\n", + ha->fw_dump); goto qla25xx_fw_dump_failed; } fw = &ha->fw_dump->isp.isp25; @@ -1328,15 +1358,16 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) spin_lock_irqsave(&ha->hardware_lock, flags); if (!ha->fw_dump) { - qla_printk(KERN_WARNING, ha, - "No buffer available for dump!!!\n"); + ql_log(ql_log_warn, vha, 0xd00a, + "No buffer available for dump.\n"); goto qla81xx_fw_dump_failed; } if (ha->fw_dumped) { - qla_printk(KERN_WARNING, ha, - "Firmware has been previously dumped (%p) -- ignoring " - "request...\n", ha->fw_dump); + ql_log(ql_log_warn, vha, 0xd00b, + "Firmware has been previously dumped (%p) " + "-- ignoring request.\n", + ha->fw_dump); goto qla81xx_fw_dump_failed; } fw = &ha->fw_dump->isp.isp81; @@ -1619,106 +1650,255 @@ qla81xx_fw_dump_failed: /****************************************************************************/ /* Driver Debug Functions. */ /****************************************************************************/ - +/* + * This function is for formatting and logging debug information. + * It is to be used when vha is available. It formats the message + * and logs it to the messages file. + * parameters: + * level: The level of the debug messages to be printed. + * If ql2xextended_error_logging value is correctly set, + * this message will appear in the messages file. + * vha: Pointer to the scsi_qla_host_t. + * id: This is a unique identifier for the level. It identifies the + * part of the code from where the message originated. + * msg: The message to be displayed. + */ void -qla2x00_dump_regs(scsi_qla_host_t *vha) -{ - int i; - struct qla_hw_data *ha = vha->hw; - struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; - struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; - uint16_t __iomem *mbx_reg; +ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) { + + char pbuf[QL_DBG_BUF_LEN]; + va_list ap; + uint32_t len; + struct pci_dev *pdev = NULL; + + memset(pbuf, 0, QL_DBG_BUF_LEN); + + va_start(ap, msg); + + if ((level & ql2xextended_error_logging) == level) { + if (vha != NULL) { + pdev = vha->hw->pdev; + /* <module-name> <pci-name> <msg-id>:<host> Message */ + sprintf(pbuf, "%s [%s]-%04x:%ld: ", QL_MSGHDR, + dev_name(&(pdev->dev)), id + ql_dbg_offset, + vha->host_no); + } else + sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR, + "0000:00:00.0", id + ql_dbg_offset); + + len = strlen(pbuf); + vsprintf(pbuf+len, msg, ap); + pr_warning("%s", pbuf); + } - mbx_reg = IS_FWI2_CAPABLE(ha) ? ®24->mailbox0: - MAILBOX_REG(ha, reg, 0); + va_end(ap); - printk("Mailbox registers:\n"); - for (i = 0; i < 6; i++) - printk("scsi(%ld): mbox %d 0x%04x \n", vha->host_no, i, - RD_REG_WORD(mbx_reg++)); } - +/* + * This function is for formatting and logging debug information. + * It is to be used when vha is not available and pci is availble, + * i.e., before host allocation. It formats the message and logs it + * to the messages file. + * parameters: + * level: The level of the debug messages to be printed. + * If ql2xextended_error_logging value is correctly set, + * this message will appear in the messages file. + * pdev: Pointer to the struct pci_dev. + * id: This is a unique id for the level. 
It identifies the part + * of the code from where the message originated. + * msg: The message to be displayed. + */ void -qla2x00_dump_buffer(uint8_t * b, uint32_t size) -{ - uint32_t cnt; - uint8_t c; +ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...) { - printk(" 0 1 2 3 4 5 6 7 8 9 " - "Ah Bh Ch Dh Eh Fh\n"); - printk("----------------------------------------" - "----------------------\n"); - - for (cnt = 0; cnt < size;) { - c = *b++; - printk("%02x",(uint32_t) c); - cnt++; - if (!(cnt % 16)) - printk("\n"); - else - printk(" "); + char pbuf[QL_DBG_BUF_LEN]; + va_list ap; + uint32_t len; + + if (pdev == NULL) + return; + + memset(pbuf, 0, QL_DBG_BUF_LEN); + + va_start(ap, msg); + + if ((level & ql2xextended_error_logging) == level) { + /* <module-name> <dev-name>:<msg-id> Message */ + sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR, + dev_name(&(pdev->dev)), id + ql_dbg_offset); + + len = strlen(pbuf); + vsprintf(pbuf+len, msg, ap); + pr_warning("%s", pbuf); } - if (cnt % 16) - printk("\n"); + + va_end(ap); + } +/* + * This function is for formatting and logging log messages. + * It is to be used when vha is available. It formats the message + * and logs it to the messages file. All the messages will be logged + * irrespective of value of ql2xextended_error_logging. + * parameters: + * level: The level of the log messages to be printed in the + * messages file. + * vha: Pointer to the scsi_qla_host_t + * id: This is a unique id for the level. It identifies the + * part of the code from where the message originated. + * msg: The message to be displayed. + */ void -qla2x00_dump_buffer_zipped(uint8_t *b, uint32_t size) -{ - uint32_t cnt; - uint8_t c; - uint8_t last16[16], cur16[16]; - uint32_t lc = 0, num_same16 = 0, j; +ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) { - printk(KERN_DEBUG " 0 1 2 3 4 5 6 7 8 9 " - "Ah Bh Ch Dh Eh Fh\n"); - printk(KERN_DEBUG "----------------------------------------" - "----------------------\n"); + char pbuf[QL_DBG_BUF_LEN]; + va_list ap; + uint32_t len; + struct pci_dev *pdev = NULL; - for (cnt = 0; cnt < size;) { - c = *b++; + memset(pbuf, 0, QL_DBG_BUF_LEN); - cur16[lc++] = c; + va_start(ap, msg); - cnt++; - if (cnt % 16) - continue; - - /* We have 16 now */ - lc = 0; - if (num_same16 == 0) { - memcpy(last16, cur16, 16); - num_same16++; - continue; + if (level <= ql_errlev) { + if (vha != NULL) { + pdev = vha->hw->pdev; + /* <module-name> <msg-id>:<host> Message */ + sprintf(pbuf, "%s [%s]-%04x:%ld: ", QL_MSGHDR, + dev_name(&(pdev->dev)), id, vha->host_no); + } else + sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR, + "0000:00:00.0", id); + + len = strlen(pbuf); + vsprintf(pbuf+len, msg, ap); + + switch (level) { + case 0: /* FATAL LOG */ + pr_crit("%s", pbuf); + break; + case 1: + pr_err("%s", pbuf); + break; + case 2: + pr_warn("%s", pbuf); + break; + default: + pr_info("%s", pbuf); + break; } - if (memcmp(cur16, last16, 16) == 0) { - num_same16++; - continue; + } + + va_end(ap); +} + +/* + * This function is for formatting and logging log messages. + * It is to be used when vha is not available and pci is availble, + * i.e., before host allocation. It formats the message and logs + * it to the messages file. All the messages are logged irrespective + * of the value of ql2xextended_error_logging. + * parameters: + * level: The level of the log messages to be printed in the + * messages file. + * pdev: Pointer to the struct pci_dev. + * id: This is a unique id for the level. 
It identifies the + * part of the code from where the message originated. + * msg: The message to be displayed. + */ +void +ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...) { + + char pbuf[QL_DBG_BUF_LEN]; + va_list ap; + uint32_t len; + + if (pdev == NULL) + return; + + memset(pbuf, 0, QL_DBG_BUF_LEN); + + va_start(ap, msg); + + if (level <= ql_errlev) { + /* <module-name> <dev-name>:<msg-id> Message */ + sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR, + dev_name(&(pdev->dev)), id); + + len = strlen(pbuf); + vsprintf(pbuf+len, msg, ap); + switch (level) { + case 0: /* FATAL LOG */ + pr_crit("%s", pbuf); + break; + case 1: + pr_err("%s", pbuf); + break; + case 2: + pr_warn("%s", pbuf); + break; + default: + pr_info("%s", pbuf); + break; } - for (j = 0; j < 16; j++) - printk(KERN_DEBUG "%02x ", (uint32_t)last16[j]); - printk(KERN_DEBUG "\n"); - - if (num_same16 > 1) - printk(KERN_DEBUG "> prev pattern repeats (%u)" - "more times\n", num_same16-1); - memcpy(last16, cur16, 16); - num_same16 = 1; } - if (num_same16) { - for (j = 0; j < 16; j++) - printk(KERN_DEBUG "%02x ", (uint32_t)last16[j]); - printk(KERN_DEBUG "\n"); + va_end(ap); +} - if (num_same16 > 1) - printk(KERN_DEBUG "> prev pattern repeats (%u)" - "more times\n", num_same16-1); +void +ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id) +{ + int i; + struct qla_hw_data *ha = vha->hw; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; + struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; + uint16_t __iomem *mbx_reg; + + if ((level & ql2xextended_error_logging) == level) { + + if (IS_QLA82XX(ha)) + mbx_reg = ®82->mailbox_in[0]; + else if (IS_FWI2_CAPABLE(ha)) + mbx_reg = ®24->mailbox0; + else + mbx_reg = MAILBOX_REG(ha, reg, 0); + + ql_dbg(level, vha, id, "Mailbox registers:\n"); + for (i = 0; i < 6; i++) + ql_dbg(level, vha, id, + "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++)); } - if (lc) { - for (j = 0; j < lc; j++) - printk(KERN_DEBUG "%02x ", (uint32_t)cur16[j]); - printk(KERN_DEBUG "\n"); +} + + +void +ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id, + uint8_t *b, uint32_t size) +{ + uint32_t cnt; + uint8_t c; + if ((level & ql2xextended_error_logging) == level) { + + ql_dbg(level, vha, id, " 0 1 2 3 4 5 6 7 8 " + "9 Ah Bh Ch Dh Eh Fh\n"); + ql_dbg(level, vha, id, "----------------------------------" + "----------------------------\n"); + + ql_dbg(level, vha, id, ""); + for (cnt = 0; cnt < size;) { + c = *b++; + printk("%02x", (uint32_t) c); + cnt++; + if (!(cnt % 16)) + printk("\n"); + else + printk(" "); + } + if (cnt % 16) + ql_dbg(level, vha, id, "\n"); } } diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h index 930414541ec..98a377b9901 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.h +++ b/drivers/scsi/qla2xxx/qla_dbg.h @@ -8,146 +8,6 @@ #include "qla_def.h" /* - * Driver debug definitions. 
- */ -/* #define QL_DEBUG_LEVEL_1 */ /* Output register accesses to COM1 */ -/* #define QL_DEBUG_LEVEL_2 */ /* Output error msgs to COM1 */ -/* #define QL_DEBUG_LEVEL_3 */ /* Output function trace msgs to COM1 */ -/* #define QL_DEBUG_LEVEL_4 */ /* Output NVRAM trace msgs to COM1 */ -/* #define QL_DEBUG_LEVEL_5 */ /* Output ring trace msgs to COM1 */ -/* #define QL_DEBUG_LEVEL_6 */ /* Output WATCHDOG timer trace to COM1 */ -/* #define QL_DEBUG_LEVEL_7 */ /* Output RISC load trace msgs to COM1 */ -/* #define QL_DEBUG_LEVEL_8 */ /* Output ring saturation msgs to COM1 */ -/* #define QL_DEBUG_LEVEL_9 */ /* Output IOCTL trace msgs */ -/* #define QL_DEBUG_LEVEL_10 */ /* Output IOCTL error msgs */ -/* #define QL_DEBUG_LEVEL_11 */ /* Output Mbx Cmd trace msgs */ -/* #define QL_DEBUG_LEVEL_12 */ /* Output IP trace msgs */ -/* #define QL_DEBUG_LEVEL_13 */ /* Output fdmi function trace msgs */ -/* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */ -/* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */ -/* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */ -/* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */ -/* #define QL_DEBUG_LEVEL_18 */ /* Output T10 CRC trace messages */ - -/* -* Macros use for debugging the driver. -*/ - -#define DEBUG(x) do { if (ql2xextended_error_logging) { x; } } while (0) - -#if defined(QL_DEBUG_LEVEL_1) -#define DEBUG1(x) do {x;} while (0) -#else -#define DEBUG1(x) do {} while (0) -#endif - -#define DEBUG2(x) do { if (ql2xextended_error_logging) { x; } } while (0) -#define DEBUG2_3(x) do { if (ql2xextended_error_logging) { x; } } while (0) -#define DEBUG2_3_11(x) do { if (ql2xextended_error_logging) { x; } } while (0) -#define DEBUG2_9_10(x) do { if (ql2xextended_error_logging) { x; } } while (0) -#define DEBUG2_11(x) do { if (ql2xextended_error_logging) { x; } } while (0) -#define DEBUG2_13(x) do { if (ql2xextended_error_logging) { x; } } while (0) -#define DEBUG2_16(x) do { if (ql2xextended_error_logging) { x; } } while (0) -#define DEBUG2_17(x) do { if (ql2xextended_error_logging) { x; } } while (0) - -#if defined(QL_DEBUG_LEVEL_3) -#define DEBUG3(x) do {x;} while (0) -#define DEBUG3_11(x) do {x;} while (0) -#else -#define DEBUG3(x) do {} while (0) -#endif - -#if defined(QL_DEBUG_LEVEL_4) -#define DEBUG4(x) do {x;} while (0) -#else -#define DEBUG4(x) do {} while (0) -#endif - -#if defined(QL_DEBUG_LEVEL_5) -#define DEBUG5(x) do {x;} while (0) -#else -#define DEBUG5(x) do {} while (0) -#endif - -#if defined(QL_DEBUG_LEVEL_7) -#define DEBUG7(x) do {x;} while (0) -#else -#define DEBUG7(x) do {} while (0) -#endif - -#if defined(QL_DEBUG_LEVEL_9) -#define DEBUG9(x) do {x;} while (0) -#define DEBUG9_10(x) do {x;} while (0) -#else -#define DEBUG9(x) do {} while (0) -#endif - -#if defined(QL_DEBUG_LEVEL_10) -#define DEBUG10(x) do {x;} while (0) -#define DEBUG9_10(x) do {x;} while (0) -#else -#define DEBUG10(x) do {} while (0) - #if !defined(DEBUG9_10) - #define DEBUG9_10(x) do {} while (0) - #endif -#endif - -#if defined(QL_DEBUG_LEVEL_11) -#define DEBUG11(x) do{x;} while(0) -#if !defined(DEBUG3_11) -#define DEBUG3_11(x) do{x;} while(0) -#endif -#else -#define DEBUG11(x) do{} while(0) - #if !defined(QL_DEBUG_LEVEL_3) - #define DEBUG3_11(x) do{} while(0) - #endif -#endif - -#if defined(QL_DEBUG_LEVEL_12) -#define DEBUG12(x) do {x;} while (0) -#else -#define DEBUG12(x) do {} while (0) -#endif - -#if defined(QL_DEBUG_LEVEL_13) -#define DEBUG13(x) do {x;} while (0) -#else -#define DEBUG13(x) do {} while (0) -#endif - -#if 
defined(QL_DEBUG_LEVEL_14) -#define DEBUG14(x) do {x;} while (0) -#else -#define DEBUG14(x) do {} while (0) -#endif - -#if defined(QL_DEBUG_LEVEL_15) -#define DEBUG15(x) do {x;} while (0) -#else -#define DEBUG15(x) do {} while (0) -#endif - -#if defined(QL_DEBUG_LEVEL_16) -#define DEBUG16(x) do {x;} while (0) -#else -#define DEBUG16(x) do {} while (0) -#endif - -#if defined(QL_DEBUG_LEVEL_17) -#define DEBUG17(x) do {x;} while (0) -#else -#define DEBUG17(x) do {} while (0) -#endif - -#if defined(QL_DEBUG_LEVEL_18) -#define DEBUG18(x) do {if (ql2xextended_error_logging) x; } while (0) -#else -#define DEBUG18(x) do {} while (0) -#endif - - -/* * Firmware Dump structure definition */ @@ -370,3 +230,50 @@ struct qla2xxx_fw_dump { struct qla81xx_fw_dump isp81; } isp; }; + +#define QL_MSGHDR "qla2xxx" + +#define ql_log_fatal 0 /* display fatal errors */ +#define ql_log_warn 1 /* display critical errors */ +#define ql_log_info 2 /* display all recovered errors */ +#define ql_log_all 3 /* This value is only used by ql_errlev. + * No messages will use this value. + * This should be always highest value + * as compared to other log levels. + */ + +extern int ql_errlev; + +void +ql_dbg(uint32_t, scsi_qla_host_t *vha, int32_t, char *, ...); +void +ql_dbg_pci(uint32_t, struct pci_dev *pdev, int32_t, char *, ...); + +void +ql_log(uint32_t, scsi_qla_host_t *vha, int32_t, char *, ...); +void +ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, char *, ...); + +/* Debug Levels */ +/* The 0x40000000 is the max value any debug level can have + * as ql2xextended_error_logging is of type signed int + */ +#define ql_dbg_init 0x40000000 /* Init Debug */ +#define ql_dbg_mbx 0x20000000 /* MBX Debug */ +#define ql_dbg_disc 0x10000000 /* Device Discovery Debug */ +#define ql_dbg_io 0x08000000 /* IO Tracing Debug */ +#define ql_dbg_dpc 0x04000000 /* DPC Thead Debug */ +#define ql_dbg_async 0x02000000 /* Async events Debug */ +#define ql_dbg_timer 0x01000000 /* Timer Debug */ +#define ql_dbg_user 0x00800000 /* User Space Interations Debug */ +#define ql_dbg_taskm 0x00400000 /* Task Management Debug */ +#define ql_dbg_aer 0x00200000 /* AER/EEH Debug */ +#define ql_dbg_multiq 0x00100000 /* MultiQ Debug */ +#define ql_dbg_p3p 0x00080000 /* P3P specific Debug */ +#define ql_dbg_vport 0x00040000 /* Virtual Port Debug */ +#define ql_dbg_buffer 0x00020000 /* For dumping the buffer/regs */ +#define ql_dbg_misc 0x00010000 /* For dumping everything that is not + * not covered by upper categories + */ + +#define QL_DBG_BUF_LEN 512 diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index a5a4e1275bf..0b4c2b794c6 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c @@ -64,7 +64,7 @@ qla2x00_dfs_fce_open(struct inode *inode, struct file *file) /* Pause tracing to flush FCE buffers. 
*/ rval = qla2x00_disable_fce_trace(vha, &ha->fce_wr, &ha->fce_rd); if (rval) - qla_printk(KERN_WARNING, ha, + ql_dbg(ql_dbg_user, vha, 0x705c, "DebugFS: Unable to disable FCE (%d).\n", rval); ha->flags.fce_enabled = 0; @@ -92,7 +92,7 @@ qla2x00_dfs_fce_release(struct inode *inode, struct file *file) rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, ha->fce_bufs, ha->fce_mb, &ha->fce_bufs); if (rval) { - qla_printk(KERN_WARNING, ha, + ql_dbg(ql_dbg_user, vha, 0x700d, "DebugFS: Unable to reinitialize FCE (%d).\n", rval); ha->flags.fce_enabled = 0; } @@ -125,8 +125,8 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha) atomic_set(&qla2x00_dfs_root_count, 0); qla2x00_dfs_root = debugfs_create_dir(QLA2XXX_DRIVER_NAME, NULL); if (!qla2x00_dfs_root) { - qla_printk(KERN_NOTICE, ha, - "DebugFS: Unable to create root directory.\n"); + ql_log(ql_log_warn, vha, 0x00f7, + "Unable to create debugfs root directory.\n"); goto out; } @@ -137,8 +137,8 @@ create_dir: mutex_init(&ha->fce_mutex); ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root); if (!ha->dfs_dir) { - qla_printk(KERN_NOTICE, ha, - "DebugFS: Unable to create ha directory.\n"); + ql_log(ql_log_warn, vha, 0x00f8, + "Unable to create debugfs ha directory.\n"); goto out; } @@ -148,8 +148,8 @@ create_nodes: ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha, &dfs_fce_ops); if (!ha->dfs_fce) { - qla_printk(KERN_NOTICE, ha, - "DebugFS: Unable to fce node.\n"); + ql_log(ql_log_warn, vha, 0x00f9, + "Unable to create debugfs fce node.\n"); goto out; } out: diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 0b381224ae4..29b1a3e2823 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -185,7 +185,7 @@ extern int qla24xx_start_scsi(srb_t *sp); int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *, uint16_t, uint16_t, uint8_t); extern int qla2x00_start_sp(srb_t *); -extern uint16_t qla24xx_calc_iocbs(uint16_t); +extern uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *, uint16_t); extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t); extern int qla24xx_dif_start_scsi(srb_t *); @@ -439,6 +439,9 @@ extern void qla81xx_fw_dump(scsi_qla_host_t *, int); extern void qla2x00_dump_regs(scsi_qla_host_t *); extern void qla2x00_dump_buffer(uint8_t *, uint32_t); extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t); +extern void ql_dump_regs(uint32_t, scsi_qla_host_t *, int32_t); +extern void ql_dump_buffer(uint32_t, scsi_qla_host_t *, int32_t, + uint8_t *, uint32_t); /* * Global Function Prototypes in qla_gs.c source file. @@ -478,7 +481,8 @@ extern int qla2x00_loopback_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16 extern int qla2x00_echo_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16_t *); extern int qla24xx_update_all_fcp_prio(scsi_qla_host_t *); -extern int qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *, uint8_t); +extern int qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *, + struct qla_fcp_prio_cfg *, uint8_t); /* * Global Function Prototypes in qla_dfs.c source file. 
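A standalone sketch (not part of the patch) of how the new ql_dbg() gating introduced in qla_dbg.c/qla_dbg.h above behaves: a debug message is emitted only when every bit of its category mask is set in the ql2xextended_error_logging module parameter, i.e. (level & ql2xextended_error_logging) == level, whereas ql_log() messages are compared against ql_errlev (0 fatal through 2 info) and print regardless of the category mask. The demo_dbg() helper, the simplified message header and main() below are illustrative assumptions; the category values and the 0x800 debug-id offset are taken from the hunks above.

#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>

/* Category masks as defined in qla_dbg.h above. */
#define ql_dbg_init	0x40000000	/* Init Debug */
#define ql_dbg_disc	0x10000000	/* Device Discovery Debug */
#define ql_dbg_buffer	0x00020000	/* For dumping the buffer/regs */

/* Module parameter in the real driver; a plain variable here. */
static int ql2xextended_error_logging;

/* Hypothetical stand-in for ql_dbg(): same gating test, simplified header. */
static void demo_dbg(uint32_t level, int32_t id, const char *fmt, ...)
{
	va_list ap;

	if ((level & ql2xextended_error_logging) != level)
		return;		/* category not enabled -- message dropped */

	printf("qla2xxx [0000:00:00.0]-%04x: ", (unsigned int)(id + 0x800));
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

int main(void)
{
	/* Enable only the discovery and buffer-dump categories. */
	ql2xextended_error_logging = ql_dbg_disc | ql_dbg_buffer;

	demo_dbg(ql_dbg_disc, 0x2044, "RFT_ID exiting normally.\n");	/* printed */
	demo_dbg(ql_dbg_init, 0x007f, "Checking mailboxes.\n");	/* dropped */
	/* A combined category needs both bits enabled; both are, so it prints. */
	demo_dbg(ql_dbg_disc + ql_dbg_buffer, 0x2077,
	    "GID_PT failed, rejected request.\n");
	return 0;
}

On a loaded driver the same selection happens at module load time; for example, passing ql2xextended_error_logging=0x7fffffff would be expected to enable every category listed in the table at the top of qla_dbg.c.
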
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 8cd9066ad90..37937aa3c3b 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -121,11 +121,10 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt, rval = QLA_FUNCTION_FAILED; if (ms_pkt->entry_status != 0) { - DEBUG2_3(printk(KERN_WARNING "scsi(%ld): %s failed, error status " - "(%x) on port_id: %02x%02x%02x.\n", - vha->host_no, routine, ms_pkt->entry_status, - vha->d_id.b.domain, vha->d_id.b.area, - vha->d_id.b.al_pa)); + ql_dbg(ql_dbg_disc, vha, 0x2031, + "%s failed, error status (%x) on port_id: %02x%02x%02x.\n", + routine, ms_pkt->entry_status, vha->d_id.b.domain, + vha->d_id.b.area, vha->d_id.b.al_pa); } else { if (IS_FWI2_CAPABLE(ha)) comp_status = le16_to_cpu( @@ -138,24 +137,24 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt, case CS_DATA_OVERRUN: /* Overrun? */ if (ct_rsp->header.response != __constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) { - DEBUG2_3(printk("scsi(%ld): %s failed, " - "rejected request on port_id: %02x%02x%02x\n", - vha->host_no, routine, + ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077, + "%s failed rejected request on port_id: " + "%02x%02x%02x.\n", routine, vha->d_id.b.domain, vha->d_id.b.area, - vha->d_id.b.al_pa)); - DEBUG2_3(qla2x00_dump_buffer( - (uint8_t *)&ct_rsp->header, - sizeof(struct ct_rsp_hdr))); + vha->d_id.b.al_pa); + ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, + 0x2078, (uint8_t *)&ct_rsp->header, + sizeof(struct ct_rsp_hdr)); rval = QLA_INVALID_COMMAND; } else rval = QLA_SUCCESS; break; default: - DEBUG2_3(printk("scsi(%ld): %s failed, completion " - "status (%x) on port_id: %02x%02x%02x.\n", - vha->host_no, routine, comp_status, + ql_dbg(ql_dbg_disc, vha, 0x2033, + "%s failed, completion status (%x) on port_id: " + "%02x%02x%02x.\n", routine, comp_status, vha->d_id.b.domain, vha->d_id.b.area, - vha->d_id.b.al_pa)); + vha->d_id.b.al_pa); break; } } @@ -202,8 +201,8 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport) sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG2_3(printk("scsi(%ld): GA_NXT issue IOCB failed (%d).\n", - vha->host_no, rval)); + ql_dbg(ql_dbg_disc, vha, 0x2062, + "GA_NXT issue IOCB failed (%d).\n", rval); } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") != QLA_SUCCESS) { rval = QLA_FUNCTION_FAILED; @@ -222,11 +221,10 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport) ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE) fcport->d_id.b.domain = 0xf0; - DEBUG2_3(printk("scsi(%ld): GA_NXT entry - " - "nn %02x%02x%02x%02x%02x%02x%02x%02x " + ql_dbg(ql_dbg_disc, vha, 0x2063, + "GA_NXT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x " "pn %02x%02x%02x%02x%02x%02x%02x%02x " - "portid=%02x%02x%02x.\n", - vha->host_no, + "port_id=%02x%02x%02x.\n", fcport->node_name[0], fcport->node_name[1], fcport->node_name[2], fcport->node_name[3], fcport->node_name[4], fcport->node_name[5], @@ -236,7 +234,7 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport) fcport->port_name[4], fcport->port_name[5], fcport->port_name[6], fcport->port_name[7], fcport->d_id.b.domain, fcport->d_id.b.area, - fcport->d_id.b.al_pa)); + fcport->d_id.b.al_pa); } return (rval); @@ -287,8 +285,8 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list) sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG2_3(printk("scsi(%ld): GID_PT issue IOCB failed (%d).\n", - vha->host_no, rval)); + ql_dbg(ql_dbg_disc, vha, 0x2055, + "GID_PT issue IOCB failed (%d).\n", 
rval); } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") != QLA_SUCCESS) { rval = QLA_FUNCTION_FAILED; @@ -364,8 +362,8 @@ qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list) sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG2_3(printk("scsi(%ld): GPN_ID issue IOCB failed " - "(%d).\n", vha->host_no, rval)); + ql_dbg(ql_dbg_disc, vha, 0x2056, + "GPN_ID issue IOCB failed (%d).\n", rval); } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GPN_ID") != QLA_SUCCESS) { rval = QLA_FUNCTION_FAILED; @@ -424,8 +422,8 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list) sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG2_3(printk("scsi(%ld): GNN_ID issue IOCB failed " - "(%d).\n", vha->host_no, rval)); + ql_dbg(ql_dbg_disc, vha, 0x2057, + "GNN_ID issue IOCB failed (%d).\n", rval); } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GNN_ID") != QLA_SUCCESS) { rval = QLA_FUNCTION_FAILED; @@ -434,11 +432,10 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list) memcpy(list[i].node_name, ct_rsp->rsp.gnn_id.node_name, WWN_SIZE); - DEBUG2_3(printk("scsi(%ld): GID_PT entry - " - "nn %02x%02x%02x%02x%02x%02x%02x%02x " - "pn %02x%02x%02x%02x%02x%02x%02x%02x " + ql_dbg(ql_dbg_disc, vha, 0x2058, + "GID_PT entry - nn %02x%02x%02x%02x%02x%02x%02X%02x " + "pn %02x%02x%02x%02x%02x%02x%02X%02x " "portid=%02x%02x%02x.\n", - vha->host_no, list[i].node_name[0], list[i].node_name[1], list[i].node_name[2], list[i].node_name[3], list[i].node_name[4], list[i].node_name[5], @@ -448,7 +445,7 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list) list[i].port_name[4], list[i].port_name[5], list[i].port_name[6], list[i].port_name[7], list[i].d_id.b.domain, list[i].d_id.b.area, - list[i].d_id.b.al_pa)); + list[i].d_id.b.al_pa); } /* Last device exit. 
*/ @@ -499,14 +496,14 @@ qla2x00_rft_id(scsi_qla_host_t *vha) sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG2_3(printk("scsi(%ld): RFT_ID issue IOCB failed (%d).\n", - vha->host_no, rval)); + ql_dbg(ql_dbg_disc, vha, 0x2043, + "RFT_ID issue IOCB failed (%d).\n", rval); } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFT_ID") != QLA_SUCCESS) { rval = QLA_FUNCTION_FAILED; } else { - DEBUG2(printk("scsi(%ld): RFT_ID exiting normally.\n", - vha->host_no)); + ql_dbg(ql_dbg_disc, vha, 0x2044, + "RFT_ID exiting normally.\n"); } return (rval); @@ -528,8 +525,8 @@ qla2x00_rff_id(scsi_qla_host_t *vha) struct ct_sns_rsp *ct_rsp; if (IS_QLA2100(ha) || IS_QLA2200(ha)) { - DEBUG2(printk("scsi(%ld): RFF_ID call unsupported on " - "ISP2100/ISP2200.\n", vha->host_no)); + ql_dbg(ql_dbg_disc, vha, 0x2046, + "RFF_ID call not supported on ISP2100/ISP2200.\n"); return (QLA_SUCCESS); } @@ -556,14 +553,14 @@ qla2x00_rff_id(scsi_qla_host_t *vha) sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG2_3(printk("scsi(%ld): RFF_ID issue IOCB failed (%d).\n", - vha->host_no, rval)); + ql_dbg(ql_dbg_disc, vha, 0x2047, + "RFF_ID issue IOCB failed (%d).\n", rval); } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFF_ID") != QLA_SUCCESS) { rval = QLA_FUNCTION_FAILED; } else { - DEBUG2(printk("scsi(%ld): RFF_ID exiting normally.\n", - vha->host_no)); + ql_dbg(ql_dbg_disc, vha, 0x2048, + "RFF_ID exiting normally.\n"); } return (rval); @@ -609,14 +606,14 @@ qla2x00_rnn_id(scsi_qla_host_t *vha) sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG2_3(printk("scsi(%ld): RNN_ID issue IOCB failed (%d).\n", - vha->host_no, rval)); + ql_dbg(ql_dbg_disc, vha, 0x204d, + "RNN_ID issue IOCB failed (%d).\n", rval); } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RNN_ID") != QLA_SUCCESS) { rval = QLA_FUNCTION_FAILED; } else { - DEBUG2(printk("scsi(%ld): RNN_ID exiting normally.\n", - vha->host_no)); + ql_dbg(ql_dbg_disc, vha, 0x204e, + "RNN_ID exiting normally.\n"); } return (rval); @@ -647,8 +644,8 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha) struct ct_sns_rsp *ct_rsp; if (IS_QLA2100(ha) || IS_QLA2200(ha)) { - DEBUG2(printk("scsi(%ld): RSNN_ID call unsupported on " - "ISP2100/ISP2200.\n", vha->host_no)); + ql_dbg(ql_dbg_disc, vha, 0x2050, + "RSNN_ID call unsupported on ISP2100/ISP2200.\n"); return (QLA_SUCCESS); } @@ -682,14 +679,14 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha) sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG2_3(printk("scsi(%ld): RSNN_NN issue IOCB failed (%d).\n", - vha->host_no, rval)); + ql_dbg(ql_dbg_disc, vha, 0x2051, + "RSNN_NN issue IOCB failed (%d).\n", rval); } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RSNN_NN") != QLA_SUCCESS) { rval = QLA_FUNCTION_FAILED; } else { - DEBUG2(printk("scsi(%ld): RSNN_NN exiting normally.\n", - vha->host_no)); + ql_dbg(ql_dbg_disc, vha, 0x2052, + "RSNN_NN exiting normally.\n"); } return (rval); @@ -757,13 +754,14 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport) sizeof(struct sns_cmd_pkt)); if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG2_3(printk("scsi(%ld): GA_NXT Send SNS failed (%d).\n", - vha->host_no, rval)); + ql_dbg(ql_dbg_disc, vha, 0x205f, + "GA_NXT Send SNS failed (%d).\n", rval); } else if (sns_cmd->p.gan_data[8] != 0x80 || sns_cmd->p.gan_data[9] != 0x02) { - DEBUG2_3(printk("scsi(%ld): GA_NXT failed, rejected request, " - "ga_nxt_rsp:\n", vha->host_no)); - DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gan_data, 16)); + ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 
0x207d, + "GA_NXT failed, rejected request ga_nxt_rsp:\n"); + ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074, + sns_cmd->p.gan_data, 16); rval = QLA_FUNCTION_FAILED; } else { /* Populate fc_port_t entry. */ @@ -778,11 +776,10 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport) sns_cmd->p.gan_data[16] != NS_NL_PORT_TYPE) fcport->d_id.b.domain = 0xf0; - DEBUG2_3(printk("scsi(%ld): GA_NXT entry - " - "nn %02x%02x%02x%02x%02x%02x%02x%02x " + ql_dbg(ql_dbg_disc, vha, 0x2061, + "GA_NXT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x " "pn %02x%02x%02x%02x%02x%02x%02x%02x " - "portid=%02x%02x%02x.\n", - vha->host_no, + "port_id=%02x%02x%02x.\n", fcport->node_name[0], fcport->node_name[1], fcport->node_name[2], fcport->node_name[3], fcport->node_name[4], fcport->node_name[5], @@ -792,7 +789,7 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport) fcport->port_name[4], fcport->port_name[5], fcport->port_name[6], fcport->port_name[7], fcport->d_id.b.domain, fcport->d_id.b.area, - fcport->d_id.b.al_pa)); + fcport->d_id.b.al_pa); } return (rval); @@ -831,13 +828,14 @@ qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list) sizeof(struct sns_cmd_pkt)); if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG2_3(printk("scsi(%ld): GID_PT Send SNS failed (%d).\n", - vha->host_no, rval)); + ql_dbg(ql_dbg_disc, vha, 0x206d, + "GID_PT Send SNS failed (%d).\n", rval); } else if (sns_cmd->p.gid_data[8] != 0x80 || sns_cmd->p.gid_data[9] != 0x02) { - DEBUG2_3(printk("scsi(%ld): GID_PT failed, rejected request, " - "gid_rsp:\n", vha->host_no)); - DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gid_data, 16)); + ql_dbg(ql_dbg_disc, vha, 0x202f, + "GID_PT failed, rejected request, gid_rsp:\n"); + ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2081, + sns_cmd->p.gid_data, 16); rval = QLA_FUNCTION_FAILED; } else { /* Set port IDs in switch info list. 
*/ @@ -900,13 +898,14 @@ qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list) GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt)); if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG2_3(printk("scsi(%ld): GPN_ID Send SNS failed " - "(%d).\n", vha->host_no, rval)); + ql_dbg(ql_dbg_disc, vha, 0x2032, + "GPN_ID Send SNS failed (%d).\n", rval); } else if (sns_cmd->p.gpn_data[8] != 0x80 || sns_cmd->p.gpn_data[9] != 0x02) { - DEBUG2_3(printk("scsi(%ld): GPN_ID failed, rejected " - "request, gpn_rsp:\n", vha->host_no)); - DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gpn_data, 16)); + ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e, + "GPN_ID failed, rejected request, gpn_rsp:\n"); + ql_dump_buffer(ql_dbg_disc, vha, 0x207f, + sns_cmd->p.gpn_data, 16); rval = QLA_FUNCTION_FAILED; } else { /* Save portname */ @@ -955,24 +954,24 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list) GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt)); if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG2_3(printk("scsi(%ld): GNN_ID Send SNS failed " - "(%d).\n", vha->host_no, rval)); + ql_dbg(ql_dbg_disc, vha, 0x203f, + "GNN_ID Send SNS failed (%d).\n", rval); } else if (sns_cmd->p.gnn_data[8] != 0x80 || sns_cmd->p.gnn_data[9] != 0x02) { - DEBUG2_3(printk("scsi(%ld): GNN_ID failed, rejected " - "request, gnn_rsp:\n", vha->host_no)); - DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gnn_data, 16)); + ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2082, + "GNN_ID failed, rejected request, gnn_rsp:\n"); + ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207a, + sns_cmd->p.gnn_data, 16); rval = QLA_FUNCTION_FAILED; } else { /* Save nodename */ memcpy(list[i].node_name, &sns_cmd->p.gnn_data[16], WWN_SIZE); - DEBUG2_3(printk("scsi(%ld): GID_PT entry - " - "nn %02x%02x%02x%02x%02x%02x%02x%02x " + ql_dbg(ql_dbg_disc, vha, 0x206e, + "GID_PT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x " "pn %02x%02x%02x%02x%02x%02x%02x%02x " - "portid=%02x%02x%02x.\n", - vha->host_no, + "port_id=%02x%02x%02x.\n", list[i].node_name[0], list[i].node_name[1], list[i].node_name[2], list[i].node_name[3], list[i].node_name[4], list[i].node_name[5], @@ -982,7 +981,7 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list) list[i].port_name[4], list[i].port_name[5], list[i].port_name[6], list[i].port_name[7], list[i].d_id.b.domain, list[i].d_id.b.area, - list[i].d_id.b.al_pa)); + list[i].d_id.b.al_pa); } /* Last device exit. 
*/ @@ -1025,17 +1024,18 @@ qla2x00_sns_rft_id(scsi_qla_host_t *vha) sizeof(struct sns_cmd_pkt)); if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG2_3(printk("scsi(%ld): RFT_ID Send SNS failed (%d).\n", - vha->host_no, rval)); + ql_dbg(ql_dbg_disc, vha, 0x2060, + "RFT_ID Send SNS failed (%d).\n", rval); } else if (sns_cmd->p.rft_data[8] != 0x80 || sns_cmd->p.rft_data[9] != 0x02) { - DEBUG2_3(printk("scsi(%ld): RFT_ID failed, rejected request, " - "rft_rsp:\n", vha->host_no)); - DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.rft_data, 16)); + ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2083, + "RFT_ID failed, rejected request rft_rsp:\n"); + ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2080, + sns_cmd->p.rft_data, 16); rval = QLA_FUNCTION_FAILED; } else { - DEBUG2(printk("scsi(%ld): RFT_ID exiting normally.\n", - vha->host_no)); + ql_dbg(ql_dbg_disc, vha, 0x2073, + "RFT_ID exiting normally.\n"); } return (rval); @@ -1081,17 +1081,18 @@ qla2x00_sns_rnn_id(scsi_qla_host_t *vha) sizeof(struct sns_cmd_pkt)); if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG2_3(printk("scsi(%ld): RNN_ID Send SNS failed (%d).\n", - vha->host_no, rval)); + ql_dbg(ql_dbg_disc, vha, 0x204a, + "RNN_ID Send SNS failed (%d).\n", rval); } else if (sns_cmd->p.rnn_data[8] != 0x80 || sns_cmd->p.rnn_data[9] != 0x02) { - DEBUG2_3(printk("scsi(%ld): RNN_ID failed, rejected request, " - "rnn_rsp:\n", vha->host_no)); - DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.rnn_data, 16)); + ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207b, + "RNN_ID failed, rejected request, rnn_rsp:\n"); + ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207c, + sns_cmd->p.rnn_data, 16); rval = QLA_FUNCTION_FAILED; } else { - DEBUG2(printk("scsi(%ld): RNN_ID exiting normally.\n", - vha->host_no)); + ql_dbg(ql_dbg_disc, vha, 0x204c, + "RNN_ID exiting normally.\n"); } return (rval); @@ -1116,10 +1117,10 @@ qla2x00_mgmt_svr_login(scsi_qla_host_t *vha) ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa, mb, BIT_1|BIT_0); if (mb[0] != MBS_COMMAND_COMPLETE) { - DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: " - "loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x\n", - __func__, vha->host_no, vha->mgmt_svr_loop_id, mb[0], mb[1], - mb[2], mb[6], mb[7])); + ql_dbg(ql_dbg_disc, vha, 0x2024, + "Failed management_server login: loopid=%x mb[0]=%x " + "mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n", + vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6], mb[7]); ret = QLA_FUNCTION_FAILED; } else vha->flags.management_server_logged_in = 1; @@ -1292,11 +1293,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE); size += 4 + WWN_SIZE; - DEBUG13(printk("%s(%ld): NODENAME=%02x%02x%02x%02x%02x%02x%02x%02x.\n", - __func__, vha->host_no, - eiter->a.node_name[0], eiter->a.node_name[1], eiter->a.node_name[2], - eiter->a.node_name[3], eiter->a.node_name[4], eiter->a.node_name[5], - eiter->a.node_name[6], eiter->a.node_name[7])); + ql_dbg(ql_dbg_disc, vha, 0x2025, + "NodeName = %02x%02x%02x%02x%02x%02x%02x%02x.\n", + eiter->a.node_name[0], eiter->a.node_name[1], + eiter->a.node_name[2], eiter->a.node_name[3], + eiter->a.node_name[4], eiter->a.node_name[5], + eiter->a.node_name[6], eiter->a.node_name[7]); /* Manufacturer. 
*/ eiter = (struct ct_fdmi_hba_attr *) (entries + size); @@ -1307,8 +1309,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) eiter->len = cpu_to_be16(4 + alen); size += 4 + alen; - DEBUG13(printk("%s(%ld): MANUFACTURER=%s.\n", __func__, vha->host_no, - eiter->a.manufacturer)); + ql_dbg(ql_dbg_disc, vha, 0x2026, + "Manufacturer = %s.\n", eiter->a.manufacturer); /* Serial number. */ eiter = (struct ct_fdmi_hba_attr *) (entries + size); @@ -1320,8 +1322,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) eiter->len = cpu_to_be16(4 + alen); size += 4 + alen; - DEBUG13(printk("%s(%ld): SERIALNO=%s.\n", __func__, vha->host_no, - eiter->a.serial_num)); + ql_dbg(ql_dbg_disc, vha, 0x2027, + "Serial no. = %s.\n", eiter->a.serial_num); /* Model name. */ eiter = (struct ct_fdmi_hba_attr *) (entries + size); @@ -1332,8 +1334,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) eiter->len = cpu_to_be16(4 + alen); size += 4 + alen; - DEBUG13(printk("%s(%ld): MODEL_NAME=%s.\n", __func__, vha->host_no, - eiter->a.model)); + ql_dbg(ql_dbg_disc, vha, 0x2028, + "Model Name = %s.\n", eiter->a.model); /* Model description. */ eiter = (struct ct_fdmi_hba_attr *) (entries + size); @@ -1345,8 +1347,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) eiter->len = cpu_to_be16(4 + alen); size += 4 + alen; - DEBUG13(printk("%s(%ld): MODEL_DESC=%s.\n", __func__, vha->host_no, - eiter->a.model_desc)); + ql_dbg(ql_dbg_disc, vha, 0x2029, + "Model Desc = %s.\n", eiter->a.model_desc); /* Hardware version. */ eiter = (struct ct_fdmi_hba_attr *) (entries + size); @@ -1357,8 +1359,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) eiter->len = cpu_to_be16(4 + alen); size += 4 + alen; - DEBUG13(printk("%s(%ld): HARDWAREVER=%s.\n", __func__, vha->host_no, - eiter->a.hw_version)); + ql_dbg(ql_dbg_disc, vha, 0x202a, + "Hardware ver = %s.\n", eiter->a.hw_version); /* Driver version. */ eiter = (struct ct_fdmi_hba_attr *) (entries + size); @@ -1369,8 +1371,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) eiter->len = cpu_to_be16(4 + alen); size += 4 + alen; - DEBUG13(printk("%s(%ld): DRIVERVER=%s.\n", __func__, vha->host_no, - eiter->a.driver_version)); + ql_dbg(ql_dbg_disc, vha, 0x202b, + "Driver ver = %s.\n", eiter->a.driver_version); /* Option ROM version. */ eiter = (struct ct_fdmi_hba_attr *) (entries + size); @@ -1381,8 +1383,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) eiter->len = cpu_to_be16(4 + alen); size += 4 + alen; - DEBUG13(printk("%s(%ld): OPTROMVER=%s.\n", __func__, vha->host_no, - eiter->a.orom_version)); + ql_dbg(ql_dbg_disc, vha , 0x202c, + "Optrom vers = %s.\n", eiter->a.orom_version); /* Firmware version */ eiter = (struct ct_fdmi_hba_attr *) (entries + size); @@ -1393,44 +1395,46 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) eiter->len = cpu_to_be16(4 + alen); size += 4 + alen; - DEBUG13(printk("%s(%ld): FIRMWAREVER=%s.\n", __func__, vha->host_no, - eiter->a.fw_version)); + ql_dbg(ql_dbg_disc, vha, 0x202d, + "Firmware vers = %s.\n", eiter->a.fw_version); /* Update MS request size. 
*/ qla2x00_update_ms_fdmi_iocb(vha, size + 16); - DEBUG13(printk("%s(%ld): RHBA identifier=" - "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__, - vha->host_no, ct_req->req.rhba.hba_identifier[0], + ql_dbg(ql_dbg_disc, vha, 0x202e, + "RHBA identifier = " + "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", + ct_req->req.rhba.hba_identifier[0], ct_req->req.rhba.hba_identifier[1], ct_req->req.rhba.hba_identifier[2], ct_req->req.rhba.hba_identifier[3], ct_req->req.rhba.hba_identifier[4], ct_req->req.rhba.hba_identifier[5], ct_req->req.rhba.hba_identifier[6], - ct_req->req.rhba.hba_identifier[7], size)); - DEBUG13(qla2x00_dump_buffer(entries, size)); + ct_req->req.rhba.hba_identifier[7], size); + ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2076, + entries, size); /* Execute MS IOCB */ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG2_3(printk("scsi(%ld): RHBA issue IOCB failed (%d).\n", - vha->host_no, rval)); + ql_dbg(ql_dbg_disc, vha, 0x2030, + "RHBA issue IOCB failed (%d).\n", rval); } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") != QLA_SUCCESS) { rval = QLA_FUNCTION_FAILED; if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM && ct_rsp->header.explanation_code == CT_EXPL_ALREADY_REGISTERED) { - DEBUG2_13(printk("%s(%ld): HBA already registered.\n", - __func__, vha->host_no)); + ql_dbg(ql_dbg_disc, vha, 0x2034, + "HBA already registered.\n"); rval = QLA_ALREADY_REGISTERED; } } else { - DEBUG2(printk("scsi(%ld): RHBA exiting normally.\n", - vha->host_no)); + ql_dbg(ql_dbg_disc, vha, 0x2035, + "RHBA exiting normally.\n"); } return rval; @@ -1464,26 +1468,26 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *vha) /* Prepare FDMI command arguments -- portname. */ memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE); - DEBUG13(printk("%s(%ld): DHBA portname=" - "%02x%02x%02x%02x%02x%02x%02x%02x.\n", __func__, vha->host_no, + ql_dbg(ql_dbg_disc, vha, 0x2036, + "DHBA portname = %02x%02x%02x%02x%02x%02x%02x%02x.\n", ct_req->req.dhba.port_name[0], ct_req->req.dhba.port_name[1], ct_req->req.dhba.port_name[2], ct_req->req.dhba.port_name[3], ct_req->req.dhba.port_name[4], ct_req->req.dhba.port_name[5], - ct_req->req.dhba.port_name[6], ct_req->req.dhba.port_name[7])); + ct_req->req.dhba.port_name[6], ct_req->req.dhba.port_name[7]); /* Execute MS IOCB */ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG2_3(printk("scsi(%ld): DHBA issue IOCB failed (%d).\n", - vha->host_no, rval)); + ql_dbg(ql_dbg_disc, vha, 0x2037, + "DHBA issue IOCB failed (%d).\n", rval); } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") != QLA_SUCCESS) { rval = QLA_FUNCTION_FAILED; } else { - DEBUG2(printk("scsi(%ld): DHBA exiting normally.\n", - vha->host_no)); + ql_dbg(ql_dbg_disc, vha, 0x2038, + "DHBA exiting normally.\n"); } return rval; @@ -1534,9 +1538,10 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha) eiter->a.fc4_types[2] = 0x01; size += 4 + 32; - DEBUG13(printk("%s(%ld): FC4_TYPES=%02x %02x.\n", __func__, - vha->host_no, eiter->a.fc4_types[2], - eiter->a.fc4_types[1])); + ql_dbg(ql_dbg_disc, vha, 0x2039, + "FC4_TYPES=%02x %02x.\n", + eiter->a.fc4_types[2], + eiter->a.fc4_types[1]); /* Supported speed. 
*/ eiter = (struct ct_fdmi_port_attr *) (entries + size); @@ -1561,8 +1566,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha) FDMI_PORT_SPEED_1GB); size += 4 + 4; - DEBUG13(printk("%s(%ld): SUPPORTED_SPEED=%x.\n", __func__, vha->host_no, - eiter->a.sup_speed)); + ql_dbg(ql_dbg_disc, vha, 0x203a, + "Supported_Speed=%x.\n", eiter->a.sup_speed); /* Current speed. */ eiter = (struct ct_fdmi_port_attr *) (entries + size); @@ -1596,8 +1601,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha) } size += 4 + 4; - DEBUG13(printk("%s(%ld): CURRENT_SPEED=%x.\n", __func__, vha->host_no, - eiter->a.cur_speed)); + ql_dbg(ql_dbg_disc, vha, 0x203b, + "Current_Speed=%x.\n", eiter->a.cur_speed); /* Max frame size. */ eiter = (struct ct_fdmi_port_attr *) (entries + size); @@ -1609,8 +1614,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha) eiter->a.max_frame_size = cpu_to_be32(max_frame_size); size += 4 + 4; - DEBUG13(printk("%s(%ld): MAX_FRAME_SIZE=%x.\n", __func__, vha->host_no, - eiter->a.max_frame_size)); + ql_dbg(ql_dbg_disc, vha, 0x203c, + "Max_Frame_Size=%x.\n", eiter->a.max_frame_size); /* OS device name. */ eiter = (struct ct_fdmi_port_attr *) (entries + size); @@ -1621,8 +1626,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha) eiter->len = cpu_to_be16(4 + alen); size += 4 + alen; - DEBUG13(printk("%s(%ld): OS_DEVICE_NAME=%s.\n", __func__, vha->host_no, - eiter->a.os_dev_name)); + ql_dbg(ql_dbg_disc, vha, 0x204b, + "OS_Device_Name=%s.\n", eiter->a.os_dev_name); /* Hostname. */ if (strlen(fc_host_system_hostname(vha->host))) { @@ -1637,35 +1642,36 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha) eiter->len = cpu_to_be16(4 + alen); size += 4 + alen; - DEBUG13(printk("%s(%ld): HOSTNAME=%s.\n", __func__, - vha->host_no, eiter->a.host_name)); + ql_dbg(ql_dbg_disc, vha, 0x203d, + "HostName=%s.\n", eiter->a.host_name); } /* Update MS request size. 
*/ qla2x00_update_ms_fdmi_iocb(vha, size + 16); - DEBUG13(printk("%s(%ld): RPA portname=" - "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__, - vha->host_no, ct_req->req.rpa.port_name[0], - ct_req->req.rpa.port_name[1], ct_req->req.rpa.port_name[2], - ct_req->req.rpa.port_name[3], ct_req->req.rpa.port_name[4], - ct_req->req.rpa.port_name[5], ct_req->req.rpa.port_name[6], - ct_req->req.rpa.port_name[7], size)); - DEBUG13(qla2x00_dump_buffer(entries, size)); + ql_dbg(ql_dbg_disc, vha, 0x203e, + "RPA portname= %02x%02x%02x%02x%02X%02x%02x%02x size=%d.\n", + ct_req->req.rpa.port_name[0], ct_req->req.rpa.port_name[1], + ct_req->req.rpa.port_name[2], ct_req->req.rpa.port_name[3], + ct_req->req.rpa.port_name[4], ct_req->req.rpa.port_name[5], + ct_req->req.rpa.port_name[6], ct_req->req.rpa.port_name[7], + size); + ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079, + entries, size); /* Execute MS IOCB */ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG2_3(printk("scsi(%ld): RPA issue IOCB failed (%d).\n", - vha->host_no, rval)); + ql_dbg(ql_dbg_disc, vha, 0x2040, + "RPA issue IOCB failed (%d).\n", rval); } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") != QLA_SUCCESS) { rval = QLA_FUNCTION_FAILED; } else { - DEBUG2(printk("scsi(%ld): RPA exiting normally.\n", - vha->host_no)); + ql_dbg(ql_dbg_disc, vha, 0x2041, + "RPA exiting nornally.\n"); } return rval; @@ -1749,8 +1755,8 @@ qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list) sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG2_3(printk("scsi(%ld): GFPN_ID issue IOCB " - "failed (%d).\n", vha->host_no, rval)); + ql_dbg(ql_dbg_disc, vha, 0x2023, + "GFPN_ID issue IOCB failed (%d).\n", rval); } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GFPN_ID") != QLA_SUCCESS) { rval = QLA_FUNCTION_FAILED; @@ -1860,8 +1866,8 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list) sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG2_3(printk("scsi(%ld): GPSC issue IOCB " - "failed (%d).\n", vha->host_no, rval)); + ql_dbg(ql_dbg_disc, vha, 0x2059, + "GPSC issue IOCB failed (%d).\n", rval); } else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GPSC")) != QLA_SUCCESS) { /* FM command unsupported? */ @@ -1870,9 +1876,9 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list) CT_REASON_INVALID_COMMAND_CODE || ct_rsp->header.reason_code == CT_REASON_COMMAND_UNSUPPORTED)) { - DEBUG2(printk("scsi(%ld): GPSC command " - "unsupported, disabling query...\n", - vha->host_no)); + ql_dbg(ql_dbg_disc, vha, 0x205a, + "GPSC command unsupported, disabling " + "query.\n"); ha->flags.gpsc_supported = 0; rval = QLA_FUNCTION_FAILED; break; @@ -1898,9 +1904,10 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list) break; } - DEBUG2_3(printk("scsi(%ld): GPSC ext entry - " - "fpn %02x%02x%02x%02x%02x%02x%02x%02x speeds=%04x " - "speed=%04x.\n", vha->host_no, + ql_dbg(ql_dbg_disc, vha, 0x205b, + "GPSC ext entry - fpn " + "%02x%02x%02x%02x%02x%02x%02x%02x speeds=%04x " + "speed=%04x.\n", list[i].fabric_port_name[0], list[i].fabric_port_name[1], list[i].fabric_port_name[2], @@ -1910,7 +1917,7 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list) list[i].fabric_port_name[6], list[i].fabric_port_name[7], be16_to_cpu(ct_rsp->rsp.gpsc.speeds), - be16_to_cpu(ct_rsp->rsp.gpsc.speed))); + be16_to_cpu(ct_rsp->rsp.gpsc.speed)); } /* Last device exit. 
*/ @@ -1968,14 +1975,12 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list) sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { - DEBUG2_3(printk(KERN_INFO - "scsi(%ld): GFF_ID issue IOCB failed " - "(%d).\n", vha->host_no, rval)); + ql_dbg(ql_dbg_disc, vha, 0x205c, + "GFF_ID issue IOCB failed (%d).\n", rval); } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GFF_ID") != QLA_SUCCESS) { - DEBUG2_3(printk(KERN_INFO - "scsi(%ld): GFF_ID IOCB status had a " - "failure status code\n", vha->host_no)); + ql_dbg(ql_dbg_disc, vha, 0x205d, + "GFF_ID IOCB status had a failure status code.\n"); } else { fcp_scsi_features = ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET]; diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 920b76bfbb9..def694271bf 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -153,11 +153,10 @@ qla2x00_async_iocb_timeout(srb_t *sp) fc_port_t *fcport = sp->fcport; struct srb_ctx *ctx = sp->ctx; - DEBUG2(printk(KERN_WARNING - "scsi(%ld:%x): Async-%s timeout - portid=%02x%02x%02x.\n", - fcport->vha->host_no, sp->handle, - ctx->name, fcport->d_id.b.domain, - fcport->d_id.b.area, fcport->d_id.b.al_pa)); + ql_dbg(ql_dbg_disc, fcport->vha, 0x2071, + "Async-%s timeout - portid=%02x%02x%02x.\n", + ctx->name, fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa); fcport->flags &= ~FCF_ASYNC_SENT; if (ctx->type == SRB_LOGIN_CMD) { @@ -211,11 +210,10 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, if (rval != QLA_SUCCESS) goto done_free_sp; - DEBUG2(printk(KERN_DEBUG - "scsi(%ld:%x): Async-login - loop-id=%x portid=%02x%02x%02x " - "retries=%d.\n", fcport->vha->host_no, sp->handle, fcport->loop_id, - fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, - fcport->login_retry)); + ql_dbg(ql_dbg_disc, vha, 0x2072, + "Async-login - loopid=%x portid=%02x%02x%02x retries=%d.\n", + fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa, fcport->login_retry); return rval; done_free_sp: @@ -259,10 +257,10 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport) if (rval != QLA_SUCCESS) goto done_free_sp; - DEBUG2(printk(KERN_DEBUG - "scsi(%ld:%x): Async-logout - loop-id=%x portid=%02x%02x%02x.\n", - fcport->vha->host_no, sp->handle, fcport->loop_id, - fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa)); + ql_dbg(ql_dbg_disc, vha, 0x2070, + "Async-logout - loop-id=%x portid=%02x%02x%02x.\n", + fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa); return rval; done_free_sp: @@ -309,11 +307,10 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport, if (rval != QLA_SUCCESS) goto done_free_sp; - DEBUG2(printk(KERN_DEBUG - "scsi(%ld:%x): Async-adisc - loop-id=%x portid=%02x%02x%02x.\n", - fcport->vha->host_no, sp->handle, fcport->loop_id, - fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa)); - + ql_dbg(ql_dbg_disc, vha, 0x206f, + "Async-adisc - loopid=%x portid=%02x%02x%02x.\n", + fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa); return rval; done_free_sp: @@ -362,11 +359,10 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, if (rval != QLA_SUCCESS) goto done_free_sp; - DEBUG2(printk(KERN_DEBUG - "scsi(%ld:%x): Async-tmf - loop-id=%x portid=%02x%02x%02x.\n", - fcport->vha->host_no, sp->handle, fcport->loop_id, - fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa)); - + ql_dbg(ql_dbg_taskm, 
vha, 0x802f, + "Async-tmf loop-id=%x portid=%02x%02x%02x.\n", + fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa); return rval; done_free_sp: @@ -471,9 +467,8 @@ qla2x00_async_tm_cmd_done(struct scsi_qla_host *vha, fc_port_t *fcport, flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID); if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) { - DEBUG2_3_11(printk(KERN_WARNING - "%s(%ld): TM IOCB failed (%x).\n", - __func__, vha->host_no, rval)); + ql_dbg(ql_dbg_taskm, vha, 0x8030, + "TM IOCB failed (%x).\n", rval); } return; @@ -519,11 +514,12 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) set_bit(0, ha->req_qid_map); set_bit(0, ha->rsp_qid_map); - qla_printk(KERN_INFO, ha, "Configuring PCI space...\n"); + ql_log(ql_log_info, vha, 0x0040, + "Configuring PCI space...\n"); rval = ha->isp_ops->pci_config(vha); if (rval) { - DEBUG2(printk("scsi(%ld): Unable to configure PCI space.\n", - vha->host_no)); + ql_log(ql_log_warn, vha, 0x0044, + "Unable to configure PCI space.\n"); return (rval); } @@ -531,20 +527,21 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) rval = qla2xxx_get_flash_info(vha); if (rval) { - DEBUG2(printk("scsi(%ld): Unable to validate FLASH data.\n", - vha->host_no)); + ql_log(ql_log_fatal, vha, 0x004f, + "Unable to validate FLASH data.\n"); return (rval); } ha->isp_ops->get_flash_version(vha, req->ring); - - qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n"); + ql_log(ql_log_info, vha, 0x0061, + "Configure NVRAM parameters...\n"); ha->isp_ops->nvram_config(vha); if (ha->flags.disable_serdes) { /* Mask HBA via NVRAM settings? */ - qla_printk(KERN_INFO, ha, "Masking HBA WWPN " + ql_log(ql_log_info, vha, 0x0077, + "Masking HBA WWPN " "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n", vha->port_name[0], vha->port_name[1], vha->port_name[2], vha->port_name[3], @@ -553,7 +550,8 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) return QLA_FUNCTION_FAILED; } - qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n"); + ql_log(ql_log_info, vha, 0x0078, + "Verifying loaded RISC code...\n"); if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) { rval = ha->isp_ops->chip_diag(vha); @@ -567,7 +565,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) if (IS_QLA84XX(ha)) { ha->cs84xx = qla84xx_get_chip(vha); if (!ha->cs84xx) { - qla_printk(KERN_ERR, ha, + ql_log(ql_log_warn, vha, 0x00d0, "Unable to configure ISP84XX.\n"); return QLA_FUNCTION_FAILED; } @@ -579,8 +577,8 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) /* Issue verify 84xx FW IOCB to complete 84xx initialization */ rval = qla84xx_init_chip(vha); if (rval != QLA_SUCCESS) { - qla_printk(KERN_ERR, ha, - "Unable to initialize ISP84XX.\n"); + ql_log(ql_log_warn, vha, 0x00d4, + "Unable to initialize ISP84XX.\n"); qla84xx_put_chip(vha); } } @@ -797,9 +795,7 @@ qla2x00_isp_firmware(scsi_qla_host_t *vha) rval = QLA_FUNCTION_FAILED; if (ha->flags.disable_risc_code_load) { - DEBUG2(printk("scsi(%ld): RISC CODE NOT loaded\n", - vha->host_no)); - qla_printk(KERN_INFO, ha, "RISC CODE NOT loaded\n"); + ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n"); /* Verify checksum of loaded RISC code. 
*/ rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address); @@ -810,10 +806,9 @@ qla2x00_isp_firmware(scsi_qla_host_t *vha) } } - if (rval) { - DEBUG2_3(printk("scsi(%ld): **** Load RISC code ****\n", - vha->host_no)); - } + if (rval) + ql_dbg(ql_dbg_init, vha, 0x007a, + "**** Load RISC code ****.\n"); return (rval); } @@ -1105,8 +1100,8 @@ qla2x00_chip_diag(scsi_qla_host_t *vha) /* Assume a failed state */ rval = QLA_FUNCTION_FAILED; - DEBUG3(printk("scsi(%ld): Testing device at %lx.\n", - vha->host_no, (u_long)®->flash_address)); + ql_dbg(ql_dbg_init, vha, 0x007b, + "Testing device at %lx.\n", (u_long)®->flash_address); spin_lock_irqsave(&ha->hardware_lock, flags); @@ -1128,8 +1123,8 @@ qla2x00_chip_diag(scsi_qla_host_t *vha) if (!cnt) goto chip_diag_failed; - DEBUG3(printk("scsi(%ld): Reset register cleared by chip reset\n", - vha->host_no)); + ql_dbg(ql_dbg_init, vha, 0x007c, + "Reset register cleared by chip reset.\n"); /* Reset RISC processor. */ WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); @@ -1150,7 +1145,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha) goto chip_diag_failed; /* Check product ID of chip */ - DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", vha->host_no)); + ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product Id of chip.\n"); mb[1] = RD_MAILBOX_REG(ha, reg, 1); mb[2] = RD_MAILBOX_REG(ha, reg, 2); @@ -1158,8 +1153,9 @@ qla2x00_chip_diag(scsi_qla_host_t *vha) mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4)); if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) || mb[3] != PROD_ID_3) { - qla_printk(KERN_WARNING, ha, - "Wrong product ID = 0x%x,0x%x,0x%x\n", mb[1], mb[2], mb[3]); + ql_log(ql_log_warn, vha, 0x0062, + "Wrong product ID = 0x%x,0x%x,0x%x.\n", + mb[1], mb[2], mb[3]); goto chip_diag_failed; } @@ -1178,8 +1174,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha) if (IS_QLA2200(ha) && RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) { /* Limit firmware transfer size with a 2200A */ - DEBUG3(printk("scsi(%ld): Found QLA2200A chip.\n", - vha->host_no)); + ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n"); ha->device_type |= DT_ISP2200A; ha->fw_transfer_size = 128; @@ -1188,24 +1183,20 @@ qla2x00_chip_diag(scsi_qla_host_t *vha) /* Wrap Incoming Mailboxes Test. 
*/ spin_unlock_irqrestore(&ha->hardware_lock, flags); - DEBUG3(printk("scsi(%ld): Checking mailboxes.\n", vha->host_no)); + ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n"); rval = qla2x00_mbx_reg_test(vha); - if (rval) { - DEBUG(printk("scsi(%ld): Failed mailbox send register test\n", - vha->host_no)); - qla_printk(KERN_WARNING, ha, - "Failed mailbox send register test\n"); - } - else { + if (rval) + ql_log(ql_log_warn, vha, 0x0080, + "Failed mailbox send register test.\n"); + else /* Flag a successful rval */ rval = QLA_SUCCESS; - } spin_lock_irqsave(&ha->hardware_lock, flags); chip_diag_failed: if (rval) - DEBUG2_3(printk("scsi(%ld): Chip diagnostics **** FAILED " - "****\n", vha->host_no)); + ql_log(ql_log_info, vha, 0x0081, + "Chip diagnostics **** FAILED ****.\n"); spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -1232,10 +1223,8 @@ qla24xx_chip_diag(scsi_qla_host_t *vha) rval = qla2x00_mbx_reg_test(vha); if (rval) { - DEBUG(printk("scsi(%ld): Failed mailbox send register test\n", - vha->host_no)); - qla_printk(KERN_WARNING, ha, - "Failed mailbox send register test\n"); + ql_log(ql_log_warn, vha, 0x0082, + "Failed mailbox send register test.\n"); } else { /* Flag a successful rval */ rval = QLA_SUCCESS; @@ -1257,8 +1246,8 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) struct rsp_que *rsp = ha->rsp_q_map[0]; if (ha->fw_dump) { - qla_printk(KERN_WARNING, ha, - "Firmware dump previously allocated.\n"); + ql_dbg(ql_dbg_init, vha, 0x00bd, + "Firmware dump already allocated.\n"); return; } @@ -1288,8 +1277,9 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, GFP_KERNEL); if (!tc) { - qla_printk(KERN_WARNING, ha, "Unable to allocate " - "(%d KB) for FCE.\n", FCE_SIZE / 1024); + ql_log(ql_log_warn, vha, 0x00be, + "Unable to allocate (%d KB) for FCE.\n", + FCE_SIZE / 1024); goto try_eft; } @@ -1297,16 +1287,15 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS, ha->fce_mb, &ha->fce_bufs); if (rval) { - qla_printk(KERN_WARNING, ha, "Unable to initialize " - "FCE (%d).\n", rval); + ql_log(ql_log_warn, vha, 0x00bf, + "Unable to initialize FCE (%d).\n", rval); dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, tc_dma); ha->flags.fce_enabled = 0; goto try_eft; } - - qla_printk(KERN_INFO, ha, "Allocated (%d KB) for FCE...\n", - FCE_SIZE / 1024); + ql_log(ql_log_info, vha, 0x00c0, + "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024); fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE; ha->flags.fce_enabled = 1; @@ -1317,23 +1306,23 @@ try_eft: tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma, GFP_KERNEL); if (!tc) { - qla_printk(KERN_WARNING, ha, "Unable to allocate " - "(%d KB) for EFT.\n", EFT_SIZE / 1024); + ql_log(ql_log_warn, vha, 0x00c1, + "Unable to allocate (%d KB) for EFT.\n", + EFT_SIZE / 1024); goto cont_alloc; } memset(tc, 0, EFT_SIZE); rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS); if (rval) { - qla_printk(KERN_WARNING, ha, "Unable to initialize " - "EFT (%d).\n", rval); + ql_log(ql_log_warn, vha, 0x00c2, + "Unable to initialize EFT (%d).\n", rval); dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, tc_dma); goto cont_alloc; } - - qla_printk(KERN_INFO, ha, "Allocated (%d KB) for EFT...\n", - EFT_SIZE / 1024); + ql_log(ql_log_info, vha, 0x00c3, + "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024); eft_size = EFT_SIZE; ha->eft_dma = tc_dma; @@ -1350,8 +1339,9 @@ cont_alloc: ha->fw_dump = vmalloc(dump_size); if (!ha->fw_dump) { - 
qla_printk(KERN_WARNING, ha, "Unable to allocate (%d KB) for " - "firmware dump!!!\n", dump_size / 1024); + ql_log(ql_log_warn, vha, 0x00c4, + "Unable to allocate (%d KB) for firmware dump.\n", + dump_size / 1024); if (ha->fce) { dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, @@ -1368,8 +1358,8 @@ cont_alloc: } return; } - qla_printk(KERN_INFO, ha, "Allocated (%d KB) for firmware dump...\n", - dump_size / 1024); + ql_log(ql_log_info, vha, 0x00c5, + "Allocated (%d KB) for firmware dump.\n", dump_size / 1024); ha->fw_dump_len = dump_size; ha->fw_dump->signature[0] = 'Q'; @@ -1398,23 +1388,21 @@ qla81xx_mpi_sync(scsi_qla_host_t *vha) int rval; uint16_t dc; uint32_t dw; - struct qla_hw_data *ha = vha->hw; if (!IS_QLA81XX(vha->hw)) return QLA_SUCCESS; rval = qla2x00_write_ram_word(vha, 0x7c00, 1); if (rval != QLA_SUCCESS) { - DEBUG2(qla_printk(KERN_WARNING, ha, - "Sync-MPI: Unable to acquire semaphore.\n")); + ql_log(ql_log_warn, vha, 0x0105, + "Unable to acquire semaphore.\n"); goto done; } pci_read_config_word(vha->hw->pdev, 0x54, &dc); rval = qla2x00_read_ram_word(vha, 0x7a15, &dw); if (rval != QLA_SUCCESS) { - DEBUG2(qla_printk(KERN_WARNING, ha, - "Sync-MPI: Unable to read sync.\n")); + ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n"); goto done_release; } @@ -1426,15 +1414,14 @@ qla81xx_mpi_sync(scsi_qla_host_t *vha) dw |= dc; rval = qla2x00_write_ram_word(vha, 0x7a15, dw); if (rval != QLA_SUCCESS) { - DEBUG2(qla_printk(KERN_WARNING, ha, - "Sync-MPI: Unable to gain sync.\n")); + ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n"); } done_release: rval = qla2x00_write_ram_word(vha, 0x7c00, 0); if (rval != QLA_SUCCESS) { - DEBUG2(qla_printk(KERN_WARNING, ha, - "Sync-MPI: Unable to release semaphore.\n")); + ql_log(ql_log_warn, vha, 0x006d, + "Unable to release semaphore.\n"); } done: @@ -1479,14 +1466,14 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) /* Load firmware sequences */ rval = ha->isp_ops->load_risc(vha, &srisc_address); if (rval == QLA_SUCCESS) { - DEBUG(printk("scsi(%ld): Verifying Checksum of loaded RISC " - "code.\n", vha->host_no)); + ql_dbg(ql_dbg_init, vha, 0x00c9, + "Verifying Checksum of loaded RISC code.\n"); rval = qla2x00_verify_checksum(vha, srisc_address); if (rval == QLA_SUCCESS) { /* Start firmware execution. */ - DEBUG(printk("scsi(%ld): Checksum OK, start " - "firmware.\n", vha->host_no)); + ql_dbg(ql_dbg_init, vha, 0x00ca, + "Starting firmware.\n"); rval = qla2x00_execute_fw(vha, srisc_address); /* Retrieve firmware information. */ @@ -1522,9 +1509,9 @@ enable_82xx_npiv: } } } else { - DEBUG2(printk(KERN_INFO - "scsi(%ld): ISP Firmware failed checksum.\n", - vha->host_no)); + ql_log(ql_log_fatal, vha, 0x00cd, + "ISP Firmware failed checksum.\n"); + goto failed; } } @@ -1549,7 +1536,7 @@ enable_82xx_npiv: ha->flags.fac_supported = 1; ha->fdt_block_size = size << 2; } else { - qla_printk(KERN_ERR, ha, + ql_log(ql_log_warn, vha, 0x00ce, "Unsupported FAC firmware (%d.%02d.%02d).\n", ha->fw_major_version, ha->fw_minor_version, ha->fw_subminor_version); @@ -1557,8 +1544,8 @@ enable_82xx_npiv: } failed: if (rval) { - DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n", - vha->host_no)); + ql_log(ql_log_fatal, vha, 0x00cf, + "Setup chip ****FAILED****.\n"); } return (rval); @@ -1608,10 +1595,11 @@ qla2x00_update_fw_options(scsi_qla_host_t *vha) return; /* Serial Link options. 
*/ - DEBUG3(printk("scsi(%ld): Serial link options:\n", - vha->host_no)); - DEBUG3(qla2x00_dump_buffer((uint8_t *)&ha->fw_seriallink_options, - sizeof(ha->fw_seriallink_options))); + ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115, + "Serial link options.\n"); + ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109, + (uint8_t *)&ha->fw_seriallink_options, + sizeof(ha->fw_seriallink_options)); ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING; if (ha->fw_seriallink_options[3] & BIT_2) { @@ -1688,7 +1676,7 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha) le16_to_cpu(ha->fw_seriallink_options24[2]), le16_to_cpu(ha->fw_seriallink_options24[3])); if (rval != QLA_SUCCESS) { - qla_printk(KERN_WARNING, ha, + ql_log(ql_log_warn, vha, 0x0104, "Unable to update Serial Link options (%x).\n", rval); } } @@ -1746,8 +1734,9 @@ qla24xx_config_rings(struct scsi_qla_host *vha) icb->rid = __constant_cpu_to_le16(rid); if (ha->flags.msix_enabled) { msix = &ha->msix_entries[1]; - DEBUG2_17(printk(KERN_INFO - "Registering vector 0x%x for base que\n", msix->entry)); + ql_dbg(ql_dbg_init, vha, 0x00fd, + "Registering vector 0x%x for base que.\n", + msix->entry); icb->msix = cpu_to_le16(msix->entry); } /* Use alternate PCI bus number */ @@ -1764,8 +1753,8 @@ qla24xx_config_rings(struct scsi_qla_host *vha) icb->firmware_options_2 &= __constant_cpu_to_le32(~BIT_22); ha->flags.disable_msix_handshake = 1; - qla_printk(KERN_INFO, ha, - "MSIX Handshake Disable Mode turned on\n"); + ql_dbg(ql_dbg_init, vha, 0x00fe, + "MSIX Handshake Disable Mode turned on.\n"); } else { icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_22); @@ -1850,7 +1839,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha) /* Update any ISP specific firmware options before initialization. */ ha->isp_ops->update_fw_options(vha); - DEBUG(printk("scsi(%ld): Issue init firmware.\n", vha->host_no)); + ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n"); if (ha->flags.npiv_supported) { if (ha->operating_mode == LOOP) @@ -1866,11 +1855,11 @@ qla2x00_init_rings(scsi_qla_host_t *vha) rval = qla2x00_init_firmware(vha, ha->init_cb_size); if (rval) { - DEBUG2_3(printk("scsi(%ld): Init firmware **** FAILED ****.\n", - vha->host_no)); + ql_log(ql_log_fatal, vha, 0x00d2, + "Init Firmware **** FAILED ****.\n"); } else { - DEBUG3(printk("scsi(%ld): Init firmware -- success.\n", - vha->host_no)); + ql_dbg(ql_dbg_init, vha, 0x00d3, + "Init Firmware -- success.\n"); } return (rval); @@ -1913,10 +1902,8 @@ qla2x00_fw_ready(scsi_qla_host_t *vha) /* Wait for ISP to finish LIP */ if (!vha->flags.init_done) - qla_printk(KERN_INFO, ha, "Waiting for LIP to complete...\n"); - - DEBUG3(printk("scsi(%ld): Waiting for LIP to complete...\n", - vha->host_no)); + ql_log(ql_log_info, vha, 0x801e, + "Waiting for LIP to complete.\n"); do { rval = qla2x00_get_firmware_state(vha, state); @@ -1925,30 +1912,35 @@ qla2x00_fw_ready(scsi_qla_host_t *vha) vha->device_flags &= ~DFLG_NO_CABLE; } if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) { - DEBUG16(printk("scsi(%ld): fw_state=%x " - "84xx=%x.\n", vha->host_no, state[0], - state[2])); + ql_dbg(ql_dbg_taskm, vha, 0x801f, + "fw_state=%x 84xx=%x.\n", state[0], + state[2]); if ((state[2] & FSTATE_LOGGED_IN) && (state[2] & FSTATE_WAITING_FOR_VERIFY)) { - DEBUG16(printk("scsi(%ld): Sending " - "verify iocb.\n", vha->host_no)); + ql_dbg(ql_dbg_taskm, vha, 0x8028, + "Sending verify iocb.\n"); cs84xx_time = jiffies; rval = qla84xx_init_chip(vha); - if (rval != QLA_SUCCESS) + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, + vha, 0x8043, + "Init 
chip failed.\n"); break; + } /* Add time taken to initialize. */ cs84xx_time = jiffies - cs84xx_time; wtime += cs84xx_time; mtime += cs84xx_time; - DEBUG16(printk("scsi(%ld): Increasing " - "wait time by %ld. New time %ld\n", - vha->host_no, cs84xx_time, wtime)); + ql_dbg(ql_dbg_taskm, vha, 0x8042, + "Increasing wait time by %ld. " + "New time %ld.\n", cs84xx_time, + wtime); } } else if (state[0] == FSTATE_READY) { - DEBUG(printk("scsi(%ld): F/W Ready - OK \n", - vha->host_no)); + ql_dbg(ql_dbg_taskm, vha, 0x8037, + "F/W Ready - OK.\n"); qla2x00_get_retry_cnt(vha, &ha->retry_count, &ha->login_timeout, &ha->r_a_tov); @@ -1965,7 +1957,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha) * other than Wait for Login. */ if (time_after_eq(jiffies, mtime)) { - qla_printk(KERN_INFO, ha, + ql_log(ql_log_info, vha, 0x8038, "Cable is unplugged...\n"); vha->device_flags |= DFLG_NO_CABLE; @@ -1985,17 +1977,17 @@ qla2x00_fw_ready(scsi_qla_host_t *vha) /* Delay for a while */ msleep(500); - DEBUG3(printk("scsi(%ld): fw_state=%x curr time=%lx.\n", - vha->host_no, state[0], jiffies)); + ql_dbg(ql_dbg_taskm, vha, 0x8039, + "fw_state=%x curr time=%lx.\n", state[0], jiffies); } while (1); - DEBUG(printk("scsi(%ld): fw_state=%x (%x, %x, %x, %x) curr time=%lx.\n", - vha->host_no, state[0], state[1], state[2], state[3], state[4], - jiffies)); + ql_dbg(ql_dbg_taskm, vha, 0x803a, + "fw_state=%x (%x, %x, %x, %x) " "curr time=%lx.\n", state[0], + state[1], state[2], state[3], state[4], jiffies); if (rval) { - DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n", - vha->host_no)); + ql_log(ql_log_warn, vha, 0x803b, + "Firmware ready **** FAILED ****.\n"); } return (rval); @@ -2034,19 +2026,19 @@ qla2x00_configure_hba(scsi_qla_host_t *vha) if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) || IS_QLA8XXX_TYPE(ha) || (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) { - DEBUG2(printk("%s(%ld) Loop is in a transition state\n", - __func__, vha->host_no)); + ql_dbg(ql_dbg_disc, vha, 0x2008, + "Loop is in a transition state.\n"); } else { - qla_printk(KERN_WARNING, ha, - "ERROR -- Unable to get host loop ID.\n"); + ql_log(ql_log_warn, vha, 0x2009, + "Unable to get host loop ID.\n"); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); } return (rval); } if (topo == 4) { - qla_printk(KERN_INFO, ha, - "Cannot get topology - retrying.\n"); + ql_log(ql_log_info, vha, 0x200a, + "Cannot get topology - retrying.\n"); return (QLA_FUNCTION_FAILED); } @@ -2059,31 +2051,27 @@ qla2x00_configure_hba(scsi_qla_host_t *vha) switch (topo) { case 0: - DEBUG3(printk("scsi(%ld): HBA in NL topology.\n", - vha->host_no)); + ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n"); ha->current_topology = ISP_CFG_NL; strcpy(connect_type, "(Loop)"); break; case 1: - DEBUG3(printk("scsi(%ld): HBA in FL topology.\n", - vha->host_no)); + ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n"); ha->switch_cap = sw_cap; ha->current_topology = ISP_CFG_FL; strcpy(connect_type, "(FL_Port)"); break; case 2: - DEBUG3(printk("scsi(%ld): HBA in N P2P topology.\n", - vha->host_no)); + ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n"); ha->operating_mode = P2P; ha->current_topology = ISP_CFG_N; strcpy(connect_type, "(N_Port-to-N_Port)"); break; case 3: - DEBUG3(printk("scsi(%ld): HBA in F P2P topology.\n", - vha->host_no)); + ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n"); ha->switch_cap = sw_cap; ha->operating_mode = P2P; ha->current_topology = ISP_CFG_F; @@ -2091,9 +2079,8 @@ qla2x00_configure_hba(scsi_qla_host_t *vha) break; 
default: - DEBUG3(printk("scsi(%ld): HBA in unknown topology %x. " - "Using NL.\n", - vha->host_no, topo)); + ql_dbg(ql_dbg_disc, vha, 0x200f, + "HBA in unknown topology %x, using NL.\n", topo); ha->current_topology = ISP_CFG_NL; strcpy(connect_type, "(Loop)"); break; @@ -2106,14 +2093,16 @@ qla2x00_configure_hba(scsi_qla_host_t *vha) vha->d_id.b.al_pa = al_pa; if (!vha->flags.init_done) - qla_printk(KERN_INFO, ha, - "Topology - %s, Host Loop address 0x%x\n", + ql_log(ql_log_info, vha, 0x2010, + "Topology - %s, Host Loop address 0x%x.\n", connect_type, vha->loop_id); if (rval) { - DEBUG2_3(printk("scsi(%ld): FAILED.\n", vha->host_no)); + ql_log(ql_log_warn, vha, 0x2011, + "%s FAILED\n", __func__); } else { - DEBUG3(printk("scsi(%ld): exiting normally.\n", vha->host_no)); + ql_dbg(ql_dbg_disc, vha, 0x2012, + "%s success\n", __func__); } return(rval); @@ -2227,18 +2216,22 @@ qla2x00_nvram_config(scsi_qla_host_t *vha) for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++) chksum += *ptr++; - DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no)); - DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); + ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f, + "Contents of NVRAM.\n"); + ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110, + (uint8_t *)nv, ha->nvram_size); /* Bad NVRAM data, set defaults parameters. */ if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) { /* Reset NVRAM data. */ - qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: " - "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0], - nv->nvram_version); - qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet " - "invalid -- WWPN) defaults.\n"); + ql_log(ql_log_warn, vha, 0x0064, + "Inconisistent NVRAM " + "detected: checksum=0x%x id=%c version=0x%x.\n", + chksum, nv->id[0], nv->nvram_version); + ql_log(ql_log_warn, vha, 0x0065, + "Falling back to " + "functioning (yet invalid -- WWPN) defaults.\n"); /* * Set default initialization control block. @@ -2382,8 +2375,13 @@ qla2x00_nvram_config(scsi_qla_host_t *vha) /* * Set host adapter parameters. */ + + /* + * BIT_7 in the host-parameters section allows for modification to + * internal driver logging. + */ if (nv->host_p[0] & BIT_7) - ql2xextended_error_logging = 1; + ql2xextended_error_logging = 0x7fffffff; ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0); /* Always load RISC code on non ISP2[12]00 chips. 
*/ if (!IS_QLA2100(ha) && !IS_QLA2200(ha)) @@ -2488,10 +2486,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha) if (ha->zio_mode != QLA_ZIO_DISABLED) { ha->zio_mode = QLA_ZIO_MODE_6; - DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer " - "delay (%d us).\n", vha->host_no, ha->zio_mode, - ha->zio_timer * 100)); - qla_printk(KERN_INFO, ha, + ql_log(ql_log_info, vha, 0x0068, "ZIO mode %d enabled; timer delay (%d us).\n", ha->zio_mode, ha->zio_timer * 100); @@ -2502,8 +2497,8 @@ qla2x00_nvram_config(scsi_qla_host_t *vha) } if (rval) { - DEBUG2_3(printk(KERN_WARNING - "scsi(%ld): NVRAM configuration failed!\n", vha->host_no)); + ql_log(ql_log_warn, vha, 0x0069, + "NVRAM configuration failed.\n"); } return (rval); } @@ -2574,15 +2569,15 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) { rval = qla2x00_configure_hba(vha); if (rval != QLA_SUCCESS) { - DEBUG(printk("scsi(%ld): Unable to configure HBA.\n", - vha->host_no)); + ql_dbg(ql_dbg_disc, vha, 0x2013, + "Unable to configure HBA.\n"); return (rval); } } save_flags = flags = vha->dpc_flags; - DEBUG(printk("scsi(%ld): Configure loop -- dpc flags =0x%lx\n", - vha->host_no, flags)); + ql_dbg(ql_dbg_disc, vha, 0x2014, + "Configure loop -- dpc flags = 0x%lx.\n", flags); /* * If we have both an RSCN and PORT UPDATE pending then handle them @@ -2619,15 +2614,21 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) } if (test_bit(LOCAL_LOOP_UPDATE, &flags)) { - if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) + if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { + ql_dbg(ql_dbg_disc, vha, 0x2015, + "Loop resync needed, failing.\n"); rval = QLA_FUNCTION_FAILED; + } else rval = qla2x00_configure_local_loop(vha); } if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) { - if (LOOP_TRANSITION(vha)) + if (LOOP_TRANSITION(vha)) { + ql_dbg(ql_dbg_disc, vha, 0x201e, + "Needs RSCN update and loop transition.\n"); rval = QLA_FUNCTION_FAILED; + } else rval = qla2x00_configure_fabric(vha); } @@ -2638,16 +2639,17 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) rval = QLA_FUNCTION_FAILED; } else { atomic_set(&vha->loop_state, LOOP_READY); - - DEBUG(printk("scsi(%ld): LOOP READY\n", vha->host_no)); + ql_dbg(ql_dbg_disc, vha, 0x2069, + "LOOP READY.\n"); } } if (rval) { - DEBUG2_3(printk("%s(%ld): *** FAILED ***\n", - __func__, vha->host_no)); + ql_dbg(ql_dbg_disc, vha, 0x206a, + "%s *** FAILED ***.\n", __func__); } else { - DEBUG3(printk("%s: exiting normally\n", __func__)); + ql_dbg(ql_dbg_disc, vha, 0x206b, + "%s: exiting normally.\n", __func__); } /* Restore state if a resync event occurred during processing */ @@ -2695,8 +2697,10 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) new_fcport = NULL; entries = MAX_FIBRE_DEVICES; - DEBUG3(printk("scsi(%ld): Getting FCAL position map\n", vha->host_no)); - DEBUG3(qla2x00_get_fcal_position_map(vha, NULL)); + ql_dbg(ql_dbg_disc, vha, 0x2016, + "Getting FCAL position map.\n"); + if (ql2xextended_error_logging & ql_dbg_disc) + qla2x00_get_fcal_position_map(vha, NULL); /* Get list of logged in devices. 
*/ memset(ha->gid_list, 0, GID_LIST_SIZE); @@ -2705,14 +2709,17 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) if (rval != QLA_SUCCESS) goto cleanup_allocation; - DEBUG3(printk("scsi(%ld): Entries in ID list (%d)\n", - vha->host_no, entries)); - DEBUG3(qla2x00_dump_buffer((uint8_t *)ha->gid_list, - entries * sizeof(struct gid_list_info))); + ql_dbg(ql_dbg_disc, vha, 0x2017, + "Entries in ID list (%d).\n", entries); + ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075, + (uint8_t *)ha->gid_list, + entries * sizeof(struct gid_list_info)); /* Allocate temporary fcport for any new fcports discovered. */ new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); if (new_fcport == NULL) { + ql_log(ql_log_warn, vha, 0x2018, + "Memory allocation failed for fcport.\n"); rval = QLA_MEMORY_ALLOC_FAILED; goto cleanup_allocation; } @@ -2726,9 +2733,9 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) fcport->port_type != FCT_BROADCAST && (fcport->flags & FCF_FABRIC_DEVICE) == 0) { - DEBUG(printk("scsi(%ld): Marking port lost, " - "loop_id=0x%04x\n", - vha->host_no, fcport->loop_id)); + ql_dbg(ql_dbg_disc, vha, 0x2019, + "Marking port lost loop_id=0x%04x.\n", + fcport->loop_id); qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); } @@ -2769,12 +2776,12 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) new_fcport->vp_idx = vha->vp_idx; rval2 = qla2x00_get_port_database(vha, new_fcport, 0); if (rval2 != QLA_SUCCESS) { - DEBUG2(printk("scsi(%ld): Failed to retrieve fcport " - "information -- get_port_database=%x, " - "loop_id=0x%04x\n", - vha->host_no, rval2, new_fcport->loop_id)); - DEBUG2(printk("scsi(%ld): Scheduling resync...\n", - vha->host_no)); + ql_dbg(ql_dbg_disc, vha, 0x201a, + "Failed to retrieve fcport information " + "-- get_port_database=%x, loop_id=0x%04x.\n", + rval2, new_fcport->loop_id); + ql_dbg(ql_dbg_disc, vha, 0x201b, + "Scheduling resync.\n"); set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); continue; } @@ -2810,6 +2817,8 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) fcport = new_fcport; new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); if (new_fcport == NULL) { + ql_log(ql_log_warn, vha, 0x201c, + "Failed to allocate memory for fcport.\n"); rval = QLA_MEMORY_ALLOC_FAILED; goto cleanup_allocation; } @@ -2828,8 +2837,8 @@ cleanup_allocation: kfree(new_fcport); if (rval != QLA_SUCCESS) { - DEBUG2(printk("scsi(%ld): Configure local loop error exit: " - "rval=%x\n", vha->host_no, rval)); + ql_dbg(ql_dbg_disc, vha, 0x201d, + "Configure local loop error exit: rval=%x.\n", rval); } return (rval); @@ -2858,27 +2867,27 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed, mb); if (rval != QLA_SUCCESS) { - DEBUG2(printk("scsi(%ld): Unable to adjust iIDMA " - "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x %04x.\n", - vha->host_no, fcport->port_name[0], fcport->port_name[1], + ql_dbg(ql_dbg_disc, vha, 0x2004, + "Unable to adjust iIDMA " + "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x " + "%04x.\n", fcport->port_name[0], fcport->port_name[1], fcport->port_name[2], fcport->port_name[3], fcport->port_name[4], fcport->port_name[5], fcport->port_name[6], fcport->port_name[7], rval, - fcport->fp_speed, mb[0], mb[1])); + fcport->fp_speed, mb[0], mb[1]); } else { link_speed = link_speeds[LS_UNKNOWN]; if (fcport->fp_speed < 5) link_speed = link_speeds[fcport->fp_speed]; else if (fcport->fp_speed == 0x13) link_speed = link_speeds[5]; - DEBUG2(qla_printk(KERN_INFO, ha, - "iIDMA adjusted to %s GB/s on " 
- "%02x%02x%02x%02x%02x%02x%02x%02x.\n", - link_speed, fcport->port_name[0], - fcport->port_name[1], fcport->port_name[2], - fcport->port_name[3], fcport->port_name[4], - fcport->port_name[5], fcport->port_name[6], - fcport->port_name[7])); + ql_dbg(ql_dbg_disc, vha, 0x2005, + "iIDMA adjusted to %s GB/s " + "on %02x%02x%02x%02x%02x%02x%02x%02x.\n", link_speed, + fcport->port_name[0], fcport->port_name[1], + fcport->port_name[2], fcport->port_name[3], + fcport->port_name[4], fcport->port_name[5], + fcport->port_name[6], fcport->port_name[7]); } } @@ -2887,7 +2896,6 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) { struct fc_rport_identifiers rport_ids; struct fc_rport *rport; - struct qla_hw_data *ha = vha->hw; unsigned long flags; qla2x00_rport_del(fcport); @@ -2899,8 +2907,8 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids); if (!rport) { - qla_printk(KERN_WARNING, ha, - "Unable to allocate fc remote port!\n"); + ql_log(ql_log_warn, vha, 0x2006, + "Unable to allocate fc remote port.\n"); return; } spin_lock_irqsave(fcport->vha->host->host_lock, flags); @@ -2975,8 +2983,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) loop_id = SNS_FL_PORT; rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1); if (rval != QLA_SUCCESS) { - DEBUG2(printk("scsi(%ld): MBC_GET_PORT_NAME Failed, No FL " - "Port\n", vha->host_no)); + ql_dbg(ql_dbg_disc, vha, 0x201f, + "MBX_GET_PORT_NAME failed, No FL Port.\n"); vha->device_flags &= ~SWITCH_FOUND; return (QLA_SUCCESS); @@ -3003,32 +3011,32 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff, 0xfc, mb, BIT_1 | BIT_0); if (mb[0] != MBS_COMMAND_COMPLETE) { - DEBUG2(qla_printk(KERN_INFO, ha, - "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x " - "mb[2]=%x mb[6]=%x mb[7]=%x\n", loop_id, - mb[0], mb[1], mb[2], mb[6], mb[7])); + ql_dbg(ql_dbg_disc, vha, 0x2042, + "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x " + "mb[6]=%x mb[7]=%x.\n", loop_id, mb[0], mb[1], + mb[2], mb[6], mb[7]); return (QLA_SUCCESS); } if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) { if (qla2x00_rft_id(vha)) { /* EMPTY */ - DEBUG2(printk("scsi(%ld): Register FC-4 " - "TYPE failed.\n", vha->host_no)); + ql_dbg(ql_dbg_disc, vha, 0x2045, + "Register FC-4 TYPE failed.\n"); } if (qla2x00_rff_id(vha)) { /* EMPTY */ - DEBUG2(printk("scsi(%ld): Register FC-4 " - "Features failed.\n", vha->host_no)); + ql_dbg(ql_dbg_disc, vha, 0x2049, + "Register FC-4 Features failed.\n"); } if (qla2x00_rnn_id(vha)) { /* EMPTY */ - DEBUG2(printk("scsi(%ld): Register Node Name " - "failed.\n", vha->host_no)); + ql_dbg(ql_dbg_disc, vha, 0x204f, + "Register Node Name failed.\n"); } else if (qla2x00_rsnn_nn(vha)) { /* EMPTY */ - DEBUG2(printk("scsi(%ld): Register Symbolic " - "Node Name failed.\n", vha->host_no)); + ql_dbg(ql_dbg_disc, vha, 0x2053, + "Register Symobilic Node Name failed.\n"); } } @@ -3132,8 +3140,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) } if (rval) { - DEBUG2(printk("scsi(%ld): Configure fabric error exit: " - "rval=%d\n", vha->host_no, rval)); + ql_dbg(ql_dbg_disc, vha, 0x2068, + "Configure fabric error exit rval=%d.\n", rval); } return (rval); @@ -3175,8 +3183,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL); if (!swl) { /*EMPTY*/ - DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback " 
- "on GA_NXT\n", vha->host_no)); + ql_dbg(ql_dbg_disc, vha, 0x2054, + "GID_PT allocations failed, fallback on GA_NXT.\n"); } else { if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) { kfree(swl); @@ -3201,6 +3209,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, /* Allocate temporary fcport for any new fcports discovered. */ new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); if (new_fcport == NULL) { + ql_log(ql_log_warn, vha, 0x205e, + "Failed to allocate memory for fcport.\n"); kfree(swl); return (QLA_MEMORY_ALLOC_FAILED); } @@ -3247,9 +3257,9 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, /* Send GA_NXT to the switch */ rval = qla2x00_ga_nxt(vha, new_fcport); if (rval != QLA_SUCCESS) { - qla_printk(KERN_WARNING, ha, - "SNS scan failed -- assuming zero-entry " - "result...\n"); + ql_log(ql_log_warn, vha, 0x2064, + "SNS scan failed -- assuming " + "zero-entry result.\n"); list_for_each_entry_safe(fcport, fcptemp, new_fcports, list) { list_del(&fcport->list); @@ -3265,9 +3275,11 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, wrap.b24 = new_fcport->d_id.b24; first_dev = 0; } else if (new_fcport->d_id.b24 == wrap.b24) { - DEBUG2(printk("scsi(%ld): device wrap (%02x%02x%02x)\n", - vha->host_no, new_fcport->d_id.b.domain, - new_fcport->d_id.b.area, new_fcport->d_id.b.al_pa)); + ql_dbg(ql_dbg_disc, vha, 0x2065, + "Device wrap (%02x%02x%02x).\n", + new_fcport->d_id.b.domain, + new_fcport->d_id.b.area, + new_fcport->d_id.b.al_pa); break; } @@ -3372,6 +3384,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, nxt_d_id.b24 = new_fcport->d_id.b24; new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); if (new_fcport == NULL) { + ql_log(ql_log_warn, vha, 0x2066, + "Memory allocation failed for fcport.\n"); kfree(swl); return (QLA_MEMORY_ALLOC_FAILED); } @@ -3501,10 +3515,10 @@ qla2x00_device_resync(scsi_qla_host_t *vha) d_id.b.area = MSB(LSW(rscn_entry)); d_id.b.al_pa = LSB(LSW(rscn_entry)); - DEBUG(printk("scsi(%ld): RSCN queue entry[%d] = " - "[%02x/%02x%02x%02x].\n", - vha->host_no, vha->rscn_out_ptr, format, d_id.b.domain, - d_id.b.area, d_id.b.al_pa)); + ql_dbg(ql_dbg_disc, vha, 0x2020, + "RSCN queue entry[%d] = [%02x/%02x%02x%02x].\n", + vha->rscn_out_ptr, format, d_id.b.domain, d_id.b.area, + d_id.b.al_pa); vha->rscn_out_ptr++; if (vha->rscn_out_ptr == MAX_RSCN_COUNT) @@ -3520,17 +3534,17 @@ qla2x00_device_resync(scsi_qla_host_t *vha) if (rscn_entry != vha->rscn_queue[rscn_out_iter]) break; - DEBUG(printk("scsi(%ld): Skipping duplicate RSCN queue " - "entry found at [%d].\n", vha->host_no, - rscn_out_iter)); + ql_dbg(ql_dbg_disc, vha, 0x2021, + "Skipping duplicate RSCN queue entry found at " + "[%d].\n", rscn_out_iter); vha->rscn_out_ptr = rscn_out_iter; } /* Queue overflow, set switch default case. */ if (vha->flags.rscn_queue_overflow) { - DEBUG(printk("scsi(%ld): device_resync: rscn " - "overflow.\n", vha->host_no)); + ql_dbg(ql_dbg_disc, vha, 0x2022, + "device_resync: rscn overflow.\n"); format = 3; vha->flags.rscn_queue_overflow = 0; @@ -3659,10 +3673,11 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport, tmp_loopid = 0; for (;;) { - DEBUG(printk("scsi(%ld): Trying Fabric Login w/loop id 0x%04x " - "for port %02x%02x%02x.\n", - vha->host_no, fcport->loop_id, fcport->d_id.b.domain, - fcport->d_id.b.area, fcport->d_id.b.al_pa)); + ql_dbg(ql_dbg_disc, vha, 0x2000, + "Trying Fabric Login w/loop id 0x%04x for port " + "%02x%02x%02x.\n", + fcport->loop_id, fcport->d_id.b.domain, + fcport->d_id.b.area, fcport->d_id.b.al_pa); /* Login fcport on switch. 
*/ ha->isp_ops->fabric_login(vha, fcport->loop_id, @@ -3680,10 +3695,11 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport, tmp_loopid = fcport->loop_id; fcport->loop_id = mb[1]; - DEBUG(printk("Fabric Login: port in use - next " - "loop id=0x%04x, port Id=%02x%02x%02x.\n", + ql_dbg(ql_dbg_disc, vha, 0x2001, + "Fabric Login: port in use - next loop " + "id=0x%04x, port id= %02x%02x%02x.\n", fcport->loop_id, fcport->d_id.b.domain, - fcport->d_id.b.area, fcport->d_id.b.al_pa)); + fcport->d_id.b.area, fcport->d_id.b.al_pa); } else if (mb[0] == MBS_COMMAND_COMPLETE) { /* @@ -3744,11 +3760,11 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport, /* * unrecoverable / not handled error */ - DEBUG2(printk("%s(%ld): failed=%x port_id=%02x%02x%02x " - "loop_id=%x jiffies=%lx.\n", - __func__, vha->host_no, mb[0], - fcport->d_id.b.domain, fcport->d_id.b.area, - fcport->d_id.b.al_pa, fcport->loop_id, jiffies)); + ql_dbg(ql_dbg_disc, vha, 0x2002, + "Failed=%x port_id=%02x%02x%02x loop_id=%x " + "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain, + fcport->d_id.b.area, fcport->d_id.b.al_pa, + fcport->loop_id, jiffies); *next_loopid = fcport->loop_id; ha->isp_ops->fabric_logout(vha, fcport->loop_id, @@ -3852,7 +3868,8 @@ qla2x00_loop_resync(scsi_qla_host_t *vha) return (QLA_FUNCTION_FAILED); if (rval) - DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__)); + ql_dbg(ql_dbg_disc, vha, 0x206c, + "%s *** FAILED ***.\n", __func__); return (rval); } @@ -3929,8 +3946,8 @@ qla82xx_quiescent_state_cleanup(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *vp; - qla_printk(KERN_INFO, ha, - "Performing ISP error recovery - ha= %p.\n", ha); + ql_dbg(ql_dbg_p3p, vha, 0xb002, + "Performing ISP error recovery - ha=%p.\n", ha); atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); if (atomic_read(&vha->loop_state) != LOOP_DOWN) { @@ -3964,8 +3981,8 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); ha->qla_stats.total_isp_aborts++; - qla_printk(KERN_INFO, ha, - "Performing ISP error recovery - ha= %p.\n", ha); + ql_log(ql_log_info, vha, 0x00af, + "Performing ISP error recovery - ha=%p.\n", ha); /* For ISP82XX, reset_chip is just disabling interrupts. * Driver waits for the completion of the commands. @@ -4016,6 +4033,8 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) /* Make sure for ISP 82XX IO DMA is complete */ if (IS_QLA82XX(ha)) { qla82xx_chip_reset_cleanup(vha); + ql_log(ql_log_info, vha, 0x00b4, + "Done chip reset cleanup.\n"); /* Done waiting for pending commands. * Reset the online flag. @@ -4097,7 +4116,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) ha->fce_dma, ha->fce_bufs, ha->fce_mb, &ha->fce_bufs); if (rval) { - qla_printk(KERN_WARNING, ha, + ql_log(ql_log_warn, vha, 0x8033, "Unable to reinitialize FCE " "(%d).\n", rval); ha->flags.fce_enabled = 0; @@ -4109,7 +4128,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) rval = qla2x00_enable_eft_trace(vha, ha->eft_dma, EFT_NUM_BUFFERS); if (rval) { - qla_printk(KERN_WARNING, ha, + ql_log(ql_log_warn, vha, 0x8034, "Unable to reinitialize EFT " "(%d).\n", rval); } @@ -4118,9 +4137,9 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) vha->flags.online = 1; if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { if (ha->isp_abort_cnt == 0) { - qla_printk(KERN_WARNING, ha, - "ISP error recovery failed - " - "board disabled\n"); + ql_log(ql_log_fatal, vha, 0x8035, + "ISP error recover failed - " + "board disabled.\n"); /* * The next call disables the board * completely. 
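For reference, the pattern applied throughout these hunks is uniform: the compile-time DEBUG*()/qla_printk() wrappers are replaced by ql_dbg()/ql_log() calls that carry a per-call-site message id and are filtered at run time against the ql2xextended_error_logging mask (enabled via NVRAM host_p BIT_7, see the 0x7fffffff assignment above). An illustrative sketch of one such conversion, assuming the qla2xxx context (scsi_qla_host_t *vha and the qla_dbg.h definitions), taken from the qla2x00_chip_diag() hunk earlier in this file:

    /* old: printk compiled in only when extended error logging was built,
     * with a hand-rolled "scsi(%ld)" prefix
     */
    DEBUG3(printk("scsi(%ld): Checking mailboxes.\n", vha->host_no));

    /* new: the ql_dbg_init level is checked against ql2xextended_error_logging
     * at run time; 0x007f tags this particular call site
     */
    ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");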
@@ -4132,16 +4151,16 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) status = 0; } else { /* schedule another ISP abort */ ha->isp_abort_cnt--; - DEBUG(printk("qla%ld: ISP abort - " - "retry remaining %d\n", - vha->host_no, ha->isp_abort_cnt)); + ql_dbg(ql_dbg_taskm, vha, 0x8020, + "ISP abort - retry remaining %d.\n", + ha->isp_abort_cnt); status = 1; } } else { ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT; - DEBUG(printk("qla2x00(%ld): ISP error recovery " - "- retrying (%d) more times\n", - vha->host_no, ha->isp_abort_cnt)); + ql_dbg(ql_dbg_taskm, vha, 0x8021, + "ISP error recovery - retrying (%d) " + "more times.\n", ha->isp_abort_cnt); set_bit(ISP_ABORT_RETRY, &vha->dpc_flags); status = 1; } @@ -4150,9 +4169,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) } if (!status) { - DEBUG(printk(KERN_INFO - "qla2x00_abort_isp(%ld): succeeded.\n", - vha->host_no)); + ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__); spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry(vp, &ha->vp_list, list) { @@ -4169,8 +4186,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) spin_unlock_irqrestore(&ha->vport_slock, flags); } else { - qla_printk(KERN_INFO, ha, - "qla2x00_abort_isp: **** FAILED ****\n"); + ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n"); } return(status); @@ -4211,8 +4227,8 @@ qla2x00_restart_isp(scsi_qla_host_t *vha) status = qla2x00_fw_ready(vha); if (!status) { - DEBUG(printk("%s(): Start configure loop, " - "status = %d\n", __func__, status)); + ql_dbg(ql_dbg_taskm, vha, 0x8031, + "Start configure loop status = %d.\n", status); /* Issue a marker after FW becomes ready. */ qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); @@ -4234,9 +4250,8 @@ qla2x00_restart_isp(scsi_qla_host_t *vha) if ((vha->device_flags & DFLG_NO_CABLE)) status = 0; - DEBUG(printk("%s(): Configure loop done, status = 0x%x\n", - __func__, - status)); + ql_dbg(ql_dbg_taskm, vha, 0x8032, + "Configure loop done, status = 0x%x.\n", status); } return (status); } @@ -4256,13 +4271,13 @@ qla25xx_init_queues(struct qla_hw_data *ha) rsp->options &= ~BIT_0; ret = qla25xx_init_rsp_que(base_vha, rsp); if (ret != QLA_SUCCESS) - DEBUG2_17(printk(KERN_WARNING - "%s Rsp que:%d init failed\n", __func__, - rsp->id)); + ql_dbg(ql_dbg_init, base_vha, 0x00ff, + "%s Rsp que: %d init failed.\n", + __func__, rsp->id); else - DEBUG2_17(printk(KERN_INFO - "%s Rsp que:%d inited\n", __func__, - rsp->id)); + ql_dbg(ql_dbg_init, base_vha, 0x0100, + "%s Rsp que: %d inited.\n", + __func__, rsp->id); } } for (i = 1; i < ha->max_req_queues; i++) { @@ -4272,13 +4287,13 @@ qla25xx_init_queues(struct qla_hw_data *ha) req->options &= ~BIT_0; ret = qla25xx_init_req_que(base_vha, req); if (ret != QLA_SUCCESS) - DEBUG2_17(printk(KERN_WARNING - "%s Req que:%d init failed\n", __func__, - req->id)); + ql_dbg(ql_dbg_init, base_vha, 0x0101, + "%s Req que: %d init failed.\n", + __func__, req->id); else - DEBUG2_17(printk(KERN_WARNING - "%s Req que:%d inited\n", __func__, - req->id)); + ql_dbg(ql_dbg_init, base_vha, 0x0102, + "%s Req que: %d inited.\n", + __func__, req->id); } } return ret; @@ -4397,19 +4412,22 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) chksum += le32_to_cpu(*dptr++); - DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no)); - DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); + ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a, + "Contents of NVRAM\n"); + ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d, + (uint8_t *)nv, ha->nvram_size); /* Bad 
NVRAM data, set defaults parameters. */ if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) { /* Reset NVRAM data. */ - qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: " - "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0], - le16_to_cpu(nv->nvram_version)); - qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet " - "invalid -- WWPN) defaults.\n"); + ql_log(ql_log_warn, vha, 0x006b, + "Inconisistent NVRAM detected: checksum=0x%x id=%c " + "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version); + ql_log(ql_log_warn, vha, 0x006c, + "Falling back to functioning (yet invalid -- WWPN) " + "defaults.\n"); /* * Set default initialization control block. @@ -4587,10 +4605,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) if (ha->zio_mode != QLA_ZIO_DISABLED) { ha->zio_mode = QLA_ZIO_MODE_6; - DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay " - "(%d us).\n", vha->host_no, ha->zio_mode, - ha->zio_timer * 100)); - qla_printk(KERN_INFO, ha, + ql_log(ql_log_info, vha, 0x006f, "ZIO mode %d enabled; timer delay (%d us).\n", ha->zio_mode, ha->zio_timer * 100); @@ -4601,8 +4616,8 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) } if (rval) { - DEBUG2_3(printk(KERN_WARNING - "scsi(%ld): NVRAM configuration failed!\n", vha->host_no)); + ql_log(ql_log_warn, vha, 0x0070, + "NVRAM configuration failed.\n"); } return (rval); } @@ -4620,8 +4635,8 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; - qla_printk(KERN_INFO, ha, - "FW: Loading from flash (%x)...\n", faddr); + ql_dbg(ql_dbg_init, vha, 0x008b, + "Loading firmware from flash (%x).\n", faddr); rval = QLA_SUCCESS; @@ -4637,11 +4652,12 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) || (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && dcode[3] == 0)) { - qla_printk(KERN_WARNING, ha, - "Unable to verify integrity of flash firmware image!\n"); - qla_printk(KERN_WARNING, ha, - "Firmware data: %08x %08x %08x %08x!\n", dcode[0], - dcode[1], dcode[2], dcode[3]); + ql_log(ql_log_fatal, vha, 0x008c, + "Unable to verify the integrity of flash firmware " + "image.\n"); + ql_log(ql_log_fatal, vha, 0x008d, + "Firmware data: %08x %08x %08x %08x.\n", + dcode[0], dcode[1], dcode[2], dcode[3]); return QLA_FUNCTION_FAILED; } @@ -4660,9 +4676,10 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, if (dlen > risc_size) dlen = risc_size; - DEBUG7(printk("scsi(%ld): Loading risc segment@ risc " - "addr %x, number of dwords 0x%x, offset 0x%x.\n", - vha->host_no, risc_addr, dlen, faddr)); + ql_dbg(ql_dbg_init, vha, 0x008e, + "Loading risc segment@ risc addr %x " + "number of dwords 0x%x offset 0x%x.\n", + risc_addr, dlen, faddr); qla24xx_read_flash_data(vha, dcode, faddr, dlen); for (i = 0; i < dlen; i++) @@ -4671,12 +4688,9 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen); if (rval) { - DEBUG(printk("scsi(%ld):[ERROR] Failed to load " - "segment %d of firmware\n", vha->host_no, - fragment)); - qla_printk(KERN_WARNING, ha, - "[ERROR] Failed to load segment %d of " - "firmware\n", fragment); + ql_log(ql_log_fatal, vha, 0x008f, + "Failed to load segment %d of firmware.\n", + fragment); break; } @@ -4709,9 +4723,10 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) /* Load 
firmware blob. */ blob = qla2x00_request_firmware(vha); if (!blob) { - qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n"); - qla_printk(KERN_ERR, ha, "Firmware images can be retrieved " - "from: " QLA_FW_URL ".\n"); + ql_log(ql_log_info, vha, 0x0083, + "Fimware image unavailable.\n"); + ql_log(ql_log_info, vha, 0x0084, + "Firmware images can be retrieved from: "QLA_FW_URL ".\n"); return QLA_FUNCTION_FAILED; } @@ -4724,8 +4739,8 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) /* Validate firmware image by checking version. */ if (blob->fw->size < 8 * sizeof(uint16_t)) { - qla_printk(KERN_WARNING, ha, - "Unable to verify integrity of firmware image (%Zd)!\n", + ql_log(ql_log_fatal, vha, 0x0085, + "Unable to verify integrity of firmware image (%Zd).\n", blob->fw->size); goto fail_fw_integrity; } @@ -4734,11 +4749,11 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff && wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 && wcode[2] == 0 && wcode[3] == 0)) { - qla_printk(KERN_WARNING, ha, - "Unable to verify integrity of firmware image!\n"); - qla_printk(KERN_WARNING, ha, - "Firmware data: %04x %04x %04x %04x!\n", wcode[0], - wcode[1], wcode[2], wcode[3]); + ql_log(ql_log_fatal, vha, 0x0086, + "Unable to verify integrity of firmware image.\n"); + ql_log(ql_log_fatal, vha, 0x0087, + "Firmware data: %04x %04x %04x %04x.\n", + wcode[0], wcode[1], wcode[2], wcode[3]); goto fail_fw_integrity; } @@ -4751,9 +4766,9 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) /* Validate firmware image size. */ fwclen += risc_size * sizeof(uint16_t); if (blob->fw->size < fwclen) { - qla_printk(KERN_WARNING, ha, + ql_log(ql_log_fatal, vha, 0x0088, "Unable to verify integrity of firmware image " - "(%Zd)!\n", blob->fw->size); + "(%Zd).\n", blob->fw->size); goto fail_fw_integrity; } @@ -4762,10 +4777,9 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) wlen = (uint16_t)(ha->fw_transfer_size >> 1); if (wlen > risc_size) wlen = risc_size; - - DEBUG7(printk("scsi(%ld): Loading risc segment@ risc " - "addr %x, number of words 0x%x.\n", vha->host_no, - risc_addr, wlen)); + ql_dbg(ql_dbg_init, vha, 0x0089, + "Loading risc segment@ risc addr %x number of " + "words 0x%x.\n", risc_addr, wlen); for (i = 0; i < wlen; i++) wcode[i] = swab16(fwcode[i]); @@ -4773,12 +4787,9 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) rval = qla2x00_load_ram(vha, req->dma, risc_addr, wlen); if (rval) { - DEBUG(printk("scsi(%ld):[ERROR] Failed to load " - "segment %d of firmware\n", vha->host_no, - fragment)); - qla_printk(KERN_WARNING, ha, - "[ERROR] Failed to load segment %d of " - "firmware\n", fragment); + ql_log(ql_log_fatal, vha, 0x008a, + "Failed to load segment %d of firmware.\n", + fragment); break; } @@ -4814,15 +4825,17 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr) /* Load firmware blob. 
*/ blob = qla2x00_request_firmware(vha); if (!blob) { - qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n"); - qla_printk(KERN_ERR, ha, "Firmware images can be retrieved " - "from: " QLA_FW_URL ".\n"); + ql_log(ql_log_warn, vha, 0x0090, + "Fimware image unavailable.\n"); + ql_log(ql_log_warn, vha, 0x0091, + "Firmware images can be retrieved from: " + QLA_FW_URL ".\n"); return QLA_FUNCTION_FAILED; } - qla_printk(KERN_INFO, ha, - "FW: Loading via request-firmware...\n"); + ql_log(ql_log_info, vha, 0x0092, + "Loading via request-firmware.\n"); rval = QLA_SUCCESS; @@ -4834,8 +4847,8 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr) /* Validate firmware image by checking version. */ if (blob->fw->size < 8 * sizeof(uint32_t)) { - qla_printk(KERN_WARNING, ha, - "Unable to verify integrity of firmware image (%Zd)!\n", + ql_log(ql_log_fatal, vha, 0x0093, + "Unable to verify integrity of firmware image (%Zd).\n", blob->fw->size); goto fail_fw_integrity; } @@ -4845,11 +4858,12 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr) dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) || (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && dcode[3] == 0)) { - qla_printk(KERN_WARNING, ha, - "Unable to verify integrity of firmware image!\n"); - qla_printk(KERN_WARNING, ha, - "Firmware data: %08x %08x %08x %08x!\n", dcode[0], - dcode[1], dcode[2], dcode[3]); + ql_log(ql_log_fatal, vha, 0x0094, + "Unable to verify integrity of firmware image (%Zd).\n", + blob->fw->size); + ql_log(ql_log_fatal, vha, 0x0095, + "Firmware data: %08x %08x %08x %08x.\n", + dcode[0], dcode[1], dcode[2], dcode[3]); goto fail_fw_integrity; } @@ -4861,9 +4875,9 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr) /* Validate firmware image size. 
*/ fwclen += risc_size * sizeof(uint32_t); if (blob->fw->size < fwclen) { - qla_printk(KERN_WARNING, ha, + ql_log(ql_log_fatal, vha, 0x0096, "Unable to verify integrity of firmware image " - "(%Zd)!\n", blob->fw->size); + "(%Zd).\n", blob->fw->size); goto fail_fw_integrity; } @@ -4874,9 +4888,9 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr) if (dlen > risc_size) dlen = risc_size; - DEBUG7(printk("scsi(%ld): Loading risc segment@ risc " - "addr %x, number of dwords 0x%x.\n", vha->host_no, - risc_addr, dlen)); + ql_dbg(ql_dbg_init, vha, 0x0097, + "Loading risc segment@ risc addr %x " + "number of dwords 0x%x.\n", risc_addr, dlen); for (i = 0; i < dlen; i++) dcode[i] = swab32(fwcode[i]); @@ -4884,12 +4898,9 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr) rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen); if (rval) { - DEBUG(printk("scsi(%ld):[ERROR] Failed to load " - "segment %d of firmware\n", vha->host_no, - fragment)); - qla_printk(KERN_WARNING, ha, - "[ERROR] Failed to load segment %d of " - "firmware\n", fragment); + ql_log(ql_log_fatal, vha, 0x0098, + "Failed to load segment %d of firmware.\n", + fragment); break; } @@ -4953,14 +4964,13 @@ try_blob_fw: if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw) return rval; - qla_printk(KERN_ERR, ha, - "FW: Attempting to fallback to golden firmware...\n"); + ql_log(ql_log_info, vha, 0x0099, + "Attempting to fallback to golden firmware.\n"); rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw); if (rval != QLA_SUCCESS) return rval; - qla_printk(KERN_ERR, ha, - "FW: Please update operational firmware...\n"); + ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n"); ha->flags.running_gold_fw = 1; return rval; @@ -4987,8 +4997,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha) continue; if (qla2x00_setup_chip(vha) != QLA_SUCCESS) continue; - qla_printk(KERN_INFO, ha, - "Attempting retry of stop-firmware command...\n"); + ql_log(ql_log_info, vha, 0x8015, + "Attempting retry of stop-firmware command.\n"); ret = qla2x00_stop_firmware(vha); } } @@ -5023,10 +5033,10 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha) /* Login to SNS first */ ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1); if (mb[0] != MBS_COMMAND_COMPLETE) { - DEBUG15(qla_printk(KERN_INFO, ha, - "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x " - "mb[2]=%x mb[6]=%x mb[7]=%x\n", NPH_SNS, - mb[0], mb[1], mb[2], mb[6], mb[7])); + ql_dbg(ql_dbg_init, vha, 0x0103, + "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x " + "mb[6]=%x mb[7]=%x.\n", + NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]); return (QLA_FUNCTION_FAILED); } @@ -5146,19 +5156,23 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) chksum += le32_to_cpu(*dptr++); - DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no)); - DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); + ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111, + "Contents of NVRAM:\n"); + ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112, + (uint8_t *)nv, ha->nvram_size); /* Bad NVRAM data, set defaults parameters. */ if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) { /* Reset NVRAM data. 
*/ - qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: " - "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0], + ql_log(ql_log_info, vha, 0x0073, + "Inconisistent NVRAM detected: checksum=0x%x id=%c " + "version=0x%x.\n", chksum, nv->id[0], le16_to_cpu(nv->nvram_version)); - qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet " - "invalid -- WWPN) defaults.\n"); + ql_log(ql_log_info, vha, 0x0074, + "Falling back to functioning (yet invalid -- WWPN) " + "defaults.\n"); /* * Set default initialization control block. @@ -5350,12 +5364,10 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) if (ha->zio_mode != QLA_ZIO_DISABLED) { ha->zio_mode = QLA_ZIO_MODE_6; - DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay " - "(%d us).\n", vha->host_no, ha->zio_mode, - ha->zio_timer * 100)); - qla_printk(KERN_INFO, ha, + ql_log(ql_log_info, vha, 0x0075, "ZIO mode %d enabled; timer delay (%d us).\n", - ha->zio_mode, ha->zio_timer * 100); + ha->zio_mode, + ha->zio_timer * 100); icb->firmware_options_2 |= cpu_to_le32( (uint32_t)ha->zio_mode); @@ -5364,8 +5376,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) } if (rval) { - DEBUG2_3(printk(KERN_WARNING - "scsi(%ld): NVRAM configuration failed!\n", vha->host_no)); + ql_log(ql_log_warn, vha, 0x0076, + "NVRAM configuration failed.\n"); } return (rval); } @@ -5388,9 +5400,8 @@ qla82xx_restart_isp(scsi_qla_host_t *vha) status = qla2x00_fw_ready(vha); if (!status) { - qla_printk(KERN_INFO, ha, - "%s(): Start configure loop, " - "status = %d\n", __func__, status); + ql_log(ql_log_info, vha, 0x803c, + "Start configure loop, status =%d.\n", status); /* Issue a marker after FW becomes ready. */ qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); @@ -5412,9 +5423,8 @@ qla82xx_restart_isp(scsi_qla_host_t *vha) if ((vha->device_flags & DFLG_NO_CABLE)) status = 0; - qla_printk(KERN_INFO, ha, - "%s(): Configure loop done, status = 0x%x\n", - __func__, status); + ql_log(ql_log_info, vha, 0x803d, + "Configure loop done, status = 0x%x.\n", status); } if (!status) { @@ -5450,9 +5460,9 @@ qla82xx_restart_isp(scsi_qla_host_t *vha) ha->fce_dma, ha->fce_bufs, ha->fce_mb, &ha->fce_bufs); if (rval) { - qla_printk(KERN_WARNING, ha, - "Unable to reinitialize FCE " - "(%d).\n", rval); + ql_log(ql_log_warn, vha, 0x803e, + "Unable to reinitialize FCE (%d).\n", + rval); ha->flags.fce_enabled = 0; } } @@ -5462,17 +5472,16 @@ qla82xx_restart_isp(scsi_qla_host_t *vha) rval = qla2x00_enable_eft_trace(vha, ha->eft_dma, EFT_NUM_BUFFERS); if (rval) { - qla_printk(KERN_WARNING, ha, - "Unable to reinitialize EFT " - "(%d).\n", rval); + ql_log(ql_log_warn, vha, 0x803f, + "Unable to reinitialize EFT (%d).\n", + rval); } } } if (!status) { - DEBUG(printk(KERN_INFO - "qla82xx_restart_isp(%ld): succeeded.\n", - vha->host_no)); + ql_dbg(ql_dbg_taskm, vha, 0x8040, + "qla82xx_restart_isp succeeded.\n"); spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry(vp, &ha->vp_list, list) { @@ -5489,8 +5498,8 @@ qla82xx_restart_isp(scsi_qla_host_t *vha) spin_unlock_irqrestore(&ha->vport_slock, flags); } else { - qla_printk(KERN_INFO, ha, - "qla82xx_restart_isp: **** FAILED ****\n"); + ql_log(ql_log_warn, vha, 0x8041, + "qla82xx_restart_isp **** FAILED ****.\n"); } return status; @@ -5640,9 +5649,8 @@ qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport) if (ret == QLA_SUCCESS) fcport->fcp_prio = priority; else - DEBUG2(printk(KERN_WARNING - "scsi(%ld): Unable to activate fcp priority, " - " ret=0x%x\n", vha->host_no, ret)); + ql_dbg(ql_dbg_user, vha, 0x704f, + 
"Unable to activate fcp priority, ret=0x%x.\n", ret); return ret; } diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index 4c8167e11f6..d2e904bc21c 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -94,11 +94,11 @@ qla2x00_set_fcport_state(fc_port_t *fcport, int state) /* Don't print state transitions during initial allocation of fcport */ if (old_state && old_state != state) { - DEBUG(qla_printk(KERN_WARNING, fcport->vha->hw, - "scsi(%ld): FCPort state transitioned from %s to %s - " - "portid=%02x%02x%02x.\n", fcport->vha->host_no, + ql_dbg(ql_dbg_disc, fcport->vha, 0x207d, + "FCPort state transitioned from %s to %s - " + "portid=%02x%02x%02x.\n", port_state_str[old_state], port_state_str[state], fcport->d_id.b.domain, fcport->d_id.b.area, - fcport->d_id.b.al_pa)); + fcport->d_id.b.al_pa); } } diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 7bac3cd109d..49d6906af88 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -150,7 +150,8 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts) /* We only support T10 DIF right now */ if (guard != SHOST_DIX_GUARD_CRC) { - DEBUG2(printk(KERN_ERR "Unsupported guard: %d\n", guard)); + ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007, + "Unsupported guard: %d for cmd=%p.\n", guard, sp->cmd); return 0; } @@ -343,9 +344,10 @@ qla2x00_start_scsi(srb_t *sp) /* Send marker if required */ if (vha->marker_needed != 0) { - if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) - != QLA_SUCCESS) + if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) != + QLA_SUCCESS) { return (QLA_FUNCTION_FAILED); + } vha->marker_needed = 0; } @@ -490,8 +492,8 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req, mrk24 = NULL; mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0); if (mrk == NULL) { - DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n", - __func__, base_vha->host_no)); + ql_log(ql_log_warn, base_vha, 0x3026, + "Failed to allocate Marker IOCB.\n"); return (QLA_FUNCTION_FAILED); } @@ -547,9 +549,10 @@ qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req) device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id); struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp; - DEBUG5(printk("%s(): IOCB data:\n", __func__)); - DEBUG5(qla2x00_dump_buffer( - (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE)); + ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x302d, + "IOCB data:\n"); + ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e, + (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE); /* Adjust ring index. */ req->ring_index++; @@ -604,7 +607,7 @@ qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req) * Returns the number of IOCB entries needed to store @dsds. 
*/ inline uint16_t -qla24xx_calc_iocbs(uint16_t dsds) +qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds) { uint16_t iocbs; @@ -614,8 +617,6 @@ qla24xx_calc_iocbs(uint16_t dsds) if ((dsds - 1) % 5) iocbs++; } - DEBUG3(printk(KERN_DEBUG "%s(): Required PKT(s) = %d\n", - __func__, iocbs)); return iocbs; } @@ -712,6 +713,7 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt, unsigned int protcnt) { struct sd_dif_tuple *spt; + scsi_qla_host_t *vha = shost_priv(cmd->device->host); unsigned char op = scsi_get_prot_op(cmd); switch (scsi_get_prot_type(cmd)) { @@ -768,9 +770,9 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt, op == SCSI_PROT_WRITE_PASS)) { spt = page_address(sg_page(scsi_prot_sglist(cmd))) + scsi_prot_sglist(cmd)[0].offset; - DEBUG18(printk(KERN_DEBUG - "%s(): LBA from user %p, lba = 0x%x\n", - __func__, spt, (int)spt->ref_tag)); + ql_dbg(ql_dbg_io, vha, 0x3008, + "LBA from user %p, lba = 0x%x for cmd=%p.\n", + spt, (int)spt->ref_tag, cmd); pkt->ref_tag = swab32(spt->ref_tag); pkt->app_tag_mask[0] = 0x0; pkt->app_tag_mask[1] = 0x0; @@ -789,11 +791,11 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt, break; } - DEBUG18(printk(KERN_DEBUG - "%s(): Setting protection Tags: (BIG) ref tag = 0x%x," - " app tag = 0x%x, prot SG count %d , cmd lba 0x%x," - " prot_type=%u\n", __func__, pkt->ref_tag, pkt->app_tag, protcnt, - (int)scsi_get_lba(cmd), scsi_get_prot_type(cmd))); + ql_dbg(ql_dbg_io, vha, 0x3009, + "Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, " + "prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n", + pkt->ref_tag, pkt->app_tag, protcnt, (int)scsi_get_lba(cmd), + scsi_get_prot_type(cmd), cmd); } @@ -809,6 +811,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, uint32_t *cur_dsd = dsd; int i; uint16_t used_dsds = tot_dsds; + scsi_qla_host_t *vha = shost_priv(sp->cmd->device->host); uint8_t *cp; @@ -853,9 +856,10 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, cur_dsd = (uint32_t *)next_dsd; } sle_dma = sg_dma_address(sg); - DEBUG18(printk("%s(): %p, sg entry %d - addr =0x%x 0x%x," - " len =%d\n", __func__ , cur_dsd, i, LSD(sle_dma), - MSD(sle_dma), sg_dma_len(sg))); + ql_dbg(ql_dbg_io, vha, 0x300a, + "sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n", + cur_dsd, i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg), + sp->cmd); *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); @@ -863,8 +867,8 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) { cp = page_address(sg_page(sg)) + sg->offset; - DEBUG18(printk("%s(): User Data buffer= %p:\n", - __func__ , cp)); + ql_dbg(ql_dbg_io, vha, 0x300b, + "User data buffer=%p for cmd=%p.\n", cp, sp->cmd); } } /* Null termination */ @@ -888,7 +892,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, struct scsi_cmnd *cmd; uint32_t *cur_dsd = dsd; uint16_t used_dsds = tot_dsds; - + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); uint8_t *cp; @@ -935,10 +939,11 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, } sle_dma = sg_dma_address(sg); if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) { - DEBUG18(printk(KERN_DEBUG - "%s(): %p, sg entry %d - addr =0x%x" - "0x%x, len =%d\n", __func__ , cur_dsd, i, - LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg))); + ql_dbg(ql_dbg_io, 
vha, 0x3027, + "%s(): %p, sg_entry %d - " + "addr=0x%x0x%x, len=%d.\n", + __func__, cur_dsd, i, + LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg)); } *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); @@ -946,8 +951,9 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) { cp = page_address(sg_page(sg)) + sg->offset; - DEBUG18(printk("%s(): Protection Data buffer = %p:\n", - __func__ , cp)); + ql_dbg(ql_dbg_io, vha, 0x3028, + "%s(): Protection Data buffer = %p.\n", __func__, + cp); } avail_dsds--; } @@ -996,22 +1002,16 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, *((uint32_t *)(&cmd_pkt->entry_type)) = __constant_cpu_to_le32(COMMAND_TYPE_CRC_2); + vha = sp->fcport->vha; + ha = vha->hw; + /* No data transfer */ data_bytes = scsi_bufflen(cmd); if (!data_bytes || cmd->sc_data_direction == DMA_NONE) { - DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n", - __func__, data_bytes)); cmd_pkt->byte_count = __constant_cpu_to_le32(0); return QLA_SUCCESS; } - vha = sp->fcport->vha; - ha = vha->hw; - - DEBUG18(printk(KERN_DEBUG - "%s(%ld): Executing cmd sp %p, prot_op=%u.\n", __func__, - vha->host_no, sp, scsi_get_prot_op(sp->cmd))); - cmd_pkt->vp_index = sp->fcport->vp_idx; /* Set transfer direction */ @@ -1056,8 +1056,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, /* Determine SCSI command length -- align to 4 byte boundary */ if (cmd->cmd_len > 16) { - DEBUG18(printk(KERN_INFO "%s(): **** SCSI CMD > 16\n", - __func__)); additional_fcpcdb_len = cmd->cmd_len - 16; if ((cmd->cmd_len % 4) != 0) { /* SCSI cmd > 16 bytes must be multiple of 4 */ @@ -1108,11 +1106,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */ - DEBUG18(printk(KERN_INFO "%s(%ld): Total SG(s) Entries %d, Data" - "entries %d, data bytes %d, Protection entries %d\n", - __func__, vha->host_no, tot_dsds, (tot_dsds-tot_prot_dsds), - data_bytes, tot_prot_dsds)); - /* Compute dif len and adjust data len to incude protection */ total_bytes = data_bytes; dif_bytes = 0; @@ -1150,14 +1143,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, additional_fcpcdb_len); *fcp_dl = htonl(total_bytes); - DEBUG18(printk(KERN_INFO "%s(%ld): dif bytes = 0x%x (%d), total bytes" - " = 0x%x (%d), dat block size =0x%x (%d)\n", __func__, - vha->host_no, dif_bytes, dif_bytes, total_bytes, total_bytes, - crc_ctx_pkt->blk_size, crc_ctx_pkt->blk_size)); - if (!data_bytes || cmd->sc_data_direction == DMA_NONE) { - DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n", - __func__, data_bytes)); cmd_pkt->byte_count = __constant_cpu_to_le32(0); return QLA_SUCCESS; } @@ -1182,8 +1168,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, return QLA_SUCCESS; crc_queuing_error: - DEBUG18(qla_printk(KERN_INFO, ha, - "CMD sent FAILED crc_q error:sp = %p\n", sp)); /* Cleanup will be performed by the caller */ return QLA_FUNCTION_FAILED; @@ -1225,8 +1209,8 @@ qla24xx_start_scsi(srb_t *sp) /* Send marker if required */ if (vha->marker_needed != 0) { - if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) - != QLA_SUCCESS) + if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) != + QLA_SUCCESS) return QLA_FUNCTION_FAILED; vha->marker_needed = 0; } @@ -1243,8 +1227,9 @@ qla24xx_start_scsi(srb_t *sp) if (!req->outstanding_cmds[handle]) break; } - if (index 
== MAX_OUTSTANDING_COMMANDS) + if (index == MAX_OUTSTANDING_COMMANDS) { goto queuing_error; + } /* Map the sg table so we have an accurate count of sg entries needed */ if (scsi_sg_count(cmd)) { @@ -1256,8 +1241,7 @@ qla24xx_start_scsi(srb_t *sp) nseg = 0; tot_dsds = nseg; - - req_cnt = qla24xx_calc_iocbs(tot_dsds); + req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); if (req->cnt < (req_cnt + 2)) { cnt = RD_REG_DWORD_RELAXED(req->req_q_out); @@ -1322,7 +1306,6 @@ qla24xx_start_scsi(srb_t *sp) /* Specify response queue number where completion should happen */ cmd_pkt->entry_status = (uint8_t) rsp->id; wmb(); - /* Adjust ring index. */ req->ring_index++; if (req->ring_index == req->length) { @@ -1534,9 +1517,6 @@ queuing_error: /* Cleanup will be performed by the caller (queuecommand) */ spin_unlock_irqrestore(&ha->hardware_lock, flags); - - DEBUG18(qla_printk(KERN_INFO, ha, - "CMD sent FAILED SCSI prot_op:%02x\n", scsi_get_prot_op(cmd))); return QLA_FUNCTION_FAILED; } @@ -1581,8 +1561,11 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp) if (!req->outstanding_cmds[handle]) break; } - if (index == MAX_OUTSTANDING_COMMANDS) + if (index == MAX_OUTSTANDING_COMMANDS) { + ql_log(ql_log_warn, vha, 0x700b, + "No room on oustanding cmd array.\n"); goto queuing_error; + } /* Prep command array. */ req->current_outstanding_cmd = handle; @@ -1999,8 +1982,11 @@ qla2x00_start_sp(srb_t *sp) rval = QLA_FUNCTION_FAILED; spin_lock_irqsave(&ha->hardware_lock, flags); pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp); - if (!pkt) + if (!pkt) { + ql_log(ql_log_warn, sp->fcport->vha, 0x700c, + "qla2x00_alloc_iocbs failed.\n"); goto done; + } rval = QLA_SUCCESS; switch (ctx->type) { diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 1b60a95adb5..b16b7725dee 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -45,7 +45,7 @@ qla2100_intr_handler(int irq, void *dev_id) rsp = (struct rsp_que *) dev_id; if (!rsp) { printk(KERN_INFO - "%s(): NULL response queue pointer\n", __func__); + "%s(): NULL response queue pointer.\n", __func__); return (IRQ_NONE); } @@ -63,7 +63,7 @@ qla2100_intr_handler(int irq, void *dev_id) /* * Issue a "HARD" reset in order for the RISC interrupt - * bit to be cleared. Schedule a big hammmer to get + * bit to be cleared. Schedule a big hammer to get * out of the RISC PAUSED state. */ WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); @@ -91,9 +91,9 @@ qla2100_intr_handler(int irq, void *dev_id) qla2x00_async_event(vha, rsp, mb); } else { /*EMPTY*/ - DEBUG2(printk("scsi(%ld): Unrecognized " - "interrupt type (%d).\n", - vha->host_no, mb[0])); + ql_dbg(ql_dbg_async, vha, 0x5025, + "Unrecognized interrupt type (%d).\n", + mb[0]); } /* Release mailbox registers. 
*/ WRT_REG_WORD(&reg->semaphore, 0); @@ -142,7 +142,7 @@ qla2300_intr_handler(int irq, void *dev_id) rsp = (struct rsp_que *) dev_id; if (!rsp) { printk(KERN_INFO - "%s(): NULL response queue pointer\n", __func__); + "%s(): NULL response queue pointer.\n", __func__); return (IRQ_NONE); } @@ -160,16 +160,18 @@ qla2300_intr_handler(int irq, void *dev_id) hccr = RD_REG_WORD(&reg->hccr); if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8)) - qla_printk(KERN_INFO, ha, "Parity error -- " - "HCCR=%x, Dumping firmware!\n", hccr); + ql_log(ql_log_warn, vha, 0x5026, + "Parity error -- HCCR=%x, Dumping " + "firmware.\n", hccr); else - qla_printk(KERN_INFO, ha, "RISC paused -- " - "HCCR=%x, Dumping firmware!\n", hccr); + ql_log(ql_log_warn, vha, 0x5027, + "RISC paused -- HCCR=%x, Dumping " + "firmware.\n", hccr); /* * Issue a "HARD" reset in order for the RISC * interrupt bit to be cleared. Schedule a big - * hammmer to get out of the RISC PAUSED state. + * hammer to get out of the RISC PAUSED state. */ WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); RD_REG_WORD(&reg->hccr); @@ -213,9 +215,8 @@ qla2300_intr_handler(int irq, void *dev_id) qla2x00_async_event(vha, rsp, mb); break; default: - DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " - "(%d).\n", - vha->host_no, stat & 0xff)); + ql_dbg(ql_dbg_async, vha, 0x5028, + "Unrecognized interrupt type (%d).\n", stat & 0xff); break; } WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); @@ -262,11 +263,11 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) } if (ha->mcp) { - DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n", - __func__, vha->host_no, ha->mcp->mb[0])); + ql_dbg(ql_dbg_async, vha, 0x5000, + "Got mbx completion. cmd=%x.\n", ha->mcp->mb[0]); } else { - DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n", - __func__, vha->host_no)); + ql_dbg(ql_dbg_async, vha, 0x5001, + "MBX pointer ERROR.\n"); } } @@ -285,22 +286,24 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr) for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++) mb[cnt] = RD_REG_WORD(wptr); - DEBUG2(printk("scsi(%ld): Inter-Driver Commucation %s -- " - "%04x %04x %04x %04x %04x %04x %04x.\n", vha->host_no, - event[aen & 0xff], - mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6])); + ql_dbg(ql_dbg_async, vha, 0x5021, + "Inter-Driver Communication %s -- " + "%04x %04x %04x %04x %04x %04x %04x.\n", + event[aen & 0xff], mb[0], mb[1], mb[2], mb[3], + mb[4], mb[5], mb[6]); /* Acknowledgement needed? [Notify && non-zero timeout]. */ timeout = (descr >> 8) & 0xf; if (aen != MBA_IDC_NOTIFY || !timeout) return; - DEBUG2(printk("scsi(%ld): Inter-Driver Commucation %s -- " - "ACK timeout=%d.\n", vha->host_no, event[aen & 0xff], timeout)); + ql_dbg(ql_dbg_async, vha, 0x5022, + "Inter-Driver Communication %s -- ACK timeout=%d.\n", + event[aen & 0xff], timeout); rval = qla2x00_post_idc_ack_work(vha, mb); if (rval != QLA_SUCCESS) - qla_printk(KERN_WARNING, vha->hw, + ql_log(ql_log_warn, vha, 0x5023, "IDC failed to post ACK.\n"); } @@ -393,15 +396,15 @@ skip_rio: break; case MBA_RESET: /* Reset */ - DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n", - vha->host_no)); + ql_dbg(ql_dbg_async, vha, 0x5002, + "Asynchronous RESET.\n"); set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); break; case MBA_SYSTEM_ERR: /* System Error */ mbx = IS_QLA81XX(ha) ? 
RD_REG_WORD(®24->mailbox7) : 0; - qla_printk(KERN_INFO, ha, + ql_log(ql_log_warn, vha, 0x5003, "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh " "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx); @@ -409,7 +412,7 @@ skip_rio: if (IS_FWI2_CAPABLE(ha)) { if (mb[1] == 0 && mb[2] == 0) { - qla_printk(KERN_ERR, ha, + ql_log(ql_log_fatal, vha, 0x5004, "Unrecoverable Hardware Error: adapter " "marked OFFLINE!\n"); vha->flags.online = 0; @@ -422,7 +425,7 @@ skip_rio: set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); } } else if (mb[1] == 0) { - qla_printk(KERN_INFO, ha, + ql_log(ql_log_fatal, vha, 0x5005, "Unrecoverable Hardware Error: adapter marked " "OFFLINE!\n"); vha->flags.online = 0; @@ -431,31 +434,27 @@ skip_rio: break; case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ - DEBUG2(printk("scsi(%ld): ISP Request Transfer Error (%x).\n", - vha->host_no, mb[1])); - qla_printk(KERN_WARNING, ha, - "ISP Request Transfer Error (%x).\n", mb[1]); + ql_log(ql_log_warn, vha, 0x5006, + "ISP Request Transfer Error (%x).\n", mb[1]); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ - DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n", - vha->host_no)); - qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n"); + ql_log(ql_log_warn, vha, 0x5007, + "ISP Response Transfer Error.\n"); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ - DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n", - vha->host_no)); + ql_dbg(ql_dbg_async, vha, 0x5008, + "Asynchronous WAKEUP_THRES.\n"); break; case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ - DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", vha->host_no, - mb[1])); - qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]); + ql_log(ql_log_info, vha, 0x5009, + "LIP occurred (%x).\n", mb[1]); if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); @@ -488,10 +487,8 @@ skip_rio: ha->link_data_rate = mb[1]; } - DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n", - vha->host_no, link_speed)); - qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n", - link_speed); + ql_log(ql_log_info, vha, 0x500a, + "LOOP UP detected (%s Gbps).\n", link_speed); vha->flags.management_server_logged_in = 0; qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); @@ -500,12 +497,9 @@ skip_rio: case MBA_LOOP_DOWN: /* Loop Down Event */ mbx = IS_QLA81XX(ha) ? RD_REG_WORD(®24->mailbox4) : 0; mbx = IS_QLA82XX(ha) ? 
RD_REG_WORD(®82->mailbox_out[4]) : mbx; - DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN " - "(%x %x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3], - mbx)); - qla_printk(KERN_INFO, ha, - "LOOP DOWN detected (%x %x %x %x).\n", mb[1], mb[2], mb[3], - mbx); + ql_log(ql_log_info, vha, 0x500b, + "LOOP DOWN detected (%x %x %x %x).\n", + mb[1], mb[2], mb[3], mbx); if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); @@ -525,9 +519,7 @@ skip_rio: break; case MBA_LIP_RESET: /* LIP reset occurred */ - DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n", - vha->host_no, mb[1])); - qla_printk(KERN_INFO, ha, + ql_log(ql_log_info, vha, 0x500c, "LIP reset occurred (%x).\n", mb[1]); if (atomic_read(&vha->loop_state) != LOOP_DOWN) { @@ -554,14 +546,15 @@ skip_rio: break; if (IS_QLA8XXX_TYPE(ha)) { - DEBUG2(printk("scsi(%ld): DCBX Completed -- %04x %04x " - "%04x\n", vha->host_no, mb[1], mb[2], mb[3])); + ql_dbg(ql_dbg_async, vha, 0x500d, + "DCBX Completed -- %04x %04x %04x.\n", + mb[1], mb[2], mb[3]); if (ha->notify_dcbx_comp) complete(&ha->dcbx_comp); } else - DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE " - "received.\n", vha->host_no)); + ql_dbg(ql_dbg_async, vha, 0x500e, + "Asynchronous P2P MODE received.\n"); /* * Until there's a transition from loop down to loop up, treat @@ -594,10 +587,7 @@ skip_rio: if (IS_QLA2100(ha)) break; - DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection " - "received.\n", - vha->host_no)); - qla_printk(KERN_INFO, ha, + ql_log(ql_log_info, vha, 0x500f, "Configuration change detected: value=%x.\n", mb[1]); if (atomic_read(&vha->loop_state) != LOOP_DOWN) { @@ -640,11 +630,9 @@ skip_rio: /* Global event -- port logout or port unavailable. */ if (mb[1] == 0xffff && mb[2] == 0x7) { - DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n", - vha->host_no)); - DEBUG(printk(KERN_INFO - "scsi(%ld): Port unavailable %04x %04x %04x.\n", - vha->host_no, mb[1], mb[2], mb[3])); + ql_dbg(ql_dbg_async, vha, 0x5010, + "Port unavailable %04x %04x %04x.\n", + mb[1], mb[2], mb[3]); if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); @@ -674,17 +662,15 @@ skip_rio: atomic_set(&vha->loop_down_timer, 0); if (atomic_read(&vha->loop_state) != LOOP_DOWN && atomic_read(&vha->loop_state) != LOOP_DEAD) { - DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE " - "ignored %04x/%04x/%04x.\n", vha->host_no, mb[1], - mb[2], mb[3])); + ql_dbg(ql_dbg_async, vha, 0x5011, + "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", + mb[1], mb[2], mb[3]); break; } - DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n", - vha->host_no)); - DEBUG(printk(KERN_INFO - "scsi(%ld): Port database changed %04x %04x %04x.\n", - vha->host_no, mb[1], mb[2], mb[3])); + ql_dbg(ql_dbg_async, vha, 0x5012, + "Port database changed %04x %04x %04x.\n", + mb[1], mb[2], mb[3]); /* * Mark all devices as missing so we will login again. 
@@ -707,20 +693,17 @@ skip_rio: if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff)) break; - DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n", - vha->host_no)); - DEBUG(printk(KERN_INFO - "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n", - vha->host_no, mb[1], mb[2], mb[3])); + ql_dbg(ql_dbg_async, vha, 0x5013, + "RSCN database changed -- %04x %04x %04x.\n", + mb[1], mb[2], mb[3]); rscn_entry = ((mb[1] & 0xff) << 16) | mb[2]; host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8) | vha->d_id.b.al_pa; if (rscn_entry == host_pid) { - DEBUG(printk(KERN_INFO - "scsi(%ld): Ignoring RSCN update to local host " - "port ID (%06x)\n", - vha->host_no, host_pid)); + ql_dbg(ql_dbg_async, vha, 0x5014, + "Ignoring RSCN update to local host " + "port ID (%06x).\n", host_pid); break; } @@ -747,8 +730,8 @@ skip_rio: /* case MBA_RIO_RESPONSE: */ case MBA_ZIO_RESPONSE: - DEBUG3(printk("scsi(%ld): [R|Z]IO update completion.\n", - vha->host_no)); + ql_dbg(ql_dbg_async, vha, 0x5015, + "[R|Z]IO update completion.\n"); if (IS_FWI2_CAPABLE(ha)) qla24xx_process_response_queue(vha, rsp); @@ -757,61 +740,68 @@ skip_rio: break; case MBA_DISCARD_RND_FRAME: - DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x " - "%04x.\n", vha->host_no, mb[1], mb[2], mb[3])); + ql_dbg(ql_dbg_async, vha, 0x5016, + "Discard RND Frame -- %04x %04x %04x.\n", + mb[1], mb[2], mb[3]); break; case MBA_TRACE_NOTIFICATION: - DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n", - vha->host_no, mb[1], mb[2])); + ql_dbg(ql_dbg_async, vha, 0x5017, + "Trace Notification -- %04x %04x.\n", mb[1], mb[2]); break; case MBA_ISP84XX_ALERT: - DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- " - "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3])); + ql_dbg(ql_dbg_async, vha, 0x5018, + "ISP84XX Alert Notification -- %04x %04x %04x.\n", + mb[1], mb[2], mb[3]); spin_lock_irqsave(&ha->cs84xx->access_lock, flags); switch (mb[1]) { case A84_PANIC_RECOVERY: - qla_printk(KERN_INFO, ha, "Alert 84XX: panic recovery " - "%04x %04x\n", mb[2], mb[3]); + ql_log(ql_log_info, vha, 0x5019, + "Alert 84XX: panic recovery %04x %04x.\n", + mb[2], mb[3]); break; case A84_OP_LOGIN_COMPLETE: ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2]; - DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:" - "firmware version %x\n", ha->cs84xx->op_fw_version)); + ql_log(ql_log_info, vha, 0x501a, + "Alert 84XX: firmware version %x.\n", + ha->cs84xx->op_fw_version); break; case A84_DIAG_LOGIN_COMPLETE: ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; - DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:" - "diagnostic firmware version %x\n", - ha->cs84xx->diag_fw_version)); + ql_log(ql_log_info, vha, 0x501b, + "Alert 84XX: diagnostic firmware version %x.\n", + ha->cs84xx->diag_fw_version); break; case A84_GOLD_LOGIN_COMPLETE: ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; ha->cs84xx->fw_update = 1; - DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: gold " - "firmware version %x\n", - ha->cs84xx->gold_fw_version)); + ql_log(ql_log_info, vha, 0x501c, + "Alert 84XX: gold firmware version %x.\n", + ha->cs84xx->gold_fw_version); break; default: - qla_printk(KERN_ERR, ha, - "Alert 84xx: Invalid Alert %04x %04x %04x\n", + ql_log(ql_log_warn, vha, 0x501d, + "Alert 84xx: Invalid Alert %04x %04x %04x.\n", mb[1], mb[2], mb[3]); } spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags); break; case MBA_DCBX_START: - DEBUG2(printk("scsi(%ld): DCBX Started -- %04x %04x %04x\n", - vha->host_no, mb[1], mb[2], mb[3])); + ql_dbg(ql_dbg_async, vha, 0x501e, + "DCBX 
Started -- %04x %04x %04x.\n", + mb[1], mb[2], mb[3]); break; case MBA_DCBX_PARAM_UPDATE: - DEBUG2(printk("scsi(%ld): DCBX Parameters Updated -- " - "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3])); + ql_dbg(ql_dbg_async, vha, 0x501f, + "DCBX Parameters Updated -- %04x %04x %04x.\n", + mb[1], mb[2], mb[3]); break; case MBA_FCF_CONF_ERR: - DEBUG2(printk("scsi(%ld): FCF Configuration Error -- " - "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3])); + ql_dbg(ql_dbg_async, vha, 0x5020, + "FCF Configuration Error -- %04x %04x %04x.\n", + mb[1], mb[2], mb[3]); break; case MBA_IDC_COMPLETE: case MBA_IDC_NOTIFY: @@ -838,10 +828,8 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha, /* Validate handle. */ if (index >= MAX_OUTSTANDING_COMMANDS) { - DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n", - vha->host_no, index)); - qla_printk(KERN_WARNING, ha, - "Invalid SCSI completion handle %d.\n", index); + ql_log(ql_log_warn, vha, 0x3014, + "Invalid SCSI command index (%x).\n", index); if (IS_QLA82XX(ha)) set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); @@ -859,10 +847,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha, sp->cmd->result = DID_OK << 16; qla2x00_sp_compl(ha, sp); } else { - DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion" - " handle(0x%x)\n", vha->host_no, req->id, index)); - qla_printk(KERN_WARNING, ha, - "Invalid ISP SCSI completion handle\n"); + ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n"); if (IS_QLA82XX(ha)) set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); @@ -882,8 +867,8 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, index = LSW(pkt->handle); if (index >= MAX_OUTSTANDING_COMMANDS) { - qla_printk(KERN_WARNING, ha, - "%s: Invalid completion handle (%x).\n", func, index); + ql_log(ql_log_warn, vha, 0x5031, + "Invalid command index (%x).\n", index); if (IS_QLA82XX(ha)) set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); else @@ -892,15 +877,13 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, } sp = req->outstanding_cmds[index]; if (!sp) { - qla_printk(KERN_WARNING, ha, - "%s: Invalid completion handle (%x) -- timed-out.\n", func, - index); + ql_log(ql_log_warn, vha, 0x5032, + "Invalid completion handle (%x) -- timed-out.\n", index); return sp; } if (sp->handle != index) { - qla_printk(KERN_WARNING, ha, - "%s: SRB handle (%x) mismatch %x.\n", func, sp->handle, - index); + ql_log(ql_log_warn, vha, 0x5033, + "SRB handle (%x) mismatch %x.\n", sp->handle, index); return NULL; } @@ -937,17 +920,17 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 
QLA_LOGIO_LOGIN_RETRIED : 0; if (mbx->entry_status) { - DEBUG2(printk(KERN_WARNING - "scsi(%ld:%x): Async-%s error entry - portid=%02x%02x%02x " + ql_dbg(ql_dbg_async, vha, 0x5043, + "Async-%s error entry - portid=%02x%02x%02x " "entry-status=%x status=%x state-flag=%x " "status-flags=%x.\n", - fcport->vha->host_no, sp->handle, type, - fcport->d_id.b.domain, fcport->d_id.b.area, + type, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, mbx->entry_status, le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags), - le16_to_cpu(mbx->status_flags))); + le16_to_cpu(mbx->status_flags)); - DEBUG2(qla2x00_dump_buffer((uint8_t *)mbx, sizeof(*mbx))); + ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5057, + (uint8_t *)mbx, sizeof(*mbx)); goto logio_done; } @@ -957,12 +940,10 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) status = 0; if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) { - DEBUG2(printk(KERN_DEBUG - "scsi(%ld:%x): Async-%s complete - portid=%02x%02x%02x " - "mbx1=%x.\n", - fcport->vha->host_no, sp->handle, type, - fcport->d_id.b.domain, fcport->d_id.b.area, - fcport->d_id.b.al_pa, le16_to_cpu(mbx->mb1))); + ql_dbg(ql_dbg_async, vha, 0x5045, + "Async-%s complete - portid=%02x%02x%02x mbx1=%x.\n", + type, fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa, le16_to_cpu(mbx->mb1)); data[0] = MBS_COMMAND_COMPLETE; if (ctx->type == SRB_LOGIN_CMD) { @@ -987,14 +968,14 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, break; } - DEBUG2(printk(KERN_WARNING - "scsi(%ld:%x): Async-%s failed - portid=%02x%02x%02x status=%x " + ql_log(ql_log_warn, vha, 0x5046, + "Async-%s failed - portid=%02x%02x%02x status=%x " "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", - fcport->vha->host_no, sp->handle, type, fcport->d_id.b.domain, + type, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1), le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6), - le16_to_cpu(mbx->mb7))); + le16_to_cpu(mbx->mb7)); logio_done: lio->done(sp); @@ -1025,9 +1006,8 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, type = "ct pass-through"; break; default: - qla_printk(KERN_WARNING, ha, - "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp, - sp_bsg->type); + ql_log(ql_log_warn, vha, 0x5047, + "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type); return; } @@ -1045,20 +1025,20 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, bsg_job->reply->reply_payload_rcv_len = le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len); - DEBUG2(qla_printk(KERN_WARNING, ha, - "scsi(%ld): CT pass-through-%s error " + ql_log(ql_log_warn, vha, 0x5048, + "CT pass-through-%s error " "comp_status-status=0x%x total_byte = 0x%x.\n", - vha->host_no, type, comp_status, - bsg_job->reply->reply_payload_rcv_len)); + type, comp_status, + bsg_job->reply->reply_payload_rcv_len); } else { - DEBUG2(qla_printk(KERN_WARNING, ha, - "scsi(%ld): CT pass-through-%s error " - "comp_status-status=0x%x.\n", - vha->host_no, type, comp_status)); + ql_log(ql_log_warn, vha, 0x5049, + "CT pass-through-%s error " + "comp_status-status=0x%x.\n", type, comp_status); bsg_job->reply->result = DID_ERROR << 16; bsg_job->reply->reply_payload_rcv_len = 0; } - DEBUG2(qla2x00_dump_buffer((uint8_t *)pkt, sizeof(*pkt))); + ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5058, + (uint8_t *)pkt, sizeof(*pkt)); } else { bsg_job->reply->result = DID_OK << 16; 
bsg_job->reply->reply_payload_rcv_len = @@ -1110,9 +1090,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, type = "ct pass-through"; break; default: - qla_printk(KERN_WARNING, ha, - "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp, - sp_bsg->type); + ql_log(ql_log_warn, vha, 0x503e, + "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type); return; } @@ -1132,27 +1111,31 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, bsg_job->reply->reply_payload_rcv_len = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count); - DEBUG2(qla_printk(KERN_WARNING, ha, - "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x " + ql_log(ql_log_info, vha, 0x503f, + "ELS-CT pass-through-%s error comp_status-status=0x%x " "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n", - vha->host_no, sp->handle, type, comp_status, fw_status[1], fw_status[2], - le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count))); + type, comp_status, fw_status[1], fw_status[2], + le16_to_cpu(((struct els_sts_entry_24xx *) + pkt)->total_byte_count)); fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); } else { - DEBUG2(qla_printk(KERN_WARNING, ha, - "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x " + ql_log(ql_log_info, vha, 0x5040, + "ELS-CT pass-through-%s error comp_status-status=0x%x " "error subcode 1=0x%x error subcode 2=0x%x.\n", - vha->host_no, sp->handle, type, comp_status, - le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1), - le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2))); + type, comp_status, + le16_to_cpu(((struct els_sts_entry_24xx *) + pkt)->error_subcode_1), + le16_to_cpu(((struct els_sts_entry_24xx *) + pkt)->error_subcode_2)); bsg_job->reply->result = DID_ERROR << 16; bsg_job->reply->reply_payload_rcv_len = 0; fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); } - DEBUG2(qla2x00_dump_buffer((uint8_t *)pkt, sizeof(*pkt))); + ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5056, + (uint8_t *)pkt, sizeof(*pkt)); } else { bsg_job->reply->result = DID_OK << 16; @@ -1201,25 +1184,24 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 
QLA_LOGIO_LOGIN_RETRIED : 0; if (logio->entry_status) { - DEBUG2(printk(KERN_WARNING - "scsi(%ld:%x): Async-%s error entry - " + ql_log(ql_log_warn, vha, 0x5034, + "Async-%s error entry - " "portid=%02x%02x%02x entry-status=%x.\n", - fcport->vha->host_no, sp->handle, type, - fcport->d_id.b.domain, fcport->d_id.b.area, - fcport->d_id.b.al_pa, logio->entry_status)); - DEBUG2(qla2x00_dump_buffer((uint8_t *)logio, sizeof(*logio))); + type, fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa, logio->entry_status); + ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5059, + (uint8_t *)logio, sizeof(*logio)); goto logio_done; } if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { - DEBUG2(printk(KERN_DEBUG - "scsi(%ld:%x): Async-%s complete - portid=%02x%02x%02x " + ql_dbg(ql_dbg_async, vha, 0x5036, + "Async-%s complete - portid=%02x%02x%02x " "iop0=%x.\n", - fcport->vha->host_no, sp->handle, type, - fcport->d_id.b.domain, fcport->d_id.b.area, + type, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, - le32_to_cpu(logio->io_parameter[0]))); + le32_to_cpu(logio->io_parameter[0])); data[0] = MBS_COMMAND_COMPLETE; if (ctx->type != SRB_LOGIN_CMD) @@ -1256,14 +1238,14 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, break; } - DEBUG2(printk(KERN_WARNING - "scsi(%ld:%x): Async-%s failed - portid=%02x%02x%02x comp=%x " + ql_dbg(ql_dbg_async, vha, 0x5037, + "Async-%s failed - portid=%02x%02x%02x comp=%x " "iop0=%x iop1=%x.\n", - fcport->vha->host_no, sp->handle, type, fcport->d_id.b.domain, + type, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, le16_to_cpu(logio->comp_status), le32_to_cpu(logio->io_parameter[0]), - le32_to_cpu(logio->io_parameter[1]))); + le32_to_cpu(logio->io_parameter[1])); logio_done: lio->done(sp); @@ -1292,38 +1274,34 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, fcport = sp->fcport; if (sts->entry_status) { - DEBUG2(printk(KERN_WARNING - "scsi(%ld:%x): Async-%s error - entry-status(%x).\n", - fcport->vha->host_no, sp->handle, type, - sts->entry_status)); + ql_log(ql_log_warn, vha, 0x5038, + "Async-%s error - entry-status(%x).\n", + type, sts->entry_status); } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { - DEBUG2(printk(KERN_WARNING - "scsi(%ld:%x): Async-%s error - completion status(%x).\n", - fcport->vha->host_no, sp->handle, type, - sts->comp_status)); + ql_log(ql_log_warn, vha, 0x5039, + "Async-%s error - completion status(%x).\n", + type, sts->comp_status); } else if (!(le16_to_cpu(sts->scsi_status) & SS_RESPONSE_INFO_LEN_VALID)) { - DEBUG2(printk(KERN_WARNING - "scsi(%ld:%x): Async-%s error - no response info(%x).\n", - fcport->vha->host_no, sp->handle, type, - sts->scsi_status)); + ql_log(ql_log_warn, vha, 0x503a, + "Async-%s error - no response info(%x).\n", + type, sts->scsi_status); } else if (le32_to_cpu(sts->rsp_data_len) < 4) { - DEBUG2(printk(KERN_WARNING - "scsi(%ld:%x): Async-%s error - not enough response(%d).\n", - fcport->vha->host_no, sp->handle, type, - sts->rsp_data_len)); + ql_log(ql_log_warn, vha, 0x503b, + "Async-%s error - not enough response(%d).\n", + type, sts->rsp_data_len); } else if (sts->data[3]) { - DEBUG2(printk(KERN_WARNING - "scsi(%ld:%x): Async-%s error - response(%x).\n", - fcport->vha->host_no, sp->handle, type, - sts->data[3])); + ql_log(ql_log_warn, vha, 0x503c, + "Async-%s error - response(%x).\n", + type, sts->data[3]); } else { error = 0; } if (error) { iocb->u.tmf.data = error; - DEBUG2(qla2x00_dump_buffer((uint8_t *)sts, 
sizeof(*sts))); + ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055, + (uint8_t *)sts, sizeof(*sts)); } iocb->done(sp); @@ -1360,8 +1338,8 @@ qla2x00_process_response_queue(struct rsp_que *rsp) } if (pkt->entry_status != 0) { - DEBUG3(printk(KERN_INFO - "scsi(%ld): Process error entry.\n", vha->host_no)); + ql_log(ql_log_warn, vha, 0x5035, + "Process error entry.\n"); qla2x00_error_entry(vha, rsp, pkt); ((response_t *)pkt)->signature = RESPONSE_PROCESSED; @@ -1399,10 +1377,10 @@ qla2x00_process_response_queue(struct rsp_que *rsp) break; default: /* Type Not Supported. */ - DEBUG4(printk(KERN_WARNING - "scsi(%ld): Received unknown response pkt type %x " + ql_log(ql_log_warn, vha, 0x504a, + "Received unknown response pkt type %x " "entry status=%x.\n", - vha->host_no, pkt->entry_type, pkt->entry_status)); + pkt->entry_type, pkt->entry_status); break; } ((response_t *)pkt)->signature = RESPONSE_PROCESSED; @@ -1418,6 +1396,7 @@ static inline void qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, uint32_t sense_len, struct rsp_que *rsp) { + struct scsi_qla_host *vha = sp->fcport->vha; struct scsi_cmnd *cp = sp->cmd; if (sense_len >= SCSI_SENSE_BUFFERSIZE) @@ -1435,11 +1414,13 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, if (sp->request_sense_length != 0) rsp->status_srb = sp; - DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) " - "cmd=%p\n", __func__, sp->fcport->vha->host_no, - cp->device->channel, cp->device->id, cp->device->lun, cp)); + ql_dbg(ql_dbg_io, vha, 0x301c, + "Check condition Sense data, scsi(%ld:%d:%d:%d) cmd=%p.\n", + sp->fcport->vha->host_no, cp->device->channel, cp->device->id, + cp->device->lun, cp); if (sense_len) - DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, sense_len)); + ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b, + cp->sense_buffer, sense_len); } struct scsi_dif_tuple { @@ -1457,6 +1438,7 @@ struct scsi_dif_tuple { static inline void qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) { + struct scsi_qla_host *vha = sp->fcport->vha; struct scsi_cmnd *cmd = sp->cmd; struct scsi_dif_tuple *ep = (struct scsi_dif_tuple *)&sts24->data[20]; @@ -1473,15 +1455,15 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) e_guard = be16_to_cpu(ep->guard); a_guard = be16_to_cpu(ap->guard); - DEBUG18(printk(KERN_DEBUG - "%s(): iocb(s) %p Returned STATUS\n", __func__, sts24)); + ql_dbg(ql_dbg_io, vha, 0x3023, + "iocb(s) %p Returned STATUS.\n", sts24); - DEBUG18(printk(KERN_ERR "DIF ERROR in cmd 0x%x lba 0x%llx act ref" + ql_dbg(ql_dbg_io, vha, 0x3024, + "DIF ERROR in cmd 0x%x lba 0x%llx act ref" " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app" - " tag=0x%x, act guard=0x%x, exp guard=0x%x\n", + " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n", cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, - a_app_tag, e_app_tag, a_guard, e_guard)); - + a_app_tag, e_app_tag, a_guard, e_guard); /* check guard */ if (e_guard != a_guard) { @@ -1569,9 +1551,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) sp = NULL; if (sp == NULL) { - qla_printk(KERN_WARNING, ha, - "scsi(%ld): Invalid status handle (0x%x).\n", vha->host_no, - sts->handle); + ql_log(ql_log_warn, vha, 0x3017, + "Invalid status handle (0x%x).\n", sts->handle); if (IS_QLA82XX(ha)) set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); @@ -1582,9 +1563,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) } cp = sp->cmd; if (cp == NULL) { - 
qla_printk(KERN_WARNING, ha, - "scsi(%ld): Command already returned (0x%x/%p).\n", - vha->host_no, sts->handle, sp); + ql_log(ql_log_warn, vha, 0x3018, + "Command already returned (0x%x/%p).\n", + sts->handle, sp); return; } @@ -1629,10 +1610,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) par_sense_len -= rsp_info_len; } if (rsp_info_len > 3 && rsp_info[3]) { - DEBUG2(qla_printk(KERN_INFO, ha, - "scsi(%ld:%d:%d): FCP I/O protocol failure " - "(0x%x/0x%x).\n", vha->host_no, cp->device->id, - cp->device->lun, rsp_info_len, rsp_info[3])); + ql_log(ql_log_warn, vha, 0x3019, + "FCP I/O protocol failure (0x%x/0x%x).\n", + rsp_info_len, rsp_info[3]); cp->result = DID_BUS_BUSY << 16; goto out; @@ -1661,11 +1641,10 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) if (!lscsi_status && ((unsigned)(scsi_bufflen(cp) - resid) < cp->underflow)) { - qla_printk(KERN_INFO, ha, - "scsi(%ld:%d:%d): Mid-layer underflow " + ql_log(ql_log_warn, vha, 0x301a, + "Mid-layer underflow " "detected (0x%x of 0x%x bytes).\n", - vha->host_no, cp->device->id, - cp->device->lun, resid, scsi_bufflen(cp)); + resid, scsi_bufflen(cp)); cp->result = DID_ERROR << 16; break; @@ -1674,9 +1653,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) cp->result = DID_OK << 16 | lscsi_status; if (lscsi_status == SAM_STAT_TASK_SET_FULL) { - DEBUG2(qla_printk(KERN_INFO, ha, - "scsi(%ld:%d:%d) QUEUE FULL detected.\n", - vha->host_no, cp->device->id, cp->device->lun)); + ql_log(ql_log_warn, vha, 0x301b, + "QUEUE FULL detected.\n"); break; } logit = 0; @@ -1697,11 +1675,10 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) scsi_set_resid(cp, resid); if (scsi_status & SS_RESIDUAL_UNDER) { if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) { - DEBUG2(qla_printk(KERN_INFO, ha, - "scsi(%ld:%d:%d) Dropped frame(s) detected " - "(0x%x of 0x%x bytes).\n", vha->host_no, - cp->device->id, cp->device->lun, resid, - scsi_bufflen(cp))); + ql_log(ql_log_warn, vha, 0x301d, + "Dropped frame(s) detected " + "(0x%x of 0x%x bytes).\n", + resid, scsi_bufflen(cp)); cp->result = DID_ERROR << 16 | lscsi_status; break; @@ -1710,20 +1687,18 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) if (!lscsi_status && ((unsigned)(scsi_bufflen(cp) - resid) < cp->underflow)) { - qla_printk(KERN_INFO, ha, - "scsi(%ld:%d:%d): Mid-layer underflow " + ql_log(ql_log_warn, vha, 0x301e, + "Mid-layer underflow " "detected (0x%x of 0x%x bytes).\n", - vha->host_no, cp->device->id, - cp->device->lun, resid, scsi_bufflen(cp)); + resid, scsi_bufflen(cp)); cp->result = DID_ERROR << 16; break; } } else { - DEBUG2(qla_printk(KERN_INFO, ha, - "scsi(%ld:%d:%d) Dropped frame(s) detected (0x%x " - "of 0x%x bytes).\n", vha->host_no, cp->device->id, - cp->device->lun, resid, scsi_bufflen(cp))); + ql_log(ql_log_warn, vha, 0x301f, + "Dropped frame(s) detected (0x%x " + "of 0x%x bytes).\n", resid, scsi_bufflen(cp)); cp->result = DID_ERROR << 16 | lscsi_status; goto check_scsi_status; @@ -1739,10 +1714,8 @@ check_scsi_status: */ if (lscsi_status != 0) { if (lscsi_status == SAM_STAT_TASK_SET_FULL) { - DEBUG2(qla_printk(KERN_INFO, ha, - "scsi(%ld:%d:%d) QUEUE FULL detected.\n", - vha->host_no, cp->device->id, - cp->device->lun)); + ql_log(ql_log_warn, vha, 0x3020, + "QUEUE FULL detected.\n"); logit = 1; break; } @@ -1781,10 +1754,9 @@ check_scsi_status: break; } - DEBUG2(qla_printk(KERN_INFO, ha, - "scsi(%ld:%d:%d) Port down status: port-state=0x%x\n", - 
vha->host_no, cp->device->id, cp->device->lun, - atomic_read(&fcport->state))); + ql_dbg(ql_dbg_io, vha, 0x3021, + "Port down status: port-state=0x%x.\n", + atomic_read(&fcport->state)); if (atomic_read(&fcport->state) == FCS_ONLINE) qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); @@ -1804,15 +1776,13 @@ check_scsi_status: out: if (logit) - DEBUG2(qla_printk(KERN_INFO, ha, - "scsi(%ld:%d:%d) FCP command status: 0x%x-0x%x (0x%x) " - "portid=%02x%02x%02x oxid=0x%x cdb=%02x%02x%02x len=0x%x " - "rsp_info=0x%x resid=0x%x fw_resid=0x%x\n", vha->host_no, - cp->device->id, cp->device->lun, comp_status, scsi_status, - cp->result, fcport->d_id.b.domain, fcport->d_id.b.area, - fcport->d_id.b.al_pa, ox_id, cp->cmnd[0], cp->cmnd[1], - cp->cmnd[2], scsi_bufflen(cp), rsp_info_len, resid_len, - fw_resid_len)); + ql_dbg(ql_dbg_io, vha, 0x3022, + "FCP command status: 0x%x-0x%x (0x%x) " + "oxid=0x%x cdb=%02x%02x%02x len=0x%x " + "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n", + comp_status, scsi_status, cp->result, ox_id, cp->cmnd[0], + cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len, + resid_len, fw_resid_len); if (rsp->status_srb == NULL) qla2x00_sp_compl(ha, sp); @@ -1830,16 +1800,15 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) { uint8_t sense_sz = 0; struct qla_hw_data *ha = rsp->hw; + struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); srb_t *sp = rsp->status_srb; struct scsi_cmnd *cp; if (sp != NULL && sp->request_sense_length != 0) { cp = sp->cmd; if (cp == NULL) { - DEBUG2(printk("%s(): Cmd already returned back to OS " - "sp=%p.\n", __func__, sp)); - qla_printk(KERN_INFO, ha, - "cmd is NULL: already returned to OS (sp=%p)\n", + ql_log(ql_log_warn, vha, 0x3025, + "cmd is NULL: already returned to OS (sp=%p).\n", sp); rsp->status_srb = NULL; @@ -1856,7 +1825,8 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) if (IS_FWI2_CAPABLE(ha)) host_to_fcp_swap(pkt->data, sizeof(pkt->data)); memcpy(sp->request_sense_ptr, pkt->data, sense_sz); - DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz)); + ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c, + sp->request_sense_ptr, sense_sz); sp->request_sense_ptr += sense_sz; sp->request_sense_length -= sense_sz; @@ -1882,21 +1852,25 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) uint32_t handle = LSW(pkt->handle); uint16_t que = MSW(pkt->handle); struct req_que *req = ha->req_q_map[que]; -#if defined(QL_DEBUG_LEVEL_2) + if (pkt->entry_status & RF_INV_E_ORDER) - qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__); + ql_dbg(ql_dbg_async, vha, 0x502a, + "Invalid Entry Order.\n"); else if (pkt->entry_status & RF_INV_E_COUNT) - qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__); + ql_dbg(ql_dbg_async, vha, 0x502b, + "Invalid Entry Count.\n"); else if (pkt->entry_status & RF_INV_E_PARAM) - qla_printk(KERN_ERR, ha, - "%s: Invalid Entry Parameter\n", __func__); + ql_dbg(ql_dbg_async, vha, 0x502c, + "Invalid Entry Parameter.\n"); else if (pkt->entry_status & RF_INV_E_TYPE) - qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__); + ql_dbg(ql_dbg_async, vha, 0x502d, + "Invalid Entry Type.\n"); else if (pkt->entry_status & RF_BUSY) - qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__); + ql_dbg(ql_dbg_async, vha, 0x502e, + "Busy.\n"); else - qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__); -#endif + ql_dbg(ql_dbg_async, vha, 0x502f, + "UNKNOWN flag error.\n"); /* Validate handle. 
*/ if (handle < MAX_OUTSTANDING_COMMANDS) @@ -1923,10 +1897,8 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type == COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7 || pkt->entry_type == COMMAND_TYPE_6) { - DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n", - vha->host_no)); - qla_printk(KERN_WARNING, ha, - "Error entry - invalid handle\n"); + ql_log(ql_log_warn, vha, 0x5030, + "Error entry - invalid handle.\n"); if (IS_QLA82XX(ha)) set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); @@ -1960,11 +1932,11 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) } if (ha->mcp) { - DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n", - __func__, vha->host_no, ha->mcp->mb[0])); + ql_dbg(ql_dbg_async, vha, 0x504d, + "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]); } else { - DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n", - __func__, vha->host_no)); + ql_dbg(ql_dbg_async, vha, 0x504e, + "MBX pointer ERROR.\n"); } } @@ -1993,8 +1965,8 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, } if (pkt->entry_status != 0) { - DEBUG3(printk(KERN_INFO - "scsi(%ld): Process error entry.\n", vha->host_no)); + ql_dbg(ql_dbg_async, vha, 0x5029, + "Process error entry.\n"); qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt); ((response_t *)pkt)->signature = RESPONSE_PROCESSED; @@ -2030,10 +2002,10 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, break; default: /* Type Not Supported. */ - DEBUG4(printk(KERN_WARNING - "scsi(%ld): Received unknown response pkt type %x " + ql_dbg(ql_dbg_async, vha, 0x5042, + "Received unknown response pkt type %x " "entry status=%x.\n", - vha->host_no, pkt->entry_type, pkt->entry_status)); + pkt->entry_type, pkt->entry_status); break; } ((response_t *)pkt)->signature = RESPONSE_PROCESSED; @@ -2088,7 +2060,8 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha) next_test: if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3) - qla_printk(KERN_INFO, ha, "Additional code -- 0x55AA.\n"); + ql_log(ql_log_info, vha, 0x504c, + "Additional code -- 0x55AA.\n"); done: WRT_REG_DWORD(&reg->iobase_window, 0x0000); @@ -2121,7 +2094,7 @@ qla24xx_intr_handler(int irq, void *dev_id) rsp = (struct rsp_que *) dev_id; if (!rsp) { printk(KERN_INFO - "%s(): NULL response queue pointer\n", __func__); + "%s(): NULL response queue pointer.\n", __func__); return IRQ_NONE; } @@ -2142,8 +2115,9 @@ qla24xx_intr_handler(int irq, void *dev_id) hccr = RD_REG_DWORD(&reg->hccr); - qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, " - "Dumping firmware!\n", hccr); + ql_log(ql_log_warn, vha, 0x504b, + "RISC paused -- HCCR=%x, Dumping firmware.\n", + hccr); qla2xxx_check_risc_status(vha); @@ -2174,9 +2148,8 @@ qla24xx_intr_handler(int irq, void *dev_id) qla24xx_process_response_queue(vha, rsp); break; default: - DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " - "(%d).\n", - vha->host_no, stat & 0xff)); + ql_dbg(ql_dbg_async, vha, 0x504f, + "Unrecognized interrupt type (%d).\n", stat & 0xff); break; } WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); @@ -2205,7 +2178,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id) rsp = (struct rsp_que *) dev_id; if (!rsp) { printk(KERN_INFO - "%s(): NULL response queue pointer\n", __func__); + "%s(): NULL response queue pointer.\n", __func__); return IRQ_NONE; } ha = rsp->hw; @@ -2235,7 +2208,7 @@ qla25xx_msix_rsp_q(int irq, void *dev_id) rsp = (struct rsp_que *) dev_id; if (!rsp) { printk(KERN_INFO - "%s(): NULL response queue pointer\n", __func__); 
+ "%s(): NULL response queue pointer.\n", __func__); return IRQ_NONE; } ha = rsp->hw; @@ -2268,8 +2241,8 @@ qla24xx_msix_default(int irq, void *dev_id) rsp = (struct rsp_que *) dev_id; if (!rsp) { - DEBUG(printk( - "%s(): NULL response queue pointer\n", __func__)); + printk(KERN_INFO + "%s(): NULL response queue pointer.\n", __func__); return IRQ_NONE; } ha = rsp->hw; @@ -2286,8 +2259,9 @@ qla24xx_msix_default(int irq, void *dev_id) hccr = RD_REG_DWORD(®->hccr); - qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, " - "Dumping firmware!\n", hccr); + ql_log(ql_log_info, vha, 0x5050, + "RISC paused -- HCCR=%x, Dumping firmware.\n", + hccr); qla2xxx_check_risc_status(vha); @@ -2318,9 +2292,8 @@ qla24xx_msix_default(int irq, void *dev_id) qla24xx_process_response_queue(vha, rsp); break; default: - DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " - "(%d).\n", - vha->host_no, stat & 0xff)); + ql_dbg(ql_dbg_async, vha, 0x5051, + "Unrecognized interrupt type (%d).\n", stat & 0xff); break; } WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); @@ -2358,6 +2331,7 @@ qla24xx_disable_msix(struct qla_hw_data *ha) { int i; struct qla_msix_entry *qentry; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); for (i = 0; i < ha->msix_count; i++) { qentry = &ha->msix_entries[i]; @@ -2368,6 +2342,8 @@ qla24xx_disable_msix(struct qla_hw_data *ha) kfree(ha->msix_entries); ha->msix_entries = NULL; ha->flags.msix_enabled = 0; + ql_dbg(ql_dbg_init, vha, 0x0042, + "Disabled the MSI.\n"); } static int @@ -2377,11 +2353,15 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) int i, ret; struct msix_entry *entries; struct qla_msix_entry *qentry; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count, GFP_KERNEL); - if (!entries) + if (!entries) { + ql_log(ql_log_warn, vha, 0x00bc, + "Failed to allocate memory for msix_entry.\n"); return -ENOMEM; + } for (i = 0; i < ha->msix_count; i++) entries[i].entry = i; @@ -2391,16 +2371,18 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) if (ret < MIN_MSIX_COUNT) goto msix_failed; - qla_printk(KERN_WARNING, ha, - "MSI-X: Failed to enable support -- %d/%d\n" - " Retry with %d vectors\n", ha->msix_count, ret, ret); + ql_log(ql_log_warn, vha, 0x00c6, + "MSI-X: Failed to enable support " + "-- %d/%d\n Retry with %d vectors.\n", + ha->msix_count, ret, ret); ha->msix_count = ret; ret = pci_enable_msix(ha->pdev, entries, ha->msix_count); if (ret) { msix_failed: - qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable" - " support, giving up -- %d/%d\n", - ha->msix_count, ret); + ql_log(ql_log_fatal, vha, 0x00c7, + "MSI-X: Failed to enable support, " + "giving up -- %d/%d.\n", + ha->msix_count, ret); goto msix_out; } ha->max_rsp_queues = ha->msix_count - 1; @@ -2408,6 +2390,8 @@ msix_failed: ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * ha->msix_count, GFP_KERNEL); if (!ha->msix_entries) { + ql_log(ql_log_fatal, vha, 0x00c8, + "Failed to allocate memory for ha->msix_entries.\n"); ret = -ENOMEM; goto msix_out; } @@ -2434,9 +2418,9 @@ msix_failed: 0, msix_entries[i].name, rsp); } if (ret) { - qla_printk(KERN_WARNING, ha, - "MSI-X: Unable to register handler -- %x/%d.\n", - qentry->vector, ret); + ql_log(ql_log_fatal, vha, 0x00cb, + "MSI-X: unable to register handler -- %x/%d.\n", + qentry->vector, ret); qla24xx_disable_msix(ha); ha->mqenable = 0; goto msix_out; @@ -2449,6 +2433,12 @@ msix_failed: /* Enable MSI-X vector for response queue update for queue 0 */ if (ha->mqiobase && 
(ha->max_rsp_queues > 1 || ha->max_req_queues > 1)) ha->mqenable = 1; + ql_dbg(ql_dbg_multiq, vha, 0xc005, + "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", + ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); + ql_dbg(ql_dbg_init, vha, 0x0055, + "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", + ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); msix_out: kfree(entries); @@ -2460,6 +2450,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) { int ret; device_reg_t __iomem *reg = ha->iobase; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); /* If possible, enable MSI-X. */ if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && @@ -2470,30 +2461,30 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) (ha->pdev->subsystem_device == 0x7040 || ha->pdev->subsystem_device == 0x7041 || ha->pdev->subsystem_device == 0x1705)) { - DEBUG2(qla_printk(KERN_WARNING, ha, - "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X,0x%X).\n", + ql_log(ql_log_warn, vha, 0x0034, + "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n", ha->pdev->subsystem_vendor, - ha->pdev->subsystem_device)); + ha->pdev->subsystem_device); goto skip_msi; } if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX || !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) { - DEBUG2(qla_printk(KERN_WARNING, ha, - "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n", - ha->pdev->revision, ha->fw_attributes)); + ql_log(ql_log_warn, vha, 0x0035, + "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n", + ha->pdev->revision, ha->fw_attributes); goto skip_msix; } ret = qla24xx_enable_msix(ha, rsp); if (!ret) { - DEBUG2(qla_printk(KERN_INFO, ha, - "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision, - ha->fw_attributes)); + ql_dbg(ql_dbg_init, vha, 0x0036, + "MSI-X: Enabled (0x%X, 0x%X).\n", + ha->chip_revision, ha->fw_attributes); goto clear_risc_ints; } - qla_printk(KERN_WARNING, ha, - "MSI-X: Falling back-to MSI mode -- %d.\n", ret); + ql_log(ql_log_info, vha, 0x0037, + "MSI-X Falling back-to MSI mode -%d.\n", ret); skip_msix: if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && @@ -2502,18 +2493,19 @@ skip_msix: ret = pci_enable_msi(ha->pdev); if (!ret) { - DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n")); + ql_dbg(ql_dbg_init, vha, 0x0038, + "MSI: Enabled.\n"); ha->flags.msi_enabled = 1; } else - qla_printk(KERN_WARNING, ha, - "MSI-X: Falling back-to INTa mode -- %d.\n", ret); + ql_log(ql_log_warn, vha, 0x0039, + "MSI-X; Falling back-to INTa mode -- %d.\n", ret); skip_msi: ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, ha->flags.msi_enabled ? 
0 : IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp); if (ret) { - qla_printk(KERN_WARNING, ha, + ql_log(ql_log_warn, vha, 0x003a, "Failed to reserve interrupt %d already in use.\n", ha->pdev->irq); goto fail; @@ -2563,13 +2555,14 @@ int qla25xx_request_irq(struct rsp_que *rsp) struct qla_hw_data *ha = rsp->hw; struct qla_init_msix_entry *intr = &msix_entries[2]; struct qla_msix_entry *msix = rsp->msix; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); int ret; ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp); if (ret) { - qla_printk(KERN_WARNING, ha, - "MSI-X: Unable to register handler -- %x/%d.\n", - msix->vector, ret); + ql_log(ql_log_fatal, vha, 0x00e6, + "MSI-X: Unable to register handler -- %x/%d.\n", + msix->vector, ret); return ret; } msix->have_irq = 1; diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index c26f0acdfec..f7604ea1af8 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -46,14 +46,18 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); - if (ha->pdev->error_state > pci_channel_io_frozen) + ql_dbg(ql_dbg_mbx, base_vha, 0x1000, "Entered %s.\n", __func__); + + if (ha->pdev->error_state > pci_channel_io_frozen) { + ql_log(ql_log_warn, base_vha, 0x1001, + "error_state is greater than pci_channel_io_frozen, " + "exiting.\n"); return QLA_FUNCTION_TIMEOUT; + } if (vha->device_flags & DFLG_DEV_FAILED) { - DEBUG2_3_11(qla_printk(KERN_WARNING, ha, - "%s(%ld): Device in failed state, " - "timeout MBX Exiting.\n", - __func__, base_vha->host_no)); + ql_log(ql_log_warn, base_vha, 0x1002, + "Device in failed state, exiting.\n"); return QLA_FUNCTION_TIMEOUT; } @@ -63,17 +67,18 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) rval = QLA_SUCCESS; abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); - DEBUG11(printk("%s(%ld): entered.\n", __func__, base_vha->host_no)); if (ha->flags.pci_channel_io_perm_failure) { - DEBUG(printk("%s(%ld): Perm failure on EEH, timeout MBX " - "Exiting.\n", __func__, vha->host_no)); + ql_log(ql_log_warn, base_vha, 0x1003, + "Perm failure on EEH timeout MBX, exiting.\n"); return QLA_FUNCTION_TIMEOUT; } if (ha->flags.isp82xx_fw_hung) { /* Setting Link-Down error */ mcp->mb[0] = MBS_LINK_DOWN_ERROR; + ql_log(ql_log_warn, base_vha, 0x1004, + "FW hung = %d.\n", ha->flags.isp82xx_fw_hung); rval = QLA_FUNCTION_FAILED; goto premature_exit; } @@ -85,8 +90,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) */ if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) { /* Timeout occurred. Return error. */ - DEBUG2_3_11(printk("%s(%ld): cmd access timeout. 
" - "Exiting.\n", __func__, base_vha->host_no)); + ql_log(ql_log_warn, base_vha, 0x1005, + "Cmd access timeout, Exiting.\n"); return QLA_FUNCTION_TIMEOUT; } @@ -94,8 +99,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) /* Save mailbox command for debug */ ha->mcp = mcp; - DEBUG11(printk("scsi(%ld): prepare to issue mbox cmd=0x%x.\n", - base_vha->host_no, mcp->mb[0])); + ql_dbg(ql_dbg_mbx, base_vha, 0x1006, + "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]); spin_lock_irqsave(&ha->hardware_lock, flags); @@ -123,27 +128,30 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) iptr++; } -#if defined(QL_DEBUG_LEVEL_1) - printk("%s(%ld): Loaded MBX registers (displayed in bytes) = \n", - __func__, base_vha->host_no); - qla2x00_dump_buffer((uint8_t *)mcp->mb, 16); - printk("\n"); - qla2x00_dump_buffer(((uint8_t *)mcp->mb + 0x10), 16); - printk("\n"); - qla2x00_dump_buffer(((uint8_t *)mcp->mb + 0x20), 8); - printk("\n"); - printk("%s(%ld): I/O address = %p.\n", __func__, base_vha->host_no, - optr); - qla2x00_dump_regs(base_vha); -#endif + ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1111, + "Loaded MBX registers (displayed in bytes) =.\n"); + ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1112, + (uint8_t *)mcp->mb, 16); + ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1113, + ".\n"); + ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1114, + ((uint8_t *)mcp->mb + 0x10), 16); + ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1115, + ".\n"); + ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1116, + ((uint8_t *)mcp->mb + 0x20), 8); + ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1117, + "I/O Address = %p.\n", optr); + ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x100e); /* Issue set host interrupt command to send cmd out. */ ha->flags.mbox_int = 0; clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); /* Unlock mbx registers and wait for interrupt */ - DEBUG11(printk("%s(%ld): going to unlock irq & waiting for interrupt. " - "jiffies=%lx.\n", __func__, base_vha->host_no, jiffies)); + ql_dbg(ql_dbg_mbx, base_vha, 0x100f, + "Going to unlock irq & waiting for interrupts. " + "jiffies=%lx.\n", jiffies); /* Wait for mbx cmd completion until timeout */ @@ -155,9 +163,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) HINT_MBX_INT_PENDING) { spin_unlock_irqrestore(&ha->hardware_lock, flags); - DEBUG2_3_11(printk(KERN_INFO - "%s(%ld): Pending Mailbox timeout. " - "Exiting.\n", __func__, base_vha->host_no)); + ql_dbg(ql_dbg_mbx, base_vha, 0x1010, + "Pending mailbox timeout, exiting.\n"); rval = QLA_FUNCTION_TIMEOUT; goto premature_exit; } @@ -173,17 +180,16 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); } else { - DEBUG3_11(printk("%s(%ld): cmd=%x POLLING MODE.\n", __func__, - base_vha->host_no, command)); + ql_dbg(ql_dbg_mbx, base_vha, 0x1011, + "Cmd=%x Polling Mode.\n", command); if (IS_QLA82XX(ha)) { if (RD_REG_DWORD(®->isp82.hint) & HINT_MBX_INT_PENDING) { spin_unlock_irqrestore(&ha->hardware_lock, flags); - DEBUG2_3_11(printk(KERN_INFO - "%s(%ld): Pending Mailbox timeout. 
" - "Exiting.\n", __func__, base_vha->host_no)); + ql_dbg(ql_dbg_mbx, base_vha, 0x1012, + "Pending mailbox timeout, exiting.\n"); rval = QLA_FUNCTION_TIMEOUT; goto premature_exit; } @@ -207,17 +213,17 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) command == MBC_LOAD_RISC_RAM_EXTENDED)) msleep(10); } /* while */ - DEBUG17(qla_printk(KERN_WARNING, ha, - "Waited %d sec\n", - (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ))); + ql_dbg(ql_dbg_mbx, base_vha, 0x1013, + "Waited %d sec.\n", + (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ)); } /* Check whether we timed out */ if (ha->flags.mbox_int) { uint16_t *iptr2; - DEBUG3_11(printk("%s(%ld): cmd %x completed.\n", __func__, - base_vha->host_no, command)); + ql_dbg(ql_dbg_mbx, base_vha, 0x1014, + "Cmd=%x completed.\n", command); /* Got interrupt. Clear the flag. */ ha->flags.mbox_int = 0; @@ -229,6 +235,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) mcp->mb[0] = MBS_LINK_DOWN_ERROR; ha->mcp = NULL; rval = QLA_FUNCTION_FAILED; + ql_log(ql_log_warn, base_vha, 0x1015, + "FW hung = %d.\n", ha->flags.isp82xx_fw_hung); goto premature_exit; } @@ -249,8 +257,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) } } else { -#if defined(QL_DEBUG_LEVEL_2) || defined(QL_DEBUG_LEVEL_3) || \ - defined(QL_DEBUG_LEVEL_11) uint16_t mb0; uint32_t ictrl; @@ -261,14 +267,13 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) mb0 = RD_MAILBOX_REG(ha, ®->isp, 0); ictrl = RD_REG_WORD(®->isp.ictrl); } - printk("%s(%ld): **** MB Command Timeout for cmd %x ****\n", - __func__, base_vha->host_no, command); - printk("%s(%ld): icontrol=%x jiffies=%lx\n", __func__, - base_vha->host_no, ictrl, jiffies); - printk("%s(%ld): *** mailbox[0] = 0x%x ***\n", __func__, - base_vha->host_no, mb0); - qla2x00_dump_regs(base_vha); -#endif + ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1119, + "MBX Command timeout for cmd %x.\n", command); + ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x111a, + "iocontrol=%x jiffies=%lx.\n", ictrl, jiffies); + ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x111b, + "mb[0] = 0x%x.\n", mb0); + ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1019); rval = QLA_FUNCTION_TIMEOUT; } @@ -279,8 +284,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) ha->mcp = NULL; if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) { - DEBUG11(printk("%s(%ld): checking for additional resp " - "interrupt.\n", __func__, base_vha->host_no)); + ql_dbg(ql_dbg_mbx, base_vha, 0x101a, + "Checking for additional resp interrupt.\n"); /* polling mode for non isp_abort commands. */ qla2x00_poll(ha->rsp_q_map[0]); @@ -291,38 +296,32 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) if (!io_lock_on || (mcp->flags & IOCTL_CMD) || ha->flags.eeh_busy) { /* not in dpc. schedule it for dpc to take over. */ - DEBUG(printk("%s(%ld): timeout schedule " - "isp_abort_needed.\n", __func__, - base_vha->host_no)); - DEBUG2_3_11(printk("%s(%ld): timeout schedule " - "isp_abort_needed.\n", __func__, - base_vha->host_no)); + ql_dbg(ql_dbg_mbx, base_vha, 0x101b, + "Timeout, schedule isp_abort_needed.\n"); if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { - qla_printk(KERN_WARNING, ha, - "Mailbox command timeout occurred. " - "Scheduling ISP " "abort. eeh_busy: 0x%x\n", - ha->flags.eeh_busy); + ql_log(ql_log_info, base_vha, 0x101c, + "Mailbox cmd timeout occured. 
" + "Scheduling ISP abort eeh_busy=0x%x.\n", + ha->flags.eeh_busy); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } } else if (!abort_active) { /* call abort directly since we are in the DPC thread */ - DEBUG(printk("%s(%ld): timeout calling abort_isp\n", - __func__, base_vha->host_no)); - DEBUG2_3_11(printk("%s(%ld): timeout calling " - "abort_isp\n", __func__, base_vha->host_no)); + ql_dbg(ql_dbg_mbx, base_vha, 0x101d, + "Timeout, calling abort_isp.\n"); if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { - qla_printk(KERN_WARNING, ha, - "Mailbox command timeout occurred. " - "Issuing ISP abort.\n"); + ql_log(ql_log_info, base_vha, 0x101e, + "Mailbox cmd timeout occured. " + "Scheduling ISP abort.\n"); set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); @@ -332,11 +331,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) &vha->dpc_flags); } clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); - DEBUG(printk("%s(%ld): finished abort_isp\n", - __func__, vha->host_no)); - DEBUG2_3_11(printk( - "%s(%ld): finished abort_isp\n", - __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, base_vha, 0x101f, + "Finished abort_isp.\n"); } } } @@ -346,12 +342,11 @@ premature_exit: complete(&ha->mbx_cmd_comp); if (rval) { - DEBUG2_3_11(printk("%s(%ld): **** FAILED. mbx0=%x, mbx1=%x, " - "mbx2=%x, cmd=%x ****\n", __func__, base_vha->host_no, - mcp->mb[0], mcp->mb[1], mcp->mb[2], command)); + ql_dbg(ql_dbg_mbx, base_vha, 0x1020, + "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, cmd=%x ****.\n", + mcp->mb[0], mcp->mb[1], mcp->mb[2], command); } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, - base_vha->host_no)); + ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__); } return rval; @@ -366,7 +361,7 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1022, "Entered %s.\n", __func__); if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) { mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED; @@ -397,10 +392,10 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr, rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, - vha->host_no, rval, mcp->mb[0])); + ql_dbg(ql_dbg_mbx, vha, 0x1023, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1024, "Done %s.\n", __func__); } return rval; @@ -430,7 +425,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1025, "Entered %s.\n", __func__); mcp->mb[0] = MBC_EXECUTE_FIRMWARE; mcp->out_mb = MBX_0; @@ -461,15 +456,14 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, - vha->host_no, rval, mcp->mb[0])); + ql_dbg(ql_dbg_mbx, vha, 0x1026, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { if (IS_FWI2_CAPABLE(ha)) { - DEBUG11(printk("%s(%ld): done exchanges=%x.\n", - __func__, vha->host_no, mcp->mb[1])); + ql_dbg(ql_dbg_mbx, vha, 0x1027, + "Done exchanges=%x.\n", mcp->mb[1]); } else { - 
DEBUG11(printk("%s(%ld): done.\n", __func__, - vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1028, "Done %s.\n", __func__); } } @@ -501,7 +495,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1029, "Entered %s.\n", __func__); mcp->mb[0] = MBC_GET_FIRMWARE_VERSION; mcp->out_mb = MBX_0; @@ -535,11 +529,10 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor, failed: if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, - vha->host_no, rval)); + ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval); } else { /*EMPTY*/ - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x102b, "Done %s.\n", __func__); } return rval; } @@ -565,7 +558,7 @@ qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x102c, "Entered %s.\n", __func__); mcp->mb[0] = MBC_GET_FIRMWARE_OPTION; mcp->out_mb = MBX_0; @@ -576,15 +569,14 @@ qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts) if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, - vha->host_no, rval)); + ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval); } else { fwopts[0] = mcp->mb[0]; fwopts[1] = mcp->mb[1]; fwopts[2] = mcp->mb[2]; fwopts[3] = mcp->mb[3]; - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x102e, "Done %s.\n", __func__); } return rval; @@ -612,7 +604,7 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x102f, "Entered %s.\n", __func__); mcp->mb[0] = MBC_SET_FIRMWARE_OPTION; mcp->mb[1] = fwopts[1]; @@ -636,11 +628,11 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts) if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG2_3_11(printk("%s(%ld): failed=%x (%x/%x).\n", __func__, - vha->host_no, rval, mcp->mb[0], mcp->mb[1])); + ql_dbg(ql_dbg_mbx, vha, 0x1030, + "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]); } else { /*EMPTY*/ - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1031, "Done %s.\n", __func__); } return rval; @@ -668,7 +660,7 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - DEBUG11(printk("qla2x00_mbx_reg_test(%ld): entered.\n", vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1032, "Entered %s.\n", __func__); mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST; mcp->mb[1] = 0xAAAA; @@ -695,12 +687,10 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha) if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG2_3_11(printk("qla2x00_mbx_reg_test(%ld): failed=%x.\n", - vha->host_no, rval)); + ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval); } else { /*EMPTY*/ - DEBUG11(printk("qla2x00_mbx_reg_test(%ld): done.\n", - vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1034, "Done %s.\n", __func__); } return rval; @@ -728,7 +718,7 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1035, "Entered %s.\n", __func__); mcp->mb[0] = MBC_VERIFY_CHECKSUM; mcp->out_mb = MBX_0; @@ -749,11 +739,11 @@ 
qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr) rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed=%x chk sum=%x.\n", __func__, - vha->host_no, rval, IS_FWI2_CAPABLE(vha->hw) ? - (mcp->mb[2] << 16) | mcp->mb[1]: mcp->mb[1])); + ql_dbg(ql_dbg_mbx, vha, 0x1036, + "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ? + (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]); } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1037, "Done %s.\n", __func__); } return rval; @@ -785,6 +775,8 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; + ql_dbg(ql_dbg_mbx, vha, 0x1038, "Entered %s.\n", __func__); + mcp->mb[0] = MBC_IOCB_COMMAND_A64; mcp->mb[1] = 0; mcp->mb[2] = MSW(phys_addr); @@ -799,14 +791,14 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer, if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG(printk("qla2x00_issue_iocb(%ld): failed rval 0x%x\n", - vha->host_no, rval)); + ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval); } else { sts_entry_t *sts_entry = (sts_entry_t *) buffer; /* Mask reserved bits. */ sts_entry->entry_status &= IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK; + ql_dbg(ql_dbg_mbx, vha, 0x103a, "Done %s.\n", __func__); } return rval; @@ -847,7 +839,7 @@ qla2x00_abort_command(srb_t *sp) struct qla_hw_data *ha = vha->hw; struct req_que *req = vha->req; - DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x103b, "Entered %s.\n", __func__); spin_lock_irqsave(&ha->hardware_lock, flags); for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { @@ -876,11 +868,9 @@ qla2x00_abort_command(srb_t *sp) rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("qla2x00_abort_command(%ld): failed=%x.\n", - vha->host_no, rval)); + ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval); } else { - DEBUG11(printk("qla2x00_abort_command(%ld): done.\n", - vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x103d, "Done %s.\n", __func__); } return rval; @@ -896,10 +886,11 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag) struct req_que *req; struct rsp_que *rsp; - DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no)); - l = l; vha = fcport->vha; + + ql_dbg(ql_dbg_mbx, vha, 0x103e, "Entered %s.\n", __func__); + req = vha->hw->req_q_map[0]; rsp = req->rsp; mcp->mb[0] = MBC_ABORT_TARGET; @@ -919,18 +910,17 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag) mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, - vha->host_no, rval)); + ql_dbg(ql_dbg_mbx, vha, 0x103f, "Failed=%x.\n", rval); } /* Issue marker IOCB. 
*/ rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0, MK_SYNC_ID); if (rval2 != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " - "(%x).\n", __func__, vha->host_no, rval2)); + ql_dbg(ql_dbg_mbx, vha, 0x1040, + "Failed to issue marker IOCB (%x).\n", rval2); } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1041, "Done %s.\n", __func__); } return rval; @@ -946,9 +936,10 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag) struct req_que *req; struct rsp_que *rsp; - DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no)); - vha = fcport->vha; + + ql_dbg(ql_dbg_mbx, vha, 0x1042, "Entered %s.\n", __func__); + req = vha->hw->req_q_map[0]; rsp = req->rsp; mcp->mb[0] = MBC_LUN_RESET; @@ -966,18 +957,17 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag) mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, - vha->host_no, rval)); + ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval); } /* Issue marker IOCB. */ rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l, MK_SYNC_ID_LUN); if (rval2 != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " - "(%x).\n", __func__, vha->host_no, rval2)); + ql_dbg(ql_dbg_mbx, vha, 0x1044, + "Failed to issue marker IOCB (%x).\n", rval2); } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1045, "Done %s.\n", __func__); } return rval; @@ -1011,8 +1001,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - DEBUG11(printk("qla2x00_get_adapter_id(%ld): entered.\n", - vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1046, "Entered %s.\n", __func__); mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID; mcp->mb[9] = vha->vp_idx; @@ -1038,11 +1027,9 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa, if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG2_3_11(printk("qla2x00_get_adapter_id(%ld): failed=%x.\n", - vha->host_no, rval)); + ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval); } else { - DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n", - vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1048, "Done %s.\n", __func__); if (IS_QLA8XXX_TYPE(vha->hw)) { vha->fcoe_vlan_id = mcp->mb[9] & 0xfff; @@ -1083,8 +1070,7 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - DEBUG11(printk("qla2x00_get_retry_cnt(%ld): entered.\n", - vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1049, "Entered %s.\n", __func__); mcp->mb[0] = MBC_GET_RETRY_COUNT; mcp->out_mb = MBX_0; @@ -1095,8 +1081,8 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov, if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG2_3_11(printk("qla2x00_get_retry_cnt(%ld): failed = %x.\n", - vha->host_no, mcp->mb[0])); + ql_dbg(ql_dbg_mbx, vha, 0x104a, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { /* Convert returned data and check our values. */ *r_a_tov = mcp->mb[3] / 2; @@ -1107,8 +1093,8 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov, *tov = ratov; } - DEBUG11(printk("qla2x00_get_retry_cnt(%ld): done. 
mb3=%d " - "ratov=%d.\n", vha->host_no, mcp->mb[3], ratov)); + ql_dbg(ql_dbg_mbx, vha, 0x104b, + "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov); } return rval; @@ -1139,8 +1125,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size) mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; - DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n", - vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x104c, "Entered %s.\n", __func__); if (IS_QLA82XX(ha) && ql2xdbwr) qla82xx_wr_32(ha, ha->nxdb_wr_ptr, @@ -1174,13 +1159,11 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size) if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG2_3_11(printk("qla2x00_init_firmware(%ld): failed=%x " - "mb0=%x.\n", - vha->host_no, rval, mcp->mb[0])); + ql_dbg(ql_dbg_mbx, vha, 0x104d, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { /*EMPTY*/ - DEBUG11(printk("qla2x00_init_firmware(%ld): done.\n", - vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x104e, "Done %s.\n", __func__); } return rval; @@ -1213,13 +1196,13 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt) dma_addr_t pd_dma; struct qla_hw_data *ha = vha->hw; - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x104f, "Entered %s.\n", __func__); pd24 = NULL; pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); if (pd == NULL) { - DEBUG2_3(printk("%s(%ld): failed to allocate Port Database " - "structure.\n", __func__, vha->host_no)); + ql_log(ql_log_warn, vha, 0x1050, + "Failed to allocate port database structure.\n"); return QLA_MEMORY_ALLOC_FAILED; } memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE)); @@ -1261,12 +1244,10 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt) /* Check for logged in state. */ if (pd24->current_login_state != PDS_PRLI_COMPLETE && pd24->last_login_state != PDS_PRLI_COMPLETE) { - DEBUG2(qla_printk(KERN_WARNING, ha, - "scsi(%ld): Unable to verify login-state (%x/%x) " - " - portid=%02x%02x%02x.\n", vha->host_no, - pd24->current_login_state, pd24->last_login_state, - fcport->d_id.b.domain, fcport->d_id.b.area, - fcport->d_id.b.al_pa)); + ql_dbg(ql_dbg_mbx, vha, 0x1051, + "Unable to verify login-state (%x/%x) for " + "loop_id %x.\n", pd24->current_login_state, + pd24->last_login_state, fcport->loop_id); rval = QLA_FUNCTION_FAILED; goto gpd_error_out; } @@ -1290,12 +1271,11 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt) /* Check for logged in state. 
*/ if (pd->master_state != PD_STATE_PORT_LOGGED_IN && pd->slave_state != PD_STATE_PORT_LOGGED_IN) { - DEBUG2(qla_printk(KERN_WARNING, ha, - "scsi(%ld): Unable to verify login-state (%x/%x) " - " - portid=%02x%02x%02x.\n", vha->host_no, - pd->master_state, pd->slave_state, - fcport->d_id.b.domain, fcport->d_id.b.area, - fcport->d_id.b.al_pa)); + ql_dbg(ql_dbg_mbx, vha, 0x100a, + "Unable to verify login-state (%x/%x) - " + "portid=%02x%02x%02x.\n", pd->master_state, + pd->slave_state, fcport->d_id.b.domain, + fcport->d_id.b.area, fcport->d_id.b.al_pa); rval = QLA_FUNCTION_FAILED; goto gpd_error_out; } @@ -1325,10 +1305,11 @@ gpd_error_out: dma_pool_free(ha->s_dma_pool, pd, pd_dma); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", - __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); + ql_dbg(ql_dbg_mbx, vha, 0x1052, + "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, + mcp->mb[0], mcp->mb[1]); } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1053, "Done %s.\n", __func__); } return rval; @@ -1357,8 +1338,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - DEBUG11(printk("qla2x00_get_firmware_state(%ld): entered.\n", - vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1054, "Entered %s.\n", __func__); mcp->mb[0] = MBC_GET_FIRMWARE_STATE; mcp->out_mb = MBX_0; @@ -1381,12 +1361,10 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG2_3_11(printk("qla2x00_get_firmware_state(%ld): " - "failed=%x.\n", vha->host_no, rval)); + ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval); } else { /*EMPTY*/ - DEBUG11(printk("qla2x00_get_firmware_state(%ld): done.\n", - vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1056, "Done %s.\n", __func__); } return rval; @@ -1418,8 +1396,7 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - DEBUG11(printk("qla2x00_get_port_name(%ld): entered.\n", - vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1057, "Entered %s.\n", __func__); mcp->mb[0] = MBC_GET_PORT_NAME; mcp->mb[9] = vha->vp_idx; @@ -1439,8 +1416,7 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name, if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG2_3_11(printk("qla2x00_get_port_name(%ld): failed=%x.\n", - vha->host_no, rval)); + ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval); } else { if (name != NULL) { /* This function returns name in big endian. */ @@ -1454,8 +1430,7 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name, name[7] = LSB(mcp->mb[7]); } - DEBUG11(printk("qla2x00_get_port_name(%ld): done.\n", - vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1059, "Done %s.\n", __func__); } return rval; @@ -1483,7 +1458,7 @@ qla2x00_lip_reset(scsi_qla_host_t *vha) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x105a, "Entered %s.\n", __func__); if (IS_QLA8XXX_TYPE(vha->hw)) { /* Logout across all FCFs. 
*/ @@ -1517,11 +1492,10 @@ qla2x00_lip_reset(scsi_qla_host_t *vha) if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", - __func__, vha->host_no, rval)); + ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval); } else { /*EMPTY*/ - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x105c, "Done %s.\n", __func__); } return rval; @@ -1553,12 +1527,11 @@ qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - DEBUG11(printk("qla2x00_send_sns(%ld): entered.\n", - vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x105d, "Entered %s.\n", __func__); - DEBUG11(printk("qla2x00_send_sns: retry cnt=%d ratov=%d total " - "tov=%d.\n", vha->hw->retry_count, vha->hw->login_timeout, - mcp->tov)); + ql_dbg(ql_dbg_mbx, vha, 0x105e, + "Retry cnt=%d ratov=%d total tov=%d.\n", + vha->hw->retry_count, vha->hw->login_timeout, mcp->tov); mcp->mb[0] = MBC_SEND_SNS_COMMAND; mcp->mb[1] = cmd_size; @@ -1575,13 +1548,12 @@ qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address, if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x " - "mb[1]=%x.\n", vha->host_no, rval, mcp->mb[0], mcp->mb[1])); - DEBUG2_3_11(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x " - "mb[1]=%x.\n", vha->host_no, rval, mcp->mb[0], mcp->mb[1])); + ql_dbg(ql_dbg_mbx, vha, 0x105f, + "Failed=%x mb[0]=%x mb[1]=%x.\n", + rval, mcp->mb[0], mcp->mb[1]); } else { /*EMPTY*/ - DEBUG11(printk("qla2x00_send_sns(%ld): done.\n", vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1060, "Done %s.\n", __func__); } return rval; @@ -1600,7 +1572,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, struct req_que *req; struct rsp_que *rsp; - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1061, "Entered %s.\n", __func__); if (ha->flags.cpu_affinity_enabled) req = ha->req_q_map[0]; @@ -1610,8 +1582,8 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); if (lg == NULL) { - DEBUG2_3(printk("%s(%ld): failed to allocate Login IOCB.\n", - __func__, vha->host_no)); + ql_log(ql_log_warn, vha, 0x1062, + "Failed to allocate login IOCB.\n"); return QLA_MEMORY_ALLOC_FAILED; } memset(lg, 0, sizeof(struct logio_entry_24xx)); @@ -1631,21 +1603,21 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, lg->vp_index = vha->vp_idx; rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed to issue Login IOCB " - "(%x).\n", __func__, vha->host_no, rval)); + ql_dbg(ql_dbg_mbx, vha, 0x1063, + "Failed to issue login IOCB (%x).\n", rval); } else if (lg->entry_status != 0) { - DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " - "-- error status (%x).\n", __func__, vha->host_no, - lg->entry_status)); + ql_dbg(ql_dbg_mbx, vha, 0x1064, + "Failed to complete IOCB -- error status (%x).\n", + lg->entry_status); rval = QLA_FUNCTION_FAILED; } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { iop[0] = le32_to_cpu(lg->io_parameter[0]); iop[1] = le32_to_cpu(lg->io_parameter[1]); - DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " - "-- completion status (%x) ioparam=%x/%x.\n", __func__, - vha->host_no, le16_to_cpu(lg->comp_status), iop[0], - iop[1])); + ql_dbg(ql_dbg_mbx, vha, 0x1065, + "Failed to complete IOCB -- completion status (%x) " + "ioparam=%x/%x.\n", 
le16_to_cpu(lg->comp_status), + iop[0], iop[1]); switch (iop[0]) { case LSC_SCODE_PORTID_USED: @@ -1673,7 +1645,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, break; } } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1066, "Done %s.\n", __func__); iop[0] = le32_to_cpu(lg->io_parameter[0]); @@ -1728,7 +1700,7 @@ qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; - DEBUG11(printk("qla2x00_login_fabric(%ld): entered.\n", vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1067, "Entered %s.\n", __func__); mcp->mb[0] = MBC_LOGIN_FABRIC_PORT; mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; @@ -1771,13 +1743,12 @@ qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, rval = QLA_SUCCESS; /*EMPTY*/ - DEBUG2_3_11(printk("qla2x00_login_fabric(%ld): failed=%x " - "mb[0]=%x mb[1]=%x mb[2]=%x.\n", vha->host_no, rval, - mcp->mb[0], mcp->mb[1], mcp->mb[2])); + ql_dbg(ql_dbg_mbx, vha, 0x1068, + "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", + rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); } else { /*EMPTY*/ - DEBUG11(printk("qla2x00_login_fabric(%ld): done.\n", - vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1069, "Done %s.\n", __func__); } return rval; @@ -1808,13 +1779,13 @@ qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport, mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; + ql_dbg(ql_dbg_mbx, vha, 0x106a, "Entered %s.\n", __func__); + if (IS_FWI2_CAPABLE(ha)) return qla24xx_login_fabric(vha, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, mb_ret, opt); - DEBUG3(printk("%s(%ld): entered.\n", __func__, vha->host_no)); - mcp->mb[0] = MBC_LOGIN_LOOP_PORT; if (HAS_EXTENDED_IDS(ha)) mcp->mb[1] = fcport->loop_id; @@ -1845,15 +1816,12 @@ qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport, if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006) rval = QLA_SUCCESS; - DEBUG(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x " - "mb[6]=%x mb[7]=%x.\n", __func__, vha->host_no, rval, - mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7])); - DEBUG2_3(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x " - "mb[6]=%x mb[7]=%x.\n", __func__, vha->host_no, rval, - mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7])); + ql_dbg(ql_dbg_mbx, vha, 0x106b, + "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n", + rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]); } else { /*EMPTY*/ - DEBUG3(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x106c, "Done %s.\n", __func__); } return (rval); @@ -1870,12 +1838,12 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, struct req_que *req; struct rsp_que *rsp; - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x106d, "Entered %s.\n", __func__); lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); if (lg == NULL) { - DEBUG2_3(printk("%s(%ld): failed to allocate Logout IOCB.\n", - __func__, vha->host_no)); + ql_log(ql_log_warn, vha, 0x106e, + "Failed to allocate logout IOCB.\n"); return QLA_MEMORY_ALLOC_FAILED; } memset(lg, 0, sizeof(struct logio_entry_24xx)); @@ -1899,22 +1867,22 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed to issue Logout IOCB " - "(%x).\n", __func__, vha->host_no, rval)); + ql_dbg(ql_dbg_mbx, vha, 0x106f, + 
"Failed to issue logout IOCB (%x).\n", rval); } else if (lg->entry_status != 0) { - DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " - "-- error status (%x).\n", __func__, vha->host_no, - lg->entry_status)); + ql_dbg(ql_dbg_mbx, vha, 0x1070, + "Failed to complete IOCB -- error status (%x).\n", + lg->entry_status); rval = QLA_FUNCTION_FAILED; } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { - DEBUG2_3_11(printk("%s(%ld %d): failed to complete IOCB " - "-- completion status (%x) ioparam=%x/%x.\n", __func__, - vha->host_no, vha->vp_idx, le16_to_cpu(lg->comp_status), + ql_dbg(ql_dbg_mbx, vha, 0x1071, + "Failed to complete IOCB -- completion status (%x) " + "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status), le32_to_cpu(lg->io_parameter[0]), - le32_to_cpu(lg->io_parameter[1]))); + le32_to_cpu(lg->io_parameter[1])); } else { /*EMPTY*/ - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1072, "Done %s.\n", __func__); } dma_pool_free(ha->s_dma_pool, lg, lg_dma); @@ -1946,8 +1914,7 @@ qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - DEBUG11(printk("qla2x00_fabric_logout(%ld): entered.\n", - vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1073, "Entered %s.\n", __func__); mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT; mcp->out_mb = MBX_1|MBX_0; @@ -1966,12 +1933,11 @@ qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG2_3_11(printk("qla2x00_fabric_logout(%ld): failed=%x " - "mbx1=%x.\n", vha->host_no, rval, mcp->mb[1])); + ql_dbg(ql_dbg_mbx, vha, 0x1074, + "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]); } else { /*EMPTY*/ - DEBUG11(printk("qla2x00_fabric_logout(%ld): done.\n", - vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1075, "Done %s.\n", __func__); } return rval; @@ -1999,8 +1965,7 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n", - vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1076, "Entered %s.\n", __func__); mcp->mb[0] = MBC_LIP_FULL_LOGIN; mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? 
BIT_3 : 0; @@ -2014,12 +1979,10 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha) if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG2_3_11(printk("qla2x00_full_login_lip(%ld): failed=%x.\n", - vha->host_no, rval)); + ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval); } else { /*EMPTY*/ - DEBUG11(printk("qla2x00_full_login_lip(%ld): done.\n", - vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1078, "Done %s.\n", __func__); } return rval; @@ -2045,8 +2008,7 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - DEBUG11(printk("qla2x00_get_id_list(%ld): entered.\n", - vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1079, "Entered %s.\n", __func__); if (id_list == NULL) return QLA_FUNCTION_FAILED; @@ -2075,12 +2037,10 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma, if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG2_3_11(printk("qla2x00_get_id_list(%ld): failed=%x.\n", - vha->host_no, rval)); + ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval); } else { *entries = mcp->mb[1]; - DEBUG11(printk("qla2x00_get_id_list(%ld): done.\n", - vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x107b, "Done %s.\n", __func__); } return rval; @@ -2108,7 +2068,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x107c, "Entered %s.\n", __func__); mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; mcp->out_mb = MBX_0; @@ -2121,14 +2081,14 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt, if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG2_3_11(printk("%s(%ld): failed = %x.\n", __func__, - vha->host_no, mcp->mb[0])); + ql_dbg(ql_dbg_mbx, vha, 0x107d, + "Failed mb[0]=%x.\n", mcp->mb[0]); } else { - DEBUG11(printk("%s(%ld): done. 
mb1=%x mb2=%x mb3=%x mb6=%x " - "mb7=%x mb10=%x mb11=%x mb12=%x.\n", __func__, - vha->host_no, mcp->mb[1], mcp->mb[2], mcp->mb[3], - mcp->mb[6], mcp->mb[7], mcp->mb[10], mcp->mb[11], - mcp->mb[12])); + ql_dbg(ql_dbg_mbx, vha, 0x107e, + "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x " + "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2], + mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10], + mcp->mb[11], mcp->mb[12]); if (cur_xchg_cnt) *cur_xchg_cnt = mcp->mb[3]; @@ -2147,7 +2107,6 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt, return (rval); } -#if defined(QL_DEBUG_LEVEL_3) /* * qla2x00_get_fcal_position_map * Get FCAL (LILP) position map using mailbox command @@ -2172,10 +2131,12 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map) dma_addr_t pmap_dma; struct qla_hw_data *ha = vha->hw; + ql_dbg(ql_dbg_mbx, vha, 0x107f, "Entered %s.\n", __func__); + pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma); if (pmap == NULL) { - DEBUG2_3_11(printk("%s(%ld): **** Mem Alloc Failed ****", - __func__, vha->host_no)); + ql_log(ql_log_warn, vha, 0x1080, + "Memory alloc failed.\n"); return QLA_MEMORY_ALLOC_FAILED; } memset(pmap, 0, FCAL_MAP_SIZE); @@ -2193,10 +2154,11 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map) rval = qla2x00_mailbox_command(vha, mcp); if (rval == QLA_SUCCESS) { - DEBUG11(printk("%s(%ld): (mb0=%x/mb1=%x) FC/AL Position Map " - "size (%x)\n", __func__, vha->host_no, mcp->mb[0], - mcp->mb[1], (unsigned)pmap[0])); - DEBUG11(qla2x00_dump_buffer(pmap, pmap[0] + 1)); + ql_dbg(ql_dbg_mbx, vha, 0x1081, + "mb0/mb1=%x/%X FC/AL position map size (%x).\n", + mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]); + ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d, + pmap, pmap[0] + 1); if (pos_map) memcpy(pos_map, pmap, FCAL_MAP_SIZE); @@ -2204,15 +2166,13 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map) dma_pool_free(ha->s_dma_pool, pmap, pmap_dma); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, - vha->host_no, rval)); + ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval); } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1083, "Done %s.\n", __func__); } return rval; } -#endif /* * qla2x00_get_link_status @@ -2237,7 +2197,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id, uint32_t *siter, *diter, dwords; struct qla_hw_data *ha = vha->hw; - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1084, "Entered %s.\n", __func__); mcp->mb[0] = MBC_GET_LINK_STATUS; mcp->mb[2] = MSW(stats_dma); @@ -2266,11 +2226,12 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id, if (rval == QLA_SUCCESS) { if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { - DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n", - __func__, vha->host_no, mcp->mb[0])); + ql_dbg(ql_dbg_mbx, vha, 0x1085, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); rval = QLA_FUNCTION_FAILED; } else { /* Copy over data -- firmware data is LE. */ + ql_dbg(ql_dbg_mbx, vha, 0x1086, "Done %s.\n", __func__); dwords = offsetof(struct link_statistics, unused1) / 4; siter = diter = &stats->link_fail_cnt; while (dwords--) @@ -2278,8 +2239,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id, } } else { /* Failed. 
*/ - DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, - vha->host_no, rval)); + ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval); } return rval; @@ -2294,7 +2254,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, mbx_cmd_t *mcp = &mc; uint32_t *siter, *diter, dwords; - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1088, "Entered %s.\n", __func__); mcp->mb[0] = MBC_GET_LINK_PRIV_STATS; mcp->mb[2] = MSW(stats_dma); @@ -2312,10 +2272,11 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, if (rval == QLA_SUCCESS) { if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { - DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n", - __func__, vha->host_no, mcp->mb[0])); + ql_dbg(ql_dbg_mbx, vha, 0x1089, + "Failed mb[0]=%x.\n", mcp->mb[0]); rval = QLA_FUNCTION_FAILED; } else { + ql_dbg(ql_dbg_mbx, vha, 0x108a, "Done %s.\n", __func__); /* Copy over data -- firmware data is LE. */ dwords = sizeof(struct link_statistics) / 4; siter = diter = &stats->link_fail_cnt; @@ -2324,8 +2285,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, } } else { /* Failed. */ - DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, - vha->host_no, rval)); + ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval); } return rval; @@ -2345,7 +2305,7 @@ qla24xx_abort_command(srb_t *sp) struct qla_hw_data *ha = vha->hw; struct req_que *req = vha->req; - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x108c, "Entered %s.\n", __func__); spin_lock_irqsave(&ha->hardware_lock, flags); for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { @@ -2360,8 +2320,8 @@ qla24xx_abort_command(srb_t *sp) abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma); if (abt == NULL) { - DEBUG2_3(printk("%s(%ld): failed to allocate Abort IOCB.\n", - __func__, vha->host_no)); + ql_log(ql_log_warn, vha, 0x108d, + "Failed to allocate abort IOCB.\n"); return QLA_MEMORY_ALLOC_FAILED; } memset(abt, 0, sizeof(struct abort_entry_24xx)); @@ -2380,20 +2340,20 @@ qla24xx_abort_command(srb_t *sp) rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed to issue IOCB (%x).\n", - __func__, vha->host_no, rval)); + ql_dbg(ql_dbg_mbx, vha, 0x108e, + "Failed to issue IOCB (%x).\n", rval); } else if (abt->entry_status != 0) { - DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " - "-- error status (%x).\n", __func__, vha->host_no, - abt->entry_status)); + ql_dbg(ql_dbg_mbx, vha, 0x108f, + "Failed to complete IOCB -- error status (%x).\n", + abt->entry_status); rval = QLA_FUNCTION_FAILED; } else if (abt->nport_handle != __constant_cpu_to_le16(0)) { - DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " - "-- completion status (%x).\n", __func__, vha->host_no, - le16_to_cpu(abt->nport_handle))); + ql_dbg(ql_dbg_mbx, vha, 0x1090, + "Failed to complete IOCB -- completion status (%x).\n", + le16_to_cpu(abt->nport_handle)); rval = QLA_FUNCTION_FAILED; } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1091, "Done %s.\n", __func__); } dma_pool_free(ha->s_dma_pool, abt, abt_dma); @@ -2421,19 +2381,20 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, struct req_que *req; struct rsp_que *rsp; - DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no)); - vha = fcport->vha; ha = vha->hw; req = vha->req; + + ql_dbg(ql_dbg_mbx, vha, 0x1092, "Entered %s.\n", 
__func__); + if (ha->flags.cpu_affinity_enabled) rsp = ha->rsp_q_map[tag + 1]; else rsp = req->rsp; tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); if (tsk == NULL) { - DEBUG2_3(printk("%s(%ld): failed to allocate Task Management " - "IOCB.\n", __func__, vha->host_no)); + ql_log(ql_log_warn, vha, 0x1093, + "Failed to allocate task management IOCB.\n"); return QLA_MEMORY_ALLOC_FAILED; } memset(tsk, 0, sizeof(struct tsk_mgmt_cmd)); @@ -2457,30 +2418,30 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, sts = &tsk->p.sts; rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed to issue %s Reset IOCB " - "(%x).\n", __func__, vha->host_no, name, rval)); + ql_dbg(ql_dbg_mbx, vha, 0x1094, + "Failed to issue %s reset IOCB (%x).\n", name, rval); } else if (sts->entry_status != 0) { - DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " - "-- error status (%x).\n", __func__, vha->host_no, - sts->entry_status)); + ql_dbg(ql_dbg_mbx, vha, 0x1095, + "Failed to complete IOCB -- error status (%x).\n", + sts->entry_status); rval = QLA_FUNCTION_FAILED; } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { - DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " - "-- completion status (%x).\n", __func__, - vha->host_no, le16_to_cpu(sts->comp_status))); + ql_dbg(ql_dbg_mbx, vha, 0x1096, + "Failed to complete IOCB -- completion status (%x).\n", + le16_to_cpu(sts->comp_status)); rval = QLA_FUNCTION_FAILED; } else if (le16_to_cpu(sts->scsi_status) & SS_RESPONSE_INFO_LEN_VALID) { if (le32_to_cpu(sts->rsp_data_len) < 4) { - DEBUG2_3_11(printk("%s(%ld): ignoring inconsistent " - "data length -- not enough response info (%d).\n", - __func__, vha->host_no, - le32_to_cpu(sts->rsp_data_len))); + ql_dbg(ql_dbg_mbx, vha, 0x1097, + "Ignoring inconsistent data length -- not enough " + "response info (%d).\n", + le32_to_cpu(sts->rsp_data_len)); } else if (sts->data[3]) { - DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " - "-- response (%x).\n", __func__, - vha->host_no, sts->data[3])); + ql_dbg(ql_dbg_mbx, vha, 0x1098, + "Failed to complete IOCB -- response (%x).\n", + sts->data[3]); rval = QLA_FUNCTION_FAILED; } } @@ -2489,10 +2450,10 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l, type == TCF_LUN_RESET ? 
MK_SYNC_ID_LUN: MK_SYNC_ID); if (rval2 != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " - "(%x).\n", __func__, vha->host_no, rval2)); + ql_dbg(ql_dbg_mbx, vha, 0x1099, + "Failed to issue marker IOCB (%x).\n", rval2); } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x109a, "Done %s.\n", __func__); } dma_pool_free(ha->s_dma_pool, tsk, tsk_dma); @@ -2533,7 +2494,7 @@ qla2x00_system_error(scsi_qla_host_t *vha) if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha)) return QLA_FUNCTION_FAILED; - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x109b, "Entered %s.\n", __func__); mcp->mb[0] = MBC_GEN_SYSTEM_ERROR; mcp->out_mb = MBX_0; @@ -2543,10 +2504,9 @@ qla2x00_system_error(scsi_qla_host_t *vha) rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, - vha->host_no, rval)); + ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval); } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x109d, "Done %s.\n", __func__); } return rval; @@ -2566,7 +2526,7 @@ qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x109e, "Entered %s.\n", __func__); mcp->mb[0] = MBC_SERDES_PARAMS; mcp->mb[1] = BIT_0; @@ -2581,11 +2541,11 @@ qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g, if (rval != QLA_SUCCESS) { /*EMPTY*/ - DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, - vha->host_no, rval, mcp->mb[0])); + ql_dbg(ql_dbg_mbx, vha, 0x109f, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { /*EMPTY*/ - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x10a0, "Done %s.\n", __func__); } return rval; @@ -2601,7 +2561,7 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha) if (!IS_FWI2_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x10a1, "Entered %s.\n", __func__); mcp->mb[0] = MBC_STOP_FIRMWARE; mcp->out_mb = MBX_0; @@ -2611,12 +2571,11 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha) rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, - vha->host_no, rval)); + ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval); if (mcp->mb[0] == MBS_INVALID_COMMAND) rval = QLA_INVALID_COMMAND; } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x10a3, "Done %s.\n", __func__); } return rval; @@ -2630,14 +2589,14 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; + ql_dbg(ql_dbg_mbx, vha, 0x10a4, "Entered %s.\n", __func__); + if (!IS_FWI2_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; if (unlikely(pci_channel_offline(vha->hw->pdev))) return QLA_FUNCTION_FAILED; - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); - mcp->mb[0] = MBC_TRACE_CONTROL; mcp->mb[1] = TC_EFT_ENABLE; mcp->mb[2] = LSW(eft_dma); @@ -2652,10 +2611,11 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma, mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", - __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); + ql_dbg(ql_dbg_mbx, vha, 0x10a5, + "Failed=%x 
mb[0]=%x mb[1]=%x.\n", + rval, mcp->mb[0], mcp->mb[1]); } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x10a6, "Done %s.\n", __func__); } return rval; @@ -2668,14 +2628,14 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; + ql_dbg(ql_dbg_mbx, vha, 0x10a7, "Entered %s.\n", __func__); + if (!IS_FWI2_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; if (unlikely(pci_channel_offline(vha->hw->pdev))) return QLA_FUNCTION_FAILED; - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); - mcp->mb[0] = MBC_TRACE_CONTROL; mcp->mb[1] = TC_EFT_DISABLE; mcp->out_mb = MBX_1|MBX_0; @@ -2684,10 +2644,11 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha) mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", - __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); + ql_dbg(ql_dbg_mbx, vha, 0x10a8, + "Failed=%x mb[0]=%x mb[1]=%x.\n", + rval, mcp->mb[0], mcp->mb[1]); } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x10a9, "Done %s.\n", __func__); } return rval; @@ -2701,14 +2662,14 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; + ql_dbg(ql_dbg_mbx, vha, 0x10aa, "Entered %s.\n", __func__); + if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw)) return QLA_FUNCTION_FAILED; if (unlikely(pci_channel_offline(vha->hw->pdev))) return QLA_FUNCTION_FAILED; - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); - mcp->mb[0] = MBC_TRACE_CONTROL; mcp->mb[1] = TC_FCE_ENABLE; mcp->mb[2] = LSW(fce_dma); @@ -2727,10 +2688,11 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma, mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", - __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); + ql_dbg(ql_dbg_mbx, vha, 0x10ab, + "Failed=%x mb[0]=%x mb[1]=%x.\n", + rval, mcp->mb[0], mcp->mb[1]); } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x10ac, "Done %s.\n", __func__); if (mb) memcpy(mb, mcp->mb, 8 * sizeof(*mb)); @@ -2748,14 +2710,14 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; + ql_dbg(ql_dbg_mbx, vha, 0x10ad, "Entered %s.\n", __func__); + if (!IS_FWI2_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; if (unlikely(pci_channel_offline(vha->hw->pdev))) return QLA_FUNCTION_FAILED; - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); - mcp->mb[0] = MBC_TRACE_CONTROL; mcp->mb[1] = TC_FCE_DISABLE; mcp->mb[2] = TC_FCE_DISABLE_TRACE; @@ -2766,10 +2728,11 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd) mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", - __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); + ql_dbg(ql_dbg_mbx, vha, 0x10ae, + "Failed=%x mb[0]=%x mb[1]=%x.\n", + rval, mcp->mb[0], mcp->mb[1]); } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x10af, "Done %s.\n", __func__); if (wr) *wr = (uint64_t) mcp->mb[5] << 48 | @@ -2794,11 +2757,11 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; + ql_dbg(ql_dbg_mbx, vha, 0x10b0, "Entered %s.\n", __func__); + if 
(!IS_IIDMA_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); - mcp->mb[0] = MBC_PORT_PARAMS; mcp->mb[1] = loop_id; mcp->mb[2] = mcp->mb[3] = 0; @@ -2817,10 +2780,9 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, } if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, - vha->host_no, rval)); + ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval); } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x10b2, "Done %s.\n", __func__); if (port_speed) *port_speed = mcp->mb[3]; } @@ -2836,11 +2798,11 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; + ql_dbg(ql_dbg_mbx, vha, 0x10b3, "Entered %s.\n", __func__); + if (!IS_IIDMA_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); - mcp->mb[0] = MBC_PORT_PARAMS; mcp->mb[1] = loop_id; mcp->mb[2] = BIT_0; @@ -2863,10 +2825,9 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, } if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, - vha->host_no, rval)); + ql_dbg(ql_dbg_mbx, vha, 0x10b4, "Failed=%x.\n", rval); } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x10b5, "Done %s.\n", __func__); } return rval; @@ -2882,33 +2843,36 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, scsi_qla_host_t *vp; unsigned long flags; + ql_dbg(ql_dbg_mbx, vha, 0x10b6, "Entered %s.\n", __func__); + if (rptid_entry->entry_status != 0) return; if (rptid_entry->format == 0) { - DEBUG15(printk("%s:format 0 : scsi(%ld) number of VPs setup %d," - " number of VPs acquired %d\n", __func__, vha->host_no, - MSB(le16_to_cpu(rptid_entry->vp_count)), - LSB(le16_to_cpu(rptid_entry->vp_count)))); - DEBUG15(printk("%s primary port id %02x%02x%02x\n", __func__, - rptid_entry->port_id[2], rptid_entry->port_id[1], - rptid_entry->port_id[0])); + ql_dbg(ql_dbg_mbx, vha, 0x10b7, + "Format 0 : Number of VPs setup %d, number of " + "VPs acquired %d.\n", + MSB(le16_to_cpu(rptid_entry->vp_count)), + LSB(le16_to_cpu(rptid_entry->vp_count))); + ql_dbg(ql_dbg_mbx, vha, 0x10b8, + "Primary port id %02x%02x%02x.\n", + rptid_entry->port_id[2], rptid_entry->port_id[1], + rptid_entry->port_id[0]); } else if (rptid_entry->format == 1) { vp_idx = LSB(stat); - DEBUG15(printk("%s:format 1: scsi(%ld): VP[%d] enabled " - "- status %d - " - "with port id %02x%02x%02x\n", __func__, vha->host_no, - vp_idx, MSB(stat), + ql_dbg(ql_dbg_mbx, vha, 0x10b9, + "Format 1: VP[%d] enabled - status %d - with " + "port id %02x%02x%02x.\n", vp_idx, MSB(stat), rptid_entry->port_id[2], rptid_entry->port_id[1], - rptid_entry->port_id[0])); + rptid_entry->port_id[0]); vp = vha; if (vp_idx == 0 && (MSB(stat) != 1)) goto reg_needed; if (MSB(stat) == 1) { - DEBUG2(printk("scsi(%ld): Could not acquire ID for " - "VP[%d].\n", vha->host_no, vp_idx)); + ql_dbg(ql_dbg_mbx, vha, 0x10ba, + "Could not acquire ID for VP[%d].\n", vp_idx); return; } @@ -2963,10 +2927,12 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha) /* This can be called by the parent */ + ql_dbg(ql_dbg_mbx, vha, 0x10bb, "Entered %s.\n", __func__); + vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma); if (!vpmod) { - DEBUG2_3(printk("%s(%ld): failed to allocate Modify VP " - "IOCB.\n", __func__, vha->host_no)); + ql_log(ql_log_warn, vha, 0x10bc, + "Failed to allocate modify VP IOCB.\n"); return 
QLA_MEMORY_ALLOC_FAILED; } @@ -2983,22 +2949,21 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha) rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed to issue VP config IOCB" - "(%x).\n", __func__, base_vha->host_no, rval)); + ql_dbg(ql_dbg_mbx, vha, 0x10bd, + "Failed to issue VP config IOCB (%x).\n", rval); } else if (vpmod->comp_status != 0) { - DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " - "-- error status (%x).\n", __func__, base_vha->host_no, - vpmod->comp_status)); + ql_dbg(ql_dbg_mbx, vha, 0x10be, + "Failed to complete IOCB -- error status (%x).\n", + vpmod->comp_status); rval = QLA_FUNCTION_FAILED; } else if (vpmod->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { - DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " - "-- completion status (%x).\n", __func__, base_vha->host_no, - le16_to_cpu(vpmod->comp_status))); + ql_dbg(ql_dbg_mbx, vha, 0x10bf, + "Failed to complete IOCB -- completion status (%x).\n", + le16_to_cpu(vpmod->comp_status)); rval = QLA_FUNCTION_FAILED; } else { /* EMPTY */ - DEBUG11(printk("%s(%ld): done.\n", __func__, - base_vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x10c0, "Done %s.\n", __func__); fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING); } dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma); @@ -3032,17 +2997,16 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd) int vp_index = vha->vp_idx; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); - DEBUG11(printk("%s(%ld): entered. Enabling index %d\n", __func__, - vha->host_no, vp_index)); + ql_dbg(ql_dbg_mbx, vha, 0x10c1, + "Entered %s enabling index %d.\n", __func__, vp_index); if (vp_index == 0 || vp_index >= ha->max_npiv_vports) return QLA_PARAMETER_ERROR; vce = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma); if (!vce) { - DEBUG2_3(printk("%s(%ld): " - "failed to allocate VP Control IOCB.\n", __func__, - base_vha->host_no)); + ql_log(ql_log_warn, vha, 0x10c2, + "Failed to allocate VP control IOCB.\n"); return QLA_MEMORY_ALLOC_FAILED; } memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx)); @@ -3063,28 +3027,20 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd) rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed to issue VP control IOCB" - "(%x).\n", __func__, base_vha->host_no, rval)); - printk("%s(%ld): failed to issue VP control IOCB" - "(%x).\n", __func__, base_vha->host_no, rval); + ql_dbg(ql_dbg_mbx, vha, 0x10c3, + "Failed to issue VP control IOCB (%x).\n", rval); } else if (vce->entry_status != 0) { - DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " - "-- error status (%x).\n", __func__, base_vha->host_no, - vce->entry_status)); - printk("%s(%ld): failed to complete IOCB " - "-- error status (%x).\n", __func__, base_vha->host_no, + ql_dbg(ql_dbg_mbx, vha, 0x10c4, + "Failed to complete IOCB -- error status (%x).\n", vce->entry_status); rval = QLA_FUNCTION_FAILED; } else if (vce->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { - DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " - "-- completion status (%x).\n", __func__, base_vha->host_no, - le16_to_cpu(vce->comp_status))); - printk("%s(%ld): failed to complete IOCB " - "-- completion status (%x).\n", __func__, base_vha->host_no, + ql_dbg(ql_dbg_mbx, vha, 0x10c5, + "Failed to complet IOCB -- completion status (%x).\n", le16_to_cpu(vce->comp_status)); rval = QLA_FUNCTION_FAILED; } else { - DEBUG2(printk("%s(%ld): done.\n", __func__, base_vha->host_no)); + 
ql_dbg(ql_dbg_mbx, vha, 0x10c6, "Done %s.\n", __func__); } dma_pool_free(ha->s_dma_pool, vce, vce_dma); @@ -3121,6 +3077,8 @@ qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; + ql_dbg(ql_dbg_mbx, vha, 0x10c7, "Entered %s.\n", __func__); + /* * This command is implicitly executed by firmware during login for the * physical hosts @@ -3155,7 +3113,7 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1009, "Entered %s.\n", __func__); if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) { mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; @@ -3186,10 +3144,10 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, - vha->host_no, rval, mcp->mb[0])); + ql_dbg(ql_dbg_mbx, vha, 0x1008, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1007, "Done %s.\n", __func__); } return rval; @@ -3214,12 +3172,10 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status) unsigned long flags; struct qla_hw_data *ha = vha->hw; - DEBUG16(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x10c8, "Entered %s.\n", __func__); mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); if (mn == NULL) { - DEBUG2_3(printk("%s(%ld): failed to allocate Verify ISP84XX " - "IOCB.\n", __func__, vha->host_no)); return QLA_MEMORY_ALLOC_FAILED; } @@ -3237,43 +3193,43 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status) mn->p.req.entry_count = 1; mn->p.req.options = cpu_to_le16(options); - DEBUG16(printk("%s(%ld): Dump of Verify Request.\n", __func__, - vha->host_no)); - DEBUG16(qla2x00_dump_buffer((uint8_t *)mn, - sizeof(*mn))); + ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c, + "Dump of Verify Request.\n"); + ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e, + (uint8_t *)mn, sizeof(*mn)); rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120); if (rval != QLA_SUCCESS) { - DEBUG2_16(printk("%s(%ld): failed to issue Verify " - "IOCB (%x).\n", __func__, vha->host_no, rval)); + ql_dbg(ql_dbg_mbx, vha, 0x10cb, + "Failed to issue verify IOCB (%x).\n", rval); goto verify_done; } - DEBUG16(printk("%s(%ld): Dump of Verify Response.\n", __func__, - vha->host_no)); - DEBUG16(qla2x00_dump_buffer((uint8_t *)mn, - sizeof(*mn))); + ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110, + "Dump of Verify Response.\n"); + ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118, + (uint8_t *)mn, sizeof(*mn)); status[0] = le16_to_cpu(mn->p.rsp.comp_status); status[1] = status[0] == CS_VCS_CHIP_FAILURE ? le16_to_cpu(mn->p.rsp.failure_code) : 0; - DEBUG2_16(printk("%s(%ld): cs=%x fc=%x\n", __func__, - vha->host_no, status[0], status[1])); + ql_dbg(ql_dbg_mbx, vha, 0x10ce, + "cs=%x fc=%x.\n", status[0], status[1]); if (status[0] != CS_COMPLETE) { rval = QLA_FUNCTION_FAILED; if (!(options & VCO_DONT_UPDATE_FW)) { - DEBUG2_16(printk("%s(%ld): Firmware update " - "failed. Retrying without update " - "firmware.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x10cf, + "Firmware update failed. 
Retrying " + "without update firmware.\n"); options |= VCO_DONT_UPDATE_FW; options &= ~VCO_FORCE_UPDATE; retry = 1; } } else { - DEBUG2_16(printk("%s(%ld): firmware updated to %x.\n", - __func__, vha->host_no, - le32_to_cpu(mn->p.rsp.fw_ver))); + ql_dbg(ql_dbg_mbx, vha, 0x10d0, + "Firmware updated to %x.\n", + le32_to_cpu(mn->p.rsp.fw_ver)); /* NOTE: we only update OP firmware. */ spin_lock_irqsave(&ha->cs84xx->access_lock, flags); @@ -3288,10 +3244,9 @@ verify_done: dma_pool_free(ha->s_dma_pool, mn, mn_dma); if (rval != QLA_SUCCESS) { - DEBUG2_16(printk("%s(%ld): failed=%x.\n", __func__, - vha->host_no, rval)); + ql_dbg(ql_dbg_mbx, vha, 0x10d1, "Failed=%x.\n", rval); } else { - DEBUG16(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x10d2, "Done %s.\n", __func__); } return rval; @@ -3307,6 +3262,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) struct device_reg_25xxmq __iomem *reg; struct qla_hw_data *ha = vha->hw; + ql_dbg(ql_dbg_mbx, vha, 0x10d3, "Entered %s.\n", __func__); + mcp->mb[0] = MBC_INITIALIZE_MULTIQ; mcp->mb[1] = req->options; mcp->mb[2] = MSW(LSD(req->dma)); @@ -3344,9 +3301,13 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) spin_unlock_irqrestore(&ha->hardware_lock, flags); rval = qla2x00_mailbox_command(vha, mcp); - if (rval != QLA_SUCCESS) - DEBUG2_3_11(printk(KERN_WARNING "%s(%ld): failed=%x mb0=%x.\n", - __func__, vha->host_no, rval, mcp->mb[0])); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x10d4, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx, vha, 0x10d5, "Done %s.\n", __func__); + } + return rval; } @@ -3360,6 +3321,8 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) struct device_reg_25xxmq __iomem *reg; struct qla_hw_data *ha = vha->hw; + ql_dbg(ql_dbg_mbx, vha, 0x10d6, "Entered %s.\n", __func__); + mcp->mb[0] = MBC_INITIALIZE_MULTIQ; mcp->mb[1] = rsp->options; mcp->mb[2] = MSW(LSD(rsp->dma)); @@ -3393,10 +3356,13 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) spin_unlock_irqrestore(&ha->hardware_lock, flags); rval = qla2x00_mailbox_command(vha, mcp); - if (rval != QLA_SUCCESS) - DEBUG2_3_11(printk(KERN_WARNING "%s(%ld): failed=%x " - "mb0=%x.\n", __func__, - vha->host_no, rval, mcp->mb[0])); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x10d7, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx, vha, 0x10d8, "Done %s.\n", __func__); + } + return rval; } @@ -3407,7 +3373,7 @@ qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x10d9, "Entered %s.\n", __func__); mcp->mb[0] = MBC_IDC_ACK; memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); @@ -3418,10 +3384,10 @@ qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb) rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, - vha->host_no, rval, mcp->mb[0])); + ql_dbg(ql_dbg_mbx, vha, 0x10da, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x10db, "Done %s.\n", __func__); } return rval; @@ -3434,11 +3400,11 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; + ql_dbg(ql_dbg_mbx, vha, 0x10dc, "Entered %s.\n", __func__); + if (!IS_QLA81XX(vha->hw)) return 
QLA_FUNCTION_FAILED; - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); - mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE; mcp->out_mb = MBX_1|MBX_0; @@ -3448,10 +3414,11 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size) rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", - __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); + ql_dbg(ql_dbg_mbx, vha, 0x10dd, + "Failed=%x mb[0]=%x mb[1]=%x.\n", + rval, mcp->mb[0], mcp->mb[1]); } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x10de, "Done %s.\n", __func__); *sector_size = mcp->mb[1]; } @@ -3468,7 +3435,7 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable) if (!IS_QLA81XX(vha->hw)) return QLA_FUNCTION_FAILED; - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x10df, "Entered %s.\n", __func__); mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE : @@ -3480,10 +3447,11 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable) rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", - __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); + ql_dbg(ql_dbg_mbx, vha, 0x10e0, + "Failed=%x mb[0]=%x mb[1]=%x.\n", + rval, mcp->mb[0], mcp->mb[1]); } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x10e1, "Done %s.\n", __func__); } return rval; @@ -3499,7 +3467,7 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish) if (!IS_QLA81XX(vha->hw)) return QLA_FUNCTION_FAILED; - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x10e2, "Entered %s.\n", __func__); mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR; @@ -3514,11 +3482,11 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish) rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x " - "mb[2]=%x.\n", __func__, vha->host_no, rval, mcp->mb[0], - mcp->mb[1], mcp->mb[2])); + ql_dbg(ql_dbg_mbx, vha, 0x10e3, + "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", + rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x10e4, "Done %s.\n", __func__); } return rval; @@ -3531,7 +3499,7 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x10e5, "Entered %s.\n", __func__); mcp->mb[0] = MBC_RESTART_MPI_FW; mcp->out_mb = MBX_0; @@ -3541,10 +3509,11 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha) rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x mb[1]=0x%x.\n", - __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); + ql_dbg(ql_dbg_mbx, vha, 0x10e6, + "Failed=%x mb[0]=%x mb[1]=%x.\n", + rval, mcp->mb[0], mcp->mb[1]); } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x10e7, "Done %s.\n", __func__); } return rval; @@ -3559,11 +3528,11 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; + 
ql_dbg(ql_dbg_mbx, vha, 0x10e8, "Entered %s.\n", __func__); + if (!IS_FWI2_CAPABLE(ha)) return QLA_FUNCTION_FAILED; - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); - if (len == 1) opt |= BIT_0; @@ -3586,10 +3555,10 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, *sfp = mcp->mb[1]; if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, - vha->host_no, rval, mcp->mb[0])); + ql_dbg(ql_dbg_mbx, vha, 0x10e9, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x10ea, "Done %s.\n", __func__); } return rval; @@ -3604,11 +3573,11 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; + ql_dbg(ql_dbg_mbx, vha, 0x10eb, "Entered %s.\n", __func__); + if (!IS_FWI2_CAPABLE(ha)) return QLA_FUNCTION_FAILED; - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); - if (len == 1) opt |= BIT_0; @@ -3631,10 +3600,10 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, - vha->host_no, rval, mcp->mb[0])); + ql_dbg(ql_dbg_mbx, vha, 0x10ec, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x10ed, "Done %s.\n", __func__); } return rval; @@ -3648,11 +3617,11 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; + ql_dbg(ql_dbg_mbx, vha, 0x10ee, "Entered %s.\n", __func__); + if (!IS_QLA8XXX_TYPE(vha->hw)) return QLA_FUNCTION_FAILED; - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); - mcp->mb[0] = MBC_GET_XGMAC_STATS; mcp->mb[2] = MSW(stats_dma); mcp->mb[3] = LSW(stats_dma); @@ -3666,11 +3635,12 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma, rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x " - "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no, rval, - mcp->mb[0], mcp->mb[1], mcp->mb[2])); + ql_dbg(ql_dbg_mbx, vha, 0x10ef, + "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", + rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x10f0, "Done %s.\n", __func__); + *actual_size = mcp->mb[2] << 2; } @@ -3686,11 +3656,11 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; + ql_dbg(ql_dbg_mbx, vha, 0x10f1, "Entered %s.\n", __func__); + if (!IS_QLA8XXX_TYPE(vha->hw)) return QLA_FUNCTION_FAILED; - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); - mcp->mb[0] = MBC_GET_DCBX_PARAMS; mcp->mb[1] = 0; mcp->mb[2] = MSW(tlv_dma); @@ -3705,11 +3675,11 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma, rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x " - "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no, rval, - mcp->mb[0], mcp->mb[1], mcp->mb[2])); + ql_dbg(ql_dbg_mbx, vha, 0x10f2, + "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", + rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x10f3, "Done %s.\n", __func__); } return rval; @@ -3722,11 +3692,11 @@ 
qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; + ql_dbg(ql_dbg_mbx, vha, 0x10f4, "Entered %s.\n", __func__); + if (!IS_FWI2_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); - mcp->mb[0] = MBC_READ_RAM_EXTENDED; mcp->mb[1] = LSW(risc_addr); mcp->mb[8] = MSW(risc_addr); @@ -3736,10 +3706,10 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data) mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, - vha->host_no, rval, mcp->mb[0])); + ql_dbg(ql_dbg_mbx, vha, 0x10f5, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x10f6, "Done %s.\n", __func__); *data = mcp->mb[3] << 16 | mcp->mb[2]; } @@ -3755,7 +3725,7 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, mbx_cmd_t *mcp = &mc; uint32_t iter_cnt = 0x1; - DEBUG11(printk("scsi(%ld): entered.\n", vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x10f7, "Entered %s.\n", __func__); memset(mcp->mb, 0 , sizeof(mcp->mb)); mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK; @@ -3794,15 +3764,12 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { - DEBUG2(printk(KERN_WARNING - "(%ld): failed=%x mb[0]=0x%x " - "mb[1]=0x%x mb[2]=0x%x mb[3]=0x%x mb[18]=0x%x " - "mb[19]=0x%x.\n", - vha->host_no, rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], - mcp->mb[3], mcp->mb[18], mcp->mb[19])); + ql_dbg(ql_dbg_mbx, vha, 0x10f8, + "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x " + "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], + mcp->mb[3], mcp->mb[18], mcp->mb[19]); } else { - DEBUG2(printk(KERN_WARNING - "scsi(%ld): done.\n", vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x10f9, "Done %s.\n", __func__); } /* Copy mailbox information */ @@ -3819,7 +3786,7 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; - DEBUG11(printk("scsi(%ld): entered.\n", vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x10fa, "Entered %s.\n", __func__); memset(mcp->mb, 0 , sizeof(mcp->mb)); mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; @@ -3858,12 +3825,11 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { - DEBUG2(printk(KERN_WARNING - "(%ld): failed=%x mb[0]=0x%x mb[1]=0x%x.\n", - vha->host_no, rval, mcp->mb[0], mcp->mb[1])); + ql_dbg(ql_dbg_mbx, vha, 0x10fb, + "Failed=%x mb[0]=%x mb[1]=%x.\n", + rval, mcp->mb[0], mcp->mb[1]); } else { - DEBUG2(printk(KERN_WARNING - "scsi(%ld): done.\n", vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x10fc, "Done %s.\n", __func__); } /* Copy mailbox information */ @@ -3872,14 +3838,14 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, } int -qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic) +qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - DEBUG16(printk("%s(%ld): enable_diag=%d entered.\n", __func__, - ha->host_no, enable_diagnostic)); + ql_dbg(ql_dbg_mbx, vha, 0x10fd, + "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic); mcp->mb[0] = MBC_ISP84XX_RESET; mcp->mb[1] = enable_diagnostic; @@ -3887,13 +3853,12 @@ qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t 
enable_diagnostic) mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; - rval = qla2x00_mailbox_command(ha, mcp); + rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) - DEBUG16(printk("%s(%ld): failed=%x.\n", __func__, ha->host_no, - rval)); + ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval); else - DEBUG16(printk("%s(%ld): done.\n", __func__, ha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x10ff, "Done %s.\n", __func__); return rval; } @@ -3905,11 +3870,11 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; + ql_dbg(ql_dbg_mbx, vha, 0x1100, "Entered %s.\n", __func__); + if (!IS_FWI2_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); - mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED; mcp->mb[1] = LSW(risc_addr); mcp->mb[2] = LSW(data); @@ -3921,10 +3886,10 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data) mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, - vha->host_no, rval, mcp->mb[0])); + ql_dbg(ql_dbg_mbx, vha, 0x1101, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1102, "Done %s.\n", __func__); } return rval; @@ -3941,8 +3906,7 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb) rval = QLA_SUCCESS; - DEBUG11(qla_printk(KERN_INFO, ha, - "%s(%ld): entered.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1103, "Entered %s.\n", __func__); clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); @@ -3982,11 +3946,10 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb) rval = QLA_FUNCTION_FAILED; if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk(KERN_INFO "%s(%ld): failed=%x mb[0]=%x.\n", - __func__, vha->host_no, rval, mb[0])); + ql_dbg(ql_dbg_mbx, vha, 0x1104, + "Failed=%x mb[0]=%x.\n", rval, mb[0]); } else { - DEBUG11(printk(KERN_INFO - "%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1105, "Done %s.\n", __func__); } return rval; @@ -3999,12 +3962,11 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha) mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; + ql_dbg(ql_dbg_mbx, vha, 0x1106, "Entered %s.\n", __func__); + if (!IS_FWI2_CAPABLE(ha)) return QLA_FUNCTION_FAILED; - DEBUG11(qla_printk(KERN_INFO, ha, - "%s(%ld): entered.\n", __func__, vha->host_no)); - mcp->mb[0] = MBC_DATA_RATE; mcp->mb[1] = 0; mcp->out_mb = MBX_1|MBX_0; @@ -4013,11 +3975,10 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha) mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk(KERN_INFO "%s(%ld): failed=%x mb[0]=%x.\n", - __func__, vha->host_no, rval, mcp->mb[0])); + ql_dbg(ql_dbg_mbx, vha, 0x1107, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { - DEBUG11(printk(KERN_INFO - "%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1108, "Done %s.\n", __func__); if (mcp->mb[1] != 0x7) ha->link_data_rate = mcp->mb[1]; } @@ -4033,8 +3994,7 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb) mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; - DEBUG11(printk(KERN_INFO - "%s(%ld): entered.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1109, "Entered %s.\n", __func__); if (!IS_QLA81XX(ha)) return QLA_FUNCTION_FAILED; @@ -4047,15 +4007,13 @@ 
qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb) rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk(KERN_WARNING - "%s(%ld): failed=%x (%x).\n", __func__, - vha->host_no, rval, mcp->mb[0])); + ql_dbg(ql_dbg_mbx, vha, 0x110a, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { /* Copy all bits to preserve original value */ memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4); - DEBUG11(printk(KERN_INFO - "%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x110b, "Done %s.\n", __func__); } return rval; } @@ -4067,8 +4025,7 @@ qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - DEBUG11(printk(KERN_INFO - "%s(%ld): entered.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x110c, "Entered %s.\n", __func__); mcp->mb[0] = MBC_SET_PORT_CONFIG; /* Copy all bits to preserve original setting */ @@ -4080,12 +4037,10 @@ qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb) rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk(KERN_WARNING - "%s(%ld): failed=%x (%x).\n", __func__, - vha->host_no, rval, mcp->mb[0])); + ql_dbg(ql_dbg_mbx, vha, 0x110d, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else - DEBUG11(printk(KERN_INFO - "%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x110e, "Done %s.\n", __func__); return rval; } @@ -4100,12 +4055,11 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority, mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; + ql_dbg(ql_dbg_mbx, vha, 0x110f, "Entered %s.\n", __func__); + if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) return QLA_FUNCTION_FAILED; - DEBUG11(printk(KERN_INFO - "%s(%ld): entered.\n", __func__, vha->host_no)); - mcp->mb[0] = MBC_PORT_PARAMS; mcp->mb[1] = loop_id; if (ha->flags.fcp_prio_enabled) @@ -4127,12 +4081,9 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority, } if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk(KERN_WARNING - "%s(%ld): failed=%x.\n", __func__, - vha->host_no, rval)); + ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval); } else { - DEBUG11(printk(KERN_INFO - "%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x10cc, "Done %s.\n", __func__); } return rval; @@ -4145,13 +4096,12 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac) uint8_t byte; struct qla_hw_data *ha = vha->hw; - DEBUG11(printk(KERN_INFO "%s(%ld): entered.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x10ca, "Entered %s.\n", __func__); /* Integer part */ rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk(KERN_WARNING - "%s(%ld): failed=%x.\n", __func__, vha->host_no, rval)); + ql_dbg(ql_dbg_mbx, vha, 0x10c9, "Failed=%x.\n", rval); ha->flags.thermal_supported = 0; goto fail; } @@ -4160,14 +4110,13 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac) /* Fraction part */ rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x10, 1, BIT_13|BIT_0); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk(KERN_WARNING - "%s(%ld): failed=%x.\n", __func__, vha->host_no, rval)); + ql_dbg(ql_dbg_mbx, vha, 0x1019, "Failed=%x.\n", rval); ha->flags.thermal_supported = 0; goto fail; } *frac = (byte >> 6) * 25; - DEBUG11(printk(KERN_INFO "%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x1018, "Done %s.\n", __func__); fail: return rval; } @@ -4180,12 +4129,11 @@ 
qla82xx_mbx_intr_enable(scsi_qla_host_t *vha) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; + ql_dbg(ql_dbg_mbx, vha, 0x1017, "Entered %s.\n", __func__); + if (!IS_FWI2_CAPABLE(ha)) return QLA_FUNCTION_FAILED; - DEBUG11(qla_printk(KERN_INFO, ha, - "%s(%ld): entered.\n", __func__, vha->host_no)); - memset(mcp, 0, sizeof(mbx_cmd_t)); mcp->mb[0] = MBC_TOGGLE_INTERRUPT; mcp->mb[1] = 1; @@ -4197,12 +4145,10 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha) rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(qla_printk(KERN_WARNING, ha, - "%s(%ld): failed=%x mb[0]=%x.\n", __func__, - vha->host_no, rval, mcp->mb[0])); + ql_dbg(ql_dbg_mbx, vha, 0x1016, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { - DEBUG11(qla_printk(KERN_INFO, ha, - "%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x100e, "Done %s.\n", __func__); } return rval; @@ -4216,12 +4162,11 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; + ql_dbg(ql_dbg_mbx, vha, 0x100d, "Entered %s.\n", __func__); + if (!IS_QLA82XX(ha)) return QLA_FUNCTION_FAILED; - DEBUG11(qla_printk(KERN_INFO, ha, - "%s(%ld): entered.\n", __func__, vha->host_no)); - memset(mcp, 0, sizeof(mbx_cmd_t)); mcp->mb[0] = MBC_TOGGLE_INTERRUPT; mcp->mb[1] = 0; @@ -4233,12 +4178,10 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha) rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { - DEBUG2_3_11(qla_printk(KERN_WARNING, ha, - "%s(%ld): failed=%x mb[0]=%x.\n", __func__, - vha->host_no, rval, mcp->mb[0])); + ql_dbg(ql_dbg_mbx, vha, 0x100c, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { - DEBUG11(qla_printk(KERN_INFO, ha, - "%s(%ld): done.\n", __func__, vha->host_no)); + ql_dbg(ql_dbg_mbx, vha, 0x100b, "Done %s.\n", __func__); } return rval; diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index 5e343919aca..c706ed37000 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -36,8 +36,9 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha) mutex_lock(&ha->vport_lock); vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1); if (vp_id > ha->max_npiv_vports) { - DEBUG15(printk ("vp_id %d is bigger than max-supported %d.\n", - vp_id, ha->max_npiv_vports)); + ql_dbg(ql_dbg_vport, vha, 0xa000, + "vp_id %d is bigger than max-supported %d.\n", + vp_id, ha->max_npiv_vports); mutex_unlock(&ha->vport_lock); return vp_id; } @@ -131,9 +132,9 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha) fc_port_t *fcport; list_for_each_entry(fcport, &vha->vp_fcports, list) { - DEBUG15(printk("scsi(%ld): Marking port dead, " - "loop_id=0x%04x :%x\n", - vha->host_no, fcport->loop_id, fcport->vp_idx)); + ql_dbg(ql_dbg_vport, vha, 0xa001, + "Marking port dead, loop_id=0x%04x : %x.\n", + fcport->loop_id, fcport->vp_idx); qla2x00_mark_device_lost(vha, fcport, 0, 0); qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); @@ -187,13 +188,13 @@ qla24xx_enable_vp(scsi_qla_host_t *vha) goto enable_failed; } - DEBUG15(qla_printk(KERN_INFO, ha, - "Virtual port with id: %d - Enabled\n", vha->vp_idx)); + ql_dbg(ql_dbg_taskm, vha, 0x801a, + "Virtual port with id: %d - Enabled.\n", vha->vp_idx); return 0; enable_failed: - DEBUG15(qla_printk(KERN_INFO, ha, - "Virtual port with id: %d - Disabled\n", vha->vp_idx)); + ql_dbg(ql_dbg_taskm, vha, 0x801b, + "Virtual port with id: %d - Disabled.\n", vha->vp_idx); return 1; } @@ -205,12 +206,12 @@ qla24xx_configure_vp(scsi_qla_host_t *vha) fc_vport = vha->fc_vport; - DEBUG15(printk("scsi(%ld): %s: change 
request #3 for this host.\n", - vha->host_no, __func__)); + ql_dbg(ql_dbg_vport, vha, 0xa002, + "%s: change request #3.\n", __func__); ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx); if (ret != QLA_SUCCESS) { - DEBUG15(qla_printk(KERN_ERR, vha->hw, "Failed to enable " - "receiving of RSCN requests: 0x%x\n", ret)); + ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable " + "receiving of RSCN requests: 0x%x.\n", ret); return; } else { /* Corresponds to SCR enabled */ @@ -248,9 +249,9 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb) case MBA_CHG_IN_CONNECTION: case MBA_PORT_UPDATE: case MBA_RSCN_UPDATE: - DEBUG15(printk("scsi(%ld)%s: Async_event for" - " VP[%d], mb = 0x%x, vha=%p\n", - vha->host_no, __func__, i, *mb, vha)); + ql_dbg(ql_dbg_async, vha, 0x5024, + "Async_event for VP[%d], mb=0x%x vha=%p.\n", + i, *mb, vha); qla2x00_async_event(vha, rsp, mb); break; } @@ -286,37 +287,49 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha) if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL); - DEBUG15(printk("scsi(%ld): Scheduling enable of Vport %d...\n", - vha->host_no, vha->vp_idx)); + ql_dbg(ql_dbg_taskm, vha, 0x801d, + "Scheduling enable of Vport %d.\n", vha->vp_idx); return qla24xx_enable_vp(vha); } static int qla2x00_do_dpc_vp(scsi_qla_host_t *vha) { + ql_dbg(ql_dbg_dpc, vha, 0x4012, + "Entering %s.\n", __func__); + ql_dbg(ql_dbg_dpc, vha, 0x4013, + "vp_flags: 0x%lx.\n", vha->vp_flags); + qla2x00_do_work(vha); if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) { /* VP acquired. complete port configuration */ + ql_dbg(ql_dbg_dpc, vha, 0x4014, + "Configure VP scheduled.\n"); qla24xx_configure_vp(vha); + ql_dbg(ql_dbg_dpc, vha, 0x4015, + "Configure VP end.\n"); return 0; } if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) { + ql_dbg(ql_dbg_dpc, vha, 0x4016, + "FCPort update scheduled.\n"); qla2x00_update_fcports(vha); clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags); + ql_dbg(ql_dbg_dpc, vha, 0x4017, + "FCPort update end.\n"); } if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) && !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) && atomic_read(&vha->loop_state) != LOOP_DOWN) { - DEBUG(printk("scsi(%ld): qla2x00_port_login()\n", - vha->host_no)); + ql_dbg(ql_dbg_dpc, vha, 0x4018, + "Relogin needed scheduled.\n"); qla2x00_relogin(vha); - - DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n", - vha->host_no)); + ql_dbg(ql_dbg_dpc, vha, 0x4019, + "Relogin needed end.\n"); } if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) && @@ -326,11 +339,17 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha) if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) { + ql_dbg(ql_dbg_dpc, vha, 0x401a, + "Loop resync scheduled.\n"); qla2x00_loop_resync(vha); clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags); + ql_dbg(ql_dbg_dpc, vha, 0x401b, + "Loop resync end.\n"); } } + ql_dbg(ql_dbg_dpc, vha, 0x401c, + "Exiting %s.\n", __func__); return 0; } @@ -396,9 +415,10 @@ qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport) /* Check up max-npiv-supports */ if (ha->num_vhosts > ha->max_npiv_vports) { - DEBUG15(printk("scsi(%ld): num_vhosts %ud is bigger than " - "max_npv_vports %ud.\n", base_vha->host_no, - ha->num_vhosts, ha->max_npiv_vports)); + ql_dbg(ql_dbg_vport, vha, 0xa004, + "num_vhosts %ud is bigger " + "than max_npiv_vports %ud.\n", + ha->num_vhosts, ha->max_npiv_vports); return VPCERR_UNSUPPORTED; } return 0; @@ -415,7 +435,8 @@ 
qla24xx_create_vhost(struct fc_vport *fc_vport) vha = qla2x00_create_host(sht, ha); if (!vha) { - DEBUG(printk("qla2xxx: scsi_host_alloc() failed for vport\n")); + ql_log(ql_log_warn, vha, 0xa005, + "scsi_host_alloc() failed for vport.\n"); return(NULL); } @@ -429,8 +450,8 @@ qla24xx_create_vhost(struct fc_vport *fc_vport) vha->device_flags = 0; vha->vp_idx = qla24xx_allocate_vp_id(vha); if (vha->vp_idx > ha->max_npiv_vports) { - DEBUG15(printk("scsi(%ld): Couldn't allocate vp_id.\n", - vha->host_no)); + ql_dbg(ql_dbg_vport, vha, 0xa006, + "Couldn't allocate vp_id.\n"); goto create_vhost_failed; } vha->mgmt_svr_loop_id = 10 + vha->vp_idx; @@ -461,8 +482,9 @@ qla24xx_create_vhost(struct fc_vport *fc_vport) host->max_id = MAX_TARGETS_2200; host->transportt = qla2xxx_transport_vport_template; - DEBUG15(printk("DEBUG: detect vport hba %ld at address = %p\n", - vha->host_no, vha)); + ql_dbg(ql_dbg_vport, vha, 0xa007, + "Detect vport hba %ld at address = %p.\n", + vha->host_no, vha); vha->flags.init_done = 1; @@ -567,9 +589,9 @@ qla25xx_delete_queues(struct scsi_qla_host *vha) if (req) { ret = qla25xx_delete_req_que(vha, req); if (ret != QLA_SUCCESS) { - qla_printk(KERN_WARNING, ha, - "Couldn't delete req que %d\n", - req->id); + ql_log(ql_log_warn, vha, 0x00ea, + "Couldn't delete req que %d.\n", + req->id); return ret; } } @@ -581,9 +603,9 @@ qla25xx_delete_queues(struct scsi_qla_host *vha) if (rsp) { ret = qla25xx_delete_rsp_que(vha, rsp); if (ret != QLA_SUCCESS) { - qla_printk(KERN_WARNING, ha, - "Couldn't delete rsp que %d\n", - rsp->id); + ql_log(ql_log_warn, vha, 0x00eb, + "Couldn't delete rsp que %d.\n", + rsp->id); return ret; } } @@ -604,8 +626,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, req = kzalloc(sizeof(struct req_que), GFP_KERNEL); if (req == NULL) { - qla_printk(KERN_WARNING, ha, "could not allocate memory" - "for request que\n"); + ql_log(ql_log_fatal, base_vha, 0x00d9, + "Failed to allocate memory for request queue.\n"); goto failed; } @@ -614,8 +636,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, (req->length + 1) * sizeof(request_t), &req->dma, GFP_KERNEL); if (req->ring == NULL) { - qla_printk(KERN_WARNING, ha, - "Memory Allocation failed - request_ring\n"); + ql_log(ql_log_fatal, base_vha, 0x00da, + "Failed to allocate memory for request_ring.\n"); goto que_failed; } @@ -623,8 +645,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues); if (que_id >= ha->max_req_queues) { mutex_unlock(&ha->vport_lock); - qla_printk(KERN_INFO, ha, "No resources to create " - "additional request queue\n"); + ql_log(ql_log_warn, base_vha, 0x00db, + "No resources to create additional request queue.\n"); goto que_failed; } set_bit(que_id, ha->req_qid_map); @@ -633,6 +655,12 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, req->vp_idx = vp_idx; req->qos = qos; + ql_dbg(ql_dbg_multiq, base_vha, 0xc002, + "queue_id=%d rid=%d vp_idx=%d qos=%d.\n", + que_id, req->rid, req->vp_idx, req->qos); + ql_dbg(ql_dbg_init, base_vha, 0x00dc, + "queue_id=%d rid=%d vp_idx=%d qos=%d.\n", + que_id, req->rid, req->vp_idx, req->qos); if (rsp_que < 0) req->rsp = NULL; else @@ -645,6 +673,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, options |= BIT_5; req->options = options; + ql_dbg(ql_dbg_multiq, base_vha, 0xc003, + "options=0x%x.\n", req->options); + ql_dbg(ql_dbg_init, base_vha, 0x00dd, + "options=0x%x.\n", req->options); for (cnt = 1; cnt <
MAX_OUTSTANDING_COMMANDS; cnt++) req->outstanding_cmds[cnt] = NULL; req->current_outstanding_cmd = 1; @@ -656,10 +688,21 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, reg = ISP_QUE_REG(ha, que_id); req->max_q_depth = ha->req_q_map[0]->max_q_depth; mutex_unlock(&ha->vport_lock); + ql_dbg(ql_dbg_multiq, base_vha, 0xc004, + "ring_ptr=%p ring_index=%d, " + "cnt=%d id=%d max_q_depth=%d.\n", + req->ring_ptr, req->ring_index, + req->cnt, req->id, req->max_q_depth); + ql_dbg(ql_dbg_init, base_vha, 0x00de, + "ring_ptr=%p ring_index=%d, " + "cnt=%d id=%d max_q_depth=%d.\n", + req->ring_ptr, req->ring_index, req->cnt, + req->id, req->max_q_depth); ret = qla25xx_init_req_que(base_vha, req); if (ret != QLA_SUCCESS) { - qla_printk(KERN_WARNING, ha, "%s failed\n", __func__); + ql_log(ql_log_fatal, base_vha, 0x00df, + "%s failed.\n", __func__); mutex_lock(&ha->vport_lock); clear_bit(que_id, ha->req_qid_map); mutex_unlock(&ha->vport_lock); @@ -700,8 +743,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL); if (rsp == NULL) { - qla_printk(KERN_WARNING, ha, "could not allocate memory for" - " response que\n"); + ql_log(ql_log_warn, base_vha, 0x0066, + "Failed to allocate memory for response queue.\n"); goto failed; } @@ -710,8 +753,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, (rsp->length + 1) * sizeof(response_t), &rsp->dma, GFP_KERNEL); if (rsp->ring == NULL) { - qla_printk(KERN_WARNING, ha, - "Memory Allocation failed - response_ring\n"); + ql_log(ql_log_warn, base_vha, 0x00e1, + "Failed to allocate memory for response ring.\n"); goto que_failed; } @@ -719,8 +762,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues); if (que_id >= ha->max_rsp_queues) { mutex_unlock(&ha->vport_lock); - qla_printk(KERN_INFO, ha, "No resources to create " - "additional response queue\n"); + ql_log(ql_log_warn, base_vha, 0x00e2, + "No resources to create additional response queue.\n"); goto que_failed; } set_bit(que_id, ha->rsp_qid_map); @@ -728,12 +771,16 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, if (ha->flags.msix_enabled) rsp->msix = &ha->msix_entries[que_id + 1]; else - qla_printk(KERN_WARNING, ha, "msix not enabled\n"); + ql_log(ql_log_warn, base_vha, 0x00e3, + "MSIX not enabled.\n"); ha->rsp_q_map[que_id] = rsp; rsp->rid = rid; rsp->vp_idx = vp_idx; rsp->hw = ha; + ql_dbg(ql_dbg_init, base_vha, 0x00e4, + "queue_id=%d rid=%d vp_idx=%d hw=%p.\n", + que_id, rsp->rid, rsp->vp_idx, rsp->hw); /* Use alternate PCI bus number */ if (MSB(rsp->rid)) options |= BIT_4; @@ -750,6 +797,14 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, rsp->rsp_q_in = &reg->isp25mq.rsp_q_in; rsp->rsp_q_out = &reg->isp25mq.rsp_q_out; mutex_unlock(&ha->vport_lock); + ql_dbg(ql_dbg_multiq, base_vha, 0xc00b, + "options=%x id=%d rsp_q_in=%p rsp_q_out=%p", + rsp->options, rsp->id, rsp->rsp_q_in, + rsp->rsp_q_out); + ql_dbg(ql_dbg_init, base_vha, 0x00e5, + "options=%x id=%d rsp_q_in=%p rsp_q_out=%p", + rsp->options, rsp->id, rsp->rsp_q_in, + rsp->rsp_q_out); ret = qla25xx_request_irq(rsp); if (ret) @@ -757,7 +812,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, ret = qla25xx_init_rsp_que(base_vha, rsp); if (ret != QLA_SUCCESS) { - qla_printk(KERN_WARNING, ha, "%s failed\n", __func__); + ql_log(ql_log_fatal, base_vha, 0x00e7, + "%s failed.\n", __func__); mutex_lock(&ha->vport_lock); clear_bit(que_id,
ha->rsp_qid_map); mutex_unlock(&ha->vport_lock); diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index e1138bcc834..5cbf33a50b1 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -348,6 +348,7 @@ static void qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off) { u32 win_read; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); ha->crb_win = CRB_HI(*off); writel(ha->crb_win, @@ -358,9 +359,10 @@ qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off) */ win_read = RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase)); if (win_read != ha->crb_win) { - DEBUG2(qla_printk(KERN_INFO, ha, - "%s: Written crbwin (0x%x) != Read crbwin (0x%x), " - "off=0x%lx\n", __func__, ha->crb_win, win_read, *off)); + ql_dbg(ql_dbg_p3p, vha, 0xb000, + "%s: Written crbwin (0x%x) " + "!= Read crbwin (0x%x), off=0x%lx.\n", + __func__, ha->crb_win, win_read, *off); } *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase; } @@ -368,6 +370,7 @@ qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off) static inline unsigned long qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off) { + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); /* See if we are currently pointing to the region we want to use next */ if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) { /* No need to change window. PCIX and PCIEregs are in both @@ -398,9 +401,10 @@ qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off) return off; } /* strange address given */ - qla_printk(KERN_WARNING, ha, - "%s: Warning: unm_nic_pci_set_crbwindow called with" - " an unknown address(%llx)\n", QLA2XXX_DRIVER_NAME, off); + ql_dbg(ql_dbg_p3p, vha, 0xb001, + "%s: Warning: unm_nic_pci_set_crbwindow " + "called with an unknown address(%llx).\n", + QLA2XXX_DRIVER_NAME, off); return off; } @@ -563,6 +567,7 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr) { int window; u32 win_read; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET, QLA82XX_ADDR_DDR_NET_MAX)) { @@ -574,8 +579,8 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr) win_read = qla82xx_rd_32(ha, ha->mn_win_crb | QLA82XX_PCI_CRBSPACE); if ((win_read << 17) != window) { - qla_printk(KERN_WARNING, ha, - "%s: Written MNwin (0x%x) != Read MNwin (0x%x)\n", + ql_dbg(ql_dbg_p3p, vha, 0xb003, + "%s: Written MNwin (0x%x) != Read MNwin (0x%x).\n", __func__, window, win_read); } addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET; @@ -583,7 +588,7 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr) QLA82XX_ADDR_OCM0_MAX)) { unsigned int temp1; if ((addr & 0x00ff800) == 0xff800) { - qla_printk(KERN_WARNING, ha, + ql_log(ql_log_warn, vha, 0xb004, "%s: QM access not handled.\n", __func__); addr = -1UL; } @@ -596,8 +601,8 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr) temp1 = ((window & 0x1FF) << 7) | ((window & 0x0FFFE0000) >> 17); if (win_read != temp1) { - qla_printk(KERN_WARNING, ha, - "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x)\n", + ql_log(ql_log_warn, vha, 0xb005, + "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x).\n", __func__, temp1, win_read); } addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M; @@ -612,8 +617,8 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr) win_read = qla82xx_rd_32(ha, ha->ms_win_crb | QLA82XX_PCI_CRBSPACE); if (win_read != window) { - qla_printk(KERN_WARNING, ha, - "%s: Written MSwin (0x%x) != Read MSwin
(0x%x)\n", + ql_log(ql_log_warn, vha, 0xb006, + "%s: Written MSwin (0x%x) != Read MSwin (0x%x).\n", __func__, window, win_read); } addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET; @@ -624,9 +629,9 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr) */ if ((qla82xx_pci_set_window_warning_count++ < 8) || (qla82xx_pci_set_window_warning_count%64 == 0)) { - qla_printk(KERN_WARNING, ha, - "%s: Warning:%s Unknown address range!\n", __func__, - QLA2XXX_DRIVER_NAME); + ql_log(ql_log_warn, vha, 0xb007, + "%s: Warning:%s Unknown address range!.\n", + __func__, QLA2XXX_DRIVER_NAME); } addr = -1UL; } @@ -671,6 +676,7 @@ static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha, uint8_t *mem_ptr = NULL; unsigned long mem_base; unsigned long mem_page; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); write_lock_irqsave(&ha->hw_lock, flags); @@ -682,9 +688,10 @@ static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha, if ((start == -1UL) || (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) { write_unlock_irqrestore(&ha->hw_lock, flags); - qla_printk(KERN_ERR, ha, - "%s out of bound pci memory access. " - "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off); + ql_log(ql_log_fatal, vha, 0xb008, + "%s out of bound pci memory " + "access, offset is 0x%llx.\n", + QLA2XXX_DRIVER_NAME, off); return -1; } @@ -741,6 +748,7 @@ qla82xx_pci_mem_write_direct(struct qla_hw_data *ha, uint8_t *mem_ptr = NULL; unsigned long mem_base; unsigned long mem_page; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); write_lock_irqsave(&ha->hw_lock, flags); @@ -752,9 +760,10 @@ qla82xx_pci_mem_write_direct(struct qla_hw_data *ha, if ((start == -1UL) || (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) { write_unlock_irqrestore(&ha->hw_lock, flags); - qla_printk(KERN_ERR, ha, - "%s out of bound pci memory access. 
" - "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off); + ql_log(ql_log_fatal, vha, 0xb009, + "%s out of bount memory " + "access, offset is 0x%llx.\n", + QLA2XXX_DRIVER_NAME, off); return -1; } @@ -855,15 +864,16 @@ qla82xx_wait_rom_busy(struct qla_hw_data *ha) { long timeout = 0; long done = 0 ; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); while (done == 0) { done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS); done &= 4; timeout++; if (timeout >= rom_max_timeout) { - DEBUG(qla_printk(KERN_INFO, ha, - "%s: Timeout reached waiting for rom busy", - QLA2XXX_DRIVER_NAME)); + ql_dbg(ql_dbg_p3p, vha, 0xb00a, + "%s: Timeout reached waiting for rom busy.\n", + QLA2XXX_DRIVER_NAME); return -1; } } @@ -875,15 +885,16 @@ qla82xx_wait_rom_done(struct qla_hw_data *ha) { long timeout = 0; long done = 0 ; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); while (done == 0) { done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS); done &= 2; timeout++; if (timeout >= rom_max_timeout) { - DEBUG(qla_printk(KERN_INFO, ha, - "%s: Timeout reached waiting for rom done", - QLA2XXX_DRIVER_NAME)); + ql_dbg(ql_dbg_p3p, vha, 0xb00b, + "%s: Timeout reached waiting for rom done.\n", + QLA2XXX_DRIVER_NAME); return -1; } } @@ -893,15 +904,16 @@ qla82xx_wait_rom_done(struct qla_hw_data *ha) static int qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) { + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb); qla82xx_wait_rom_busy(ha); if (qla82xx_wait_rom_done(ha)) { - qla_printk(KERN_WARNING, ha, - "%s: Error waiting for rom done\n", - QLA2XXX_DRIVER_NAME); + ql_log(ql_log_fatal, vha, 0x00ba, + "Error waiting for rom done.\n"); return -1; } /* Reset abyte_cnt and dummy_byte_cnt */ @@ -917,6 +929,7 @@ static int qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) { int ret, loops = 0; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) { udelay(100); @@ -924,9 +937,8 @@ qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) loops++; } if (loops >= 50000) { - qla_printk(KERN_INFO, ha, - "%s: qla82xx_rom_lock failed\n", - QLA2XXX_DRIVER_NAME); + ql_log(ql_log_fatal, vha, 0x00b9, + "Failed to aquire SEM2 lock.\n"); return -1; } ret = qla82xx_do_rom_fast_read(ha, addr, valp); @@ -937,11 +949,12 @@ qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) static int qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val) { + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR); qla82xx_wait_rom_busy(ha); if (qla82xx_wait_rom_done(ha)) { - qla_printk(KERN_WARNING, ha, - "Error waiting for rom done\n"); + ql_log(ql_log_warn, vha, 0xb00c, + "Error waiting for rom done.\n"); return -1; } *val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA); @@ -955,6 +968,7 @@ qla82xx_flash_wait_write_finish(struct qla_hw_data *ha) uint32_t done = 1 ; uint32_t val; int ret = 0; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0); while ((done != 0) && (ret == 0)) { @@ -964,8 +978,8 @@ qla82xx_flash_wait_write_finish(struct qla_hw_data *ha) udelay(10); cond_resched(); if (timeout >= 50000) { - qla_printk(KERN_WARNING, ha, - "Timeout reached waiting for write finish"); + ql_log(ql_log_warn, vha, 0xb00d, + 
"Timeout reached waiting for write finish.\n"); return -1; } } @@ -992,13 +1006,14 @@ qla82xx_flash_set_write_enable(struct qla_hw_data *ha) static int qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val) { + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); if (qla82xx_flash_set_write_enable(ha)) return -1; qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0x1); if (qla82xx_wait_rom_done(ha)) { - qla_printk(KERN_WARNING, ha, - "Error waiting for rom done\n"); + ql_log(ql_log_warn, vha, 0xb00e, + "Error waiting for rom done.\n"); return -1; } return qla82xx_flash_wait_write_finish(ha); @@ -1007,10 +1022,11 @@ qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val) static int qla82xx_write_disable_flash(struct qla_hw_data *ha) { + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI); if (qla82xx_wait_rom_done(ha)) { - qla_printk(KERN_WARNING, ha, - "Error waiting for rom done\n"); + ql_log(ql_log_warn, vha, 0xb00f, + "Error waiting for rom done.\n"); return -1; } return 0; @@ -1020,13 +1036,16 @@ static int ql82xx_rom_lock_d(struct qla_hw_data *ha) { int loops = 0; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) { udelay(100); cond_resched(); loops++; } if (loops >= 50000) { - qla_printk(KERN_WARNING, ha, "ROM lock failed\n"); + ql_log(ql_log_warn, vha, 0xb010, + "ROM lock failed.\n"); return -1; } return 0;; @@ -1037,10 +1056,12 @@ qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr, uint32_t data) { int ret = 0; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); ret = ql82xx_rom_lock_d(ha); if (ret < 0) { - qla_printk(KERN_WARNING, ha, "ROM Lock failed\n"); + ql_log(ql_log_warn, vha, 0xb011, + "ROM lock failed.\n"); return ret; } @@ -1053,8 +1074,8 @@ qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr, qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_PP); qla82xx_wait_rom_busy(ha); if (qla82xx_wait_rom_done(ha)) { - qla_printk(KERN_WARNING, ha, - "Error waiting for rom done\n"); + ql_log(ql_log_warn, vha, 0xb012, + "Error waiting for rom done.\n"); ret = -1; goto done_write; } @@ -1159,8 +1180,8 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha) */ if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL || qla82xx_rom_fast_read(ha, 4, &n) != 0) { - qla_printk(KERN_WARNING, ha, - "[ERROR] Reading crb_init area: n: %08x\n", n); + ql_log(ql_log_fatal, vha, 0x006e, + "Error Reading crb_init area: n: %08x.\n", n); return -1; } @@ -1172,20 +1193,18 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha) /* number of addr/value pair should not exceed 1024 enteries */ if (n >= 1024) { - qla_printk(KERN_WARNING, ha, - "%s: %s:n=0x%x [ERROR] Card flash not initialized.\n", - QLA2XXX_DRIVER_NAME, __func__, n); + ql_log(ql_log_fatal, vha, 0x0071, + "Card flash not initialized:n=0x%x.\n", n); return -1; } - qla_printk(KERN_INFO, ha, - "%s: %d CRB init values found in ROM.\n", QLA2XXX_DRIVER_NAME, n); + ql_log(ql_log_info, vha, 0x0072, + "%d CRB init values found in ROM.\n", n); buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL); if (buf == NULL) { - qla_printk(KERN_WARNING, ha, - "%s: [ERROR] Unable to malloc memory.\n", - QLA2XXX_DRIVER_NAME); + ql_log(ql_log_fatal, vha, 0x010c, + "Unable to allocate memory.\n"); return -1; } @@ -1236,9 +1255,8 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha) continue; if (off == ADDR_ERROR) { - qla_printk(KERN_WARNING, ha, 
- "%s: [ERROR] Unknown addr: 0x%08lx\n", - QLA2XXX_DRIVER_NAME, buf[i].addr); + ql_log(ql_log_fatal, vha, 0x0116, + "Unknown addr: 0x%08lx.\n", buf[i].addr); continue; } @@ -1370,7 +1388,7 @@ qla82xx_pci_mem_write_2M(struct qla_hw_data *ha, if (j >= MAX_CTL_CHECK) { if (printk_ratelimit()) dev_err(&ha->pdev->dev, - "failed to write through agent\n"); + "failed to write through agent.\n"); ret = -1; break; } @@ -1460,7 +1478,7 @@ qla82xx_pci_mem_read_2M(struct qla_hw_data *ha, if (j >= MAX_CTL_CHECK) { if (printk_ratelimit()) dev_err(&ha->pdev->dev, - "failed to read through agent\n"); + "failed to read through agent.\n"); break; } @@ -1633,17 +1651,15 @@ qla82xx_iospace_config(struct qla_hw_data *ha) uint32_t len = 0; if (pci_request_regions(ha->pdev, QLA2XXX_DRIVER_NAME)) { - qla_printk(KERN_WARNING, ha, - "Failed to reserve selected regions (%s)\n", - pci_name(ha->pdev)); + ql_log_pci(ql_log_fatal, ha->pdev, 0x000c, + "Failed to reserve selected regions.\n"); goto iospace_error_exit; } /* Use MMIO operations for all accesses. */ if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) { - qla_printk(KERN_ERR, ha, - "region #0 not an MMIO resource (%s), aborting\n", - pci_name(ha->pdev)); + ql_log_pci(ql_log_fatal, ha->pdev, 0x000d, + "Region #0 not an MMIO resource, aborting.\n"); goto iospace_error_exit; } @@ -1651,9 +1667,8 @@ qla82xx_iospace_config(struct qla_hw_data *ha) ha->nx_pcibase = (unsigned long)ioremap(pci_resource_start(ha->pdev, 0), len); if (!ha->nx_pcibase) { - qla_printk(KERN_ERR, ha, - "cannot remap pcibase MMIO (%s), aborting\n", - pci_name(ha->pdev)); + ql_log_pci(ql_log_fatal, ha->pdev, 0x000e, + "Cannot remap pcibase MMIO, aborting.\n"); pci_release_regions(ha->pdev); goto iospace_error_exit; } @@ -1667,9 +1682,8 @@ qla82xx_iospace_config(struct qla_hw_data *ha) (unsigned long)ioremap((pci_resource_start(ha->pdev, 4) + (ha->pdev->devfn << 12)), 4); if (!ha->nxdb_wr_ptr) { - qla_printk(KERN_ERR, ha, - "cannot remap MMIO (%s), aborting\n", - pci_name(ha->pdev)); + ql_log_pci(ql_log_fatal, ha->pdev, 0x000f, + "Cannot remap MMIO, aborting.\n"); pci_release_regions(ha->pdev); goto iospace_error_exit; } @@ -1687,6 +1701,16 @@ qla82xx_iospace_config(struct qla_hw_data *ha) ha->max_req_queues = ha->max_rsp_queues = 1; ha->msix_count = ha->max_rsp_queues + 1; + ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc006, + "nx_pci_base=%p iobase=%p " + "max_req_queues=%d msix_count=%d.\n", + ha->nx_pcibase, ha->iobase, + ha->max_req_queues, ha->msix_count); + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0010, + "nx_pci_base=%p iobase=%p " + "max_req_queues=%d msix_count=%d.\n", + ha->nx_pcibase, ha->iobase, + ha->max_req_queues, ha->msix_count); return 0; iospace_error_exit: @@ -1712,6 +1736,9 @@ qla82xx_pci_config(scsi_qla_host_t *vha) pci_set_master(ha->pdev); ret = pci_set_mwi(ha->pdev); ha->chip_revision = ha->pdev->revision; + ql_dbg(ql_dbg_init, vha, 0x0043, + "Chip revision:%ld.\n", + ha->chip_revision); return 0; } @@ -1877,6 +1904,7 @@ qla82xx_check_cmdpeg_state(struct qla_hw_data *ha) { u32 val = 0; int retries = 60; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); do { read_lock(&ha->hw_lock); @@ -1892,15 +1920,15 @@ qla82xx_check_cmdpeg_state(struct qla_hw_data *ha) default: break; } - qla_printk(KERN_WARNING, ha, - "CRB_CMDPEG_STATE: 0x%x and retries: 0x%x\n", - val, retries); + ql_log(ql_log_info, vha, 0x00a8, + "CRB_CMDPEG_STATE: 0x%x and retries:0x%x.\n", + val, retries); msleep(500); } while (--retries); - qla_printk(KERN_INFO, ha, + ql_log(ql_log_fatal, vha, 0x00a9, "Cmd Peg
initialization failed: 0x%x.\n", val); val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE); @@ -1915,6 +1943,7 @@ qla82xx_check_rcvpeg_state(struct qla_hw_data *ha) { u32 val = 0; int retries = 60; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); do { read_lock(&ha->hw_lock); @@ -1930,17 +1959,16 @@ qla82xx_check_rcvpeg_state(struct qla_hw_data *ha) default: break; } - - qla_printk(KERN_WARNING, ha, - "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x\n", - val, retries); + ql_log(ql_log_info, vha, 0x00ab, + "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x.\n", + val, retries); msleep(500); } while (--retries); - qla_printk(KERN_INFO, ha, - "Rcv Peg initialization failed: 0x%x.\n", val); + ql_log(ql_log_fatal, vha, 0x00ac, + "Rcv Peg initialization failed: 0x%x.\n", val); read_lock(&ha->hw_lock); qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED); read_unlock(&ha->hw_lock); @@ -1989,13 +2017,11 @@ qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) } if (ha->mcp) { - DEBUG3_11(printk(KERN_INFO "%s(%ld): " - "Got mailbox completion. cmd=%x.\n", - __func__, vha->host_no, ha->mcp->mb[0])); + ql_dbg(ql_dbg_async, vha, 0x5052, + "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]); } else { - qla_printk(KERN_INFO, ha, - "%s(%ld): MBX pointer ERROR!\n", - __func__, vha->host_no); + ql_dbg(ql_dbg_async, vha, 0x5053, + "MBX pointer ERROR.\n"); } } @@ -2019,13 +2045,13 @@ qla82xx_intr_handler(int irq, void *dev_id) int status = 0, status1 = 0; unsigned long flags; unsigned long iter; - uint32_t stat; + uint32_t stat = 0; uint16_t mb[4]; rsp = (struct rsp_que *) dev_id; if (!rsp) { printk(KERN_INFO - "%s(): NULL response queue pointer\n", __func__); + "%s(): NULL response queue pointer.\n", __func__); return IRQ_NONE; } ha = rsp->hw; @@ -2075,9 +2101,9 @@ qla82xx_intr_handler(int irq, void *dev_id) qla24xx_process_response_queue(vha, rsp); break; default: - DEBUG2(printk("scsi(%ld): " - " Unrecognized interrupt type (%d).\n", - vha->host_no, stat & 0xff)); + ql_dbg(ql_dbg_async, vha, 0x5054, + "Unrecognized interrupt type (%d).\n", + stat & 0xff); break; } } @@ -2089,8 +2115,8 @@ qla82xx_intr_handler(int irq, void *dev_id) #ifdef QL_DEBUG_LEVEL_17 if (!irq && ha->flags.eeh_busy) - qla_printk(KERN_WARNING, ha, - "isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n", + ql_log(ql_log_warn, vha, 0x503d, + "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n", status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat); #endif @@ -2111,13 +2137,13 @@ qla82xx_msix_default(int irq, void *dev_id) struct device_reg_82xx __iomem *reg; int status = 0; unsigned long flags; - uint32_t stat; + uint32_t stat = 0; uint16_t mb[4]; rsp = (struct rsp_que *) dev_id; if (!rsp) { printk(KERN_INFO - "%s(): NULL response queue pointer\n", __func__); + "%s(): NULL response queue pointer.\n", __func__); return IRQ_NONE; } ha = rsp->hw; @@ -2149,9 +2175,9 @@ qla82xx_msix_default(int irq, void *dev_id) qla24xx_process_response_queue(vha, rsp); break; default: - DEBUG2(printk("scsi(%ld): " - " Unrecognized interrupt type (%d).\n", - vha->host_no, stat & 0xff)); + ql_dbg(ql_dbg_async, vha, 0x5041, + "Unrecognized interrupt type (%d).\n", + stat & 0xff); break; } } @@ -2162,9 +2188,9 @@ qla82xx_msix_default(int irq, void *dev_id) #ifdef QL_DEBUG_LEVEL_17 if (!irq && ha->flags.eeh_busy) - qla_printk(KERN_WARNING, ha, - "isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n", - status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat); + ql_log(ql_log_warn, vha, 0x5044, + "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n", +
status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat); #endif if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && @@ -2186,7 +2212,7 @@ qla82xx_msix_rsp_q(int irq, void *dev_id) rsp = (struct rsp_que *) dev_id; if (!rsp) { printk(KERN_INFO - "%s(): NULL response queue pointer\n", __func__); + "%s(): NULL response queue pointer.\n", __func__); return IRQ_NONE; } @@ -2215,7 +2241,7 @@ qla82xx_poll(int irq, void *dev_id) rsp = (struct rsp_que *) dev_id; if (!rsp) { printk(KERN_INFO - "%s(): NULL response queue pointer\n", __func__); + "%s(): NULL response queue pointer.\n", __func__); return; } ha = rsp->hw; @@ -2245,9 +2271,9 @@ qla82xx_poll(int irq, void *dev_id) qla24xx_process_response_queue(vha, rsp); break; default: - DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " - "(%d).\n", - vha->host_no, stat & 0xff)); + ql_dbg(ql_dbg_p3p, vha, 0xb013, + "Unrecognized interrupt type (%d).\n", + stat & 0xff); break; } } @@ -2347,9 +2373,8 @@ qla82xx_set_rst_ready(struct qla_hw_data *ha) drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); } drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); - qla_printk(KERN_INFO, ha, - "%s(%ld):drv_state = 0x%x\n", - __func__, vha->host_no, drv_state); + ql_log(ql_log_info, vha, 0x00bb, + "drv_state = 0x%x.\n", drv_state); qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); } @@ -2392,8 +2417,8 @@ qla82xx_load_fw(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) { - qla_printk(KERN_ERR, ha, - "%s: Error during CRB Initialization\n", __func__); + ql_log(ql_log_fatal, vha, 0x009f, + "Error during CRB initialization.\n"); return QLA_FUNCTION_FAILED; } udelay(500); @@ -2411,27 +2436,27 @@ qla82xx_load_fw(scsi_qla_host_t *vha) if (ql2xfwloadbin == 2) goto try_blob_fw; - qla_printk(KERN_INFO, ha, - "Attempting to load firmware from flash\n"); + ql_log(ql_log_info, vha, 0x00a0, + "Attempting to load firmware from flash.\n"); if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) { - qla_printk(KERN_ERR, ha, - "Firmware loaded successfully from flash\n"); + ql_log(ql_log_info, vha, 0x00a1, + "Firmware loaded successfully from flash.\n"); return QLA_SUCCESS; } else { - qla_printk(KERN_ERR, ha, - "Firmware load from flash failed\n"); + ql_log(ql_log_warn, vha, 0x0108, + "Firmware load from flash failed.\n"); } try_blob_fw: - qla_printk(KERN_INFO, ha, - "Attempting to load firmware from blob\n"); + ql_log(ql_log_info, vha, 0x00a2, + "Attempting to load firmware from blob.\n"); /* Load firmware blob.
 */
 blob = ha->hablob = qla2x00_request_firmware(vha);
 if (!blob) {
- qla_printk(KERN_ERR, ha,
- "Firmware image not present.\n");
+ ql_log(ql_log_fatal, vha, 0x00a3,
+ "Firmware image not present.\n");
 goto fw_load_failed;
 }
@@ -2441,20 +2466,19 @@ try_blob_fw:
 /* Fallback to URI format */
 if (qla82xx_validate_firmware_blob(vha,
 QLA82XX_UNIFIED_ROMIMAGE)) {
- qla_printk(KERN_ERR, ha,
- "No valid firmware image found!!!");
+ ql_log(ql_log_fatal, vha, 0x00a4,
+ "No valid firmware image found.\n");
 return QLA_FUNCTION_FAILED;
 }
 }
 if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) {
- qla_printk(KERN_ERR, ha,
- "%s: Firmware loaded successfully "
- " from binary blob\n", __func__);
+ ql_log(ql_log_info, vha, 0x00a5,
+ "Firmware loaded successfully from binary blob.\n");
 return QLA_SUCCESS;
 } else {
- qla_printk(KERN_ERR, ha,
- "Firmware load failed for binary blob\n");
+ ql_log(ql_log_fatal, vha, 0x00a6,
+ "Firmware load failed for binary blob.\n");
 blob->fw = NULL;
 blob = NULL;
 goto fw_load_failed;
@@ -2486,15 +2510,15 @@ qla82xx_start_firmware(scsi_qla_host_t *vha)
 qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);
 if (qla82xx_load_fw(vha) != QLA_SUCCESS) {
- qla_printk(KERN_INFO, ha,
- "%s: Error trying to start fw!\n", __func__);
+ ql_log(ql_log_fatal, vha, 0x00a7,
+ "Error trying to start fw.\n");
 return QLA_FUNCTION_FAILED;
 }
 /* Handshake with the card before we register the devices. */
 if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) {
- qla_printk(KERN_INFO, ha,
- "%s: Error during card handshake!\n", __func__);
+ ql_log(ql_log_fatal, vha, 0x00aa,
+ "Error during card handshake.\n");
 return QLA_FUNCTION_FAILED;
 }
@@ -2663,8 +2687,11 @@ qla82xx_start_scsi(srb_t *sp)
 /* Send marker if required */
 if (vha->marker_needed != 0) {
 if (qla2x00_marker(vha, req,
- rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
+ rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x300c,
+ "qla2x00_marker failed for cmd=%p.\n", cmd);
 return QLA_FUNCTION_FAILED;
+ }
 vha->marker_needed = 0;
 }
@@ -2701,8 +2728,13 @@ qla82xx_start_scsi(srb_t *sp)
 uint16_t i;
 more_dsd_lists = qla82xx_calc_dsd_lists(tot_dsds);
- if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN)
+ if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
+ ql_dbg(ql_dbg_io, vha, 0x300d,
+ "Num of DSD list %d is greater than %d for cmd=%p.\n",
+ more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
+ cmd);
 goto queuing_error;
+ }
 if (more_dsd_lists <= ha->gbl_dsd_avail)
 goto sufficient_dsds;
@@ -2711,13 +2743,20 @@ qla82xx_start_scsi(srb_t *sp)
 for (i = 0; i < more_dsd_lists; i++) {
 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
- if (!dsd_ptr)
+ if (!dsd_ptr) {
+ ql_log(ql_log_fatal, vha, 0x300e,
+ "Failed to allocate memory for dsd_dma "
+ "for cmd=%p.\n", cmd);
 goto queuing_error;
+ }
 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
 if (!dsd_ptr->dsd_addr) {
 kfree(dsd_ptr);
+ ql_log(ql_log_fatal, vha, 0x300f,
+ "Failed to allocate memory for dsd_addr "
+ "for cmd=%p.\n", cmd);
 goto queuing_error;
 }
 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
@@ -2742,17 +2781,16 @@ sufficient_dsds:
 ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
 if (!sp->ctx) {
- DEBUG(printk(KERN_INFO
- "%s(%ld): failed to allocate"
- " ctx.\n", __func__, vha->host_no));
+ ql_log(ql_log_fatal, vha, 0x3010,
+ "Failed to allocate ctx for cmd=%p.\n", cmd);
 goto queuing_error;
 }
 memset(ctx, 0, sizeof(struct ct6_dsd));
 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
 if
(!ctx->fcp_cmnd) { - DEBUG2_3(printk("%s(%ld): failed to allocate" - " fcp_cmnd.\n", __func__, vha->host_no)); + ql_log(ql_log_fatal, vha, 0x3011, + "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd); goto queuing_error_fcp_cmnd; } @@ -2766,6 +2804,9 @@ sufficient_dsds: /* SCSI command bigger than 16 bytes must be * multiple of 4 */ + ql_log(ql_log_warn, vha, 0x3012, + "scsi cmd len %d not multiple of 4 " + "for cmd=%p.\n", cmd->cmd_len, cmd); goto queuing_error_fcp_cmnd; } ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4; @@ -2845,7 +2886,7 @@ sufficient_dsds: cmd_pkt->entry_status = (uint8_t) rsp->id; } else { struct cmd_type_7 *cmd_pkt; - req_cnt = qla24xx_calc_iocbs(tot_dsds); + req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); if (req->cnt < (req_cnt + 2)) { cnt = (uint16_t)RD_REG_DWORD_RELAXED( ®->req_q_out[0]); @@ -2979,8 +3020,8 @@ qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, /* Dword reads to flash. */ for (i = 0; i < length/4; i++, faddr += 4) { if (qla82xx_rom_fast_read(ha, faddr, &val)) { - qla_printk(KERN_WARNING, ha, - "Do ROM fast read failed\n"); + ql_log(ql_log_warn, vha, 0x0106, + "Do ROM fast read failed.\n"); goto done_read; } dwptr[i] = __constant_cpu_to_le32(val); @@ -2994,10 +3035,12 @@ qla82xx_unprotect_flash(struct qla_hw_data *ha) { int ret; uint32_t val; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); ret = ql82xx_rom_lock_d(ha); if (ret < 0) { - qla_printk(KERN_WARNING, ha, "ROM Lock failed\n"); + ql_log(ql_log_warn, vha, 0xb014, + "ROM Lock failed.\n"); return ret; } @@ -3013,7 +3056,8 @@ qla82xx_unprotect_flash(struct qla_hw_data *ha) } if (qla82xx_write_disable_flash(ha) != 0) - qla_printk(KERN_WARNING, ha, "Write disable failed\n"); + ql_log(ql_log_warn, vha, 0xb015, + "Write disable failed.\n"); done_unprotect: qla82xx_rom_unlock(ha); @@ -3025,10 +3069,12 @@ qla82xx_protect_flash(struct qla_hw_data *ha) { int ret; uint32_t val; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); ret = ql82xx_rom_lock_d(ha); if (ret < 0) { - qla_printk(KERN_WARNING, ha, "ROM Lock failed\n"); + ql_log(ql_log_warn, vha, 0xb016, + "ROM Lock failed.\n"); return ret; } @@ -3040,10 +3086,12 @@ qla82xx_protect_flash(struct qla_hw_data *ha) /* LOCK all sectors */ ret = qla82xx_write_status_reg(ha, val); if (ret < 0) - qla_printk(KERN_WARNING, ha, "Write status register failed\n"); + ql_log(ql_log_warn, vha, 0xb017, + "Write status register failed.\n"); if (qla82xx_write_disable_flash(ha) != 0) - qla_printk(KERN_WARNING, ha, "Write disable failed\n"); + ql_log(ql_log_warn, vha, 0xb018, + "Write disable failed.\n"); done_protect: qla82xx_rom_unlock(ha); return ret; @@ -3053,10 +3101,12 @@ static int qla82xx_erase_sector(struct qla_hw_data *ha, int addr) { int ret = 0; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); ret = ql82xx_rom_lock_d(ha); if (ret < 0) { - qla_printk(KERN_WARNING, ha, "ROM Lock failed\n"); + ql_log(ql_log_warn, vha, 0xb019, + "ROM Lock failed.\n"); return ret; } @@ -3066,8 +3116,8 @@ qla82xx_erase_sector(struct qla_hw_data *ha, int addr) qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_SE); if (qla82xx_wait_rom_done(ha)) { - qla_printk(KERN_WARNING, ha, - "Error waiting for rom done\n"); + ql_log(ql_log_warn, vha, 0xb01a, + "Error waiting for rom done.\n"); ret = -1; goto done; } @@ -3110,10 +3160,10 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr, optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, &optrom_dma, GFP_KERNEL); if (!optrom) { - qla_printk(KERN_DEBUG, ha, - "Unable to allocate 
memory for optrom " - "burst write (%x KB).\n", - OPTROM_BURST_SIZE / 1024); + ql_log(ql_log_warn, vha, 0xb01b, + "Unable to allocate memory " + "for optron burst write (%x KB).\n", + OPTROM_BURST_SIZE / 1024); } } @@ -3122,8 +3172,8 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr, ret = qla82xx_unprotect_flash(ha); if (ret) { - qla_printk(KERN_WARNING, ha, - "Unable to unprotect flash for update.\n"); + ql_log(ql_log_warn, vha, 0xb01c, + "Unable to unprotect flash for update.\n"); goto write_done; } @@ -3133,9 +3183,9 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr, ret = qla82xx_erase_sector(ha, faddr); if (ret) { - DEBUG9(qla_printk(KERN_ERR, ha, - "Unable to erase sector: " - "address=%x.\n", faddr)); + ql_log(ql_log_warn, vha, 0xb01d, + "Unable to erase sector: address=%x.\n", + faddr); break; } } @@ -3149,12 +3199,12 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr, (ha->flash_data_off | faddr), OPTROM_BURST_DWORDS); if (ret != QLA_SUCCESS) { - qla_printk(KERN_WARNING, ha, + ql_log(ql_log_warn, vha, 0xb01e, "Unable to burst-write optrom segment " "(%x/%x/%llx).\n", ret, (ha->flash_data_off | faddr), (unsigned long long)optrom_dma); - qla_printk(KERN_WARNING, ha, + ql_log(ql_log_warn, vha, 0xb01f, "Reverting to slow-write.\n"); dma_free_coherent(&ha->pdev->dev, @@ -3171,16 +3221,16 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr, ret = qla82xx_write_flash_dword(ha, faddr, cpu_to_le32(*dwptr)); if (ret) { - DEBUG9(printk(KERN_DEBUG "%s(%ld) Unable to program" - "flash address=%x data=%x.\n", __func__, - ha->host_no, faddr, *dwptr)); + ql_dbg(ql_dbg_p3p, vha, 0xb020, + "Unable to program flash address=%x data=%x.\n", + faddr, *dwptr); break; } } ret = qla82xx_protect_flash(ha); if (ret) - qla_printk(KERN_WARNING, ha, + ql_log(ql_log_warn, vha, 0xb021, "Unable to protect flash after update.\n"); write_done: if (optrom) @@ -3244,9 +3294,12 @@ qla82xx_start_iocbs(srb_t *sp) void qla82xx_rom_lock_recovery(struct qla_hw_data *ha) { + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + if (qla82xx_rom_lock(ha)) /* Someone else is holding the lock. 
*/ - qla_printk(KERN_INFO, ha, "Resetting rom_lock\n"); + ql_log(ql_log_info, vha, 0xb022, + "Resetting rom_lock.\n"); /* * Either we got the lock, or someone @@ -3313,7 +3366,8 @@ qla82xx_device_bootstrap(scsi_qla_host_t *vha) dev_initialize: /* set to DEV_INITIALIZING */ - qla_printk(KERN_INFO, ha, "HW State: INITIALIZING\n"); + ql_log(ql_log_info, vha, 0x009e, + "HW State: INITIALIZING.\n"); qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING); /* Driver that sets device state to initializating sets IDC version */ @@ -3324,14 +3378,16 @@ dev_initialize: qla82xx_idc_lock(ha); if (rval != QLA_SUCCESS) { - qla_printk(KERN_INFO, ha, "HW State: FAILED\n"); + ql_log(ql_log_fatal, vha, 0x00ad, + "HW State: FAILED.\n"); qla82xx_clear_drv_active(ha); qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED); return rval; } dev_ready: - qla_printk(KERN_INFO, ha, "HW State: READY\n"); + ql_log(ql_log_info, vha, 0x00ae, + "HW State: READY.\n"); qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY); return QLA_SUCCESS; @@ -3376,15 +3432,15 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha) /* quiescence timeout, other functions didn't ack * changing the state to DEV_READY */ - qla_printk(KERN_INFO, ha, - "%s: QUIESCENT TIMEOUT\n", QLA2XXX_DRIVER_NAME); - qla_printk(KERN_INFO, ha, - "DRV_ACTIVE:%d DRV_STATE:%d\n", drv_active, - drv_state); + ql_log(ql_log_info, vha, 0xb023, + "%s : QUIESCENT TIMEOUT.\n", QLA2XXX_DRIVER_NAME); + ql_log(ql_log_info, vha, 0xb024, + "DRV_ACTIVE:%d DRV_STATE:%d.\n", + drv_active, drv_state); qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, - QLA82XX_DEV_READY); - qla_printk(KERN_INFO, ha, - "HW State: DEV_READY\n"); + QLA82XX_DEV_READY); + ql_log(ql_log_info, vha, 0xb025, + "HW State: DEV_READY.\n"); qla82xx_idc_unlock(ha); qla2x00_perform_loop_resync(vha); qla82xx_idc_lock(ha); @@ -3404,7 +3460,8 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha) dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); /* everyone acked so set the state to DEV_QUIESCENCE */ if (dev_state == QLA82XX_DEV_NEED_QUIESCENT) { - qla_printk(KERN_INFO, ha, "HW State: DEV_QUIESCENT\n"); + ql_log(ql_log_info, vha, 0xb026, + "HW State: DEV_QUIESCENT.\n"); qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_QUIESCENT); } } @@ -3441,7 +3498,8 @@ qla82xx_dev_failed_handler(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; /* Disable the board */ - qla_printk(KERN_INFO, ha, "Disabling the board\n"); + ql_log(ql_log_fatal, vha, 0x00b8, + "Disabling the board.\n"); qla82xx_idc_lock(ha); qla82xx_clear_drv_active(ha); @@ -3492,8 +3550,8 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha) while (drv_state != drv_active) { if (time_after_eq(jiffies, reset_timeout)) { - qla_printk(KERN_INFO, ha, - "%s: RESET TIMEOUT!\n", QLA2XXX_DRIVER_NAME); + ql_log(ql_log_warn, vha, 0x00b5, + "Reset timeout.\n"); break; } qla82xx_idc_unlock(ha); @@ -3504,12 +3562,15 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha) } dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); - qla_printk(KERN_INFO, ha, "3:Device state is 0x%x = %s\n", dev_state, - dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); + ql_log(ql_log_info, vha, 0x00b6, + "Device state is 0x%x = %s.\n", + dev_state, + dev_state < MAX_STATES ? 
qdev_state[dev_state] : "Unknown"); /* Force to DEV_COLD unless someone else is starting a reset */ if (dev_state != QLA82XX_DEV_INITIALIZING) { - qla_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n"); + ql_log(ql_log_info, vha, 0x00b7, + "HW State: COLD/RE-INIT.\n"); qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD); } } @@ -3523,8 +3584,12 @@ qla82xx_check_fw_alive(scsi_qla_host_t *vha) fw_heartbeat_counter = qla82xx_rd_32(vha->hw, QLA82XX_PEG_ALIVE_COUNTER); /* all 0xff, assume AER/EEH in progress, ignore */ - if (fw_heartbeat_counter == 0xffffffff) + if (fw_heartbeat_counter == 0xffffffff) { + ql_dbg(ql_dbg_timer, vha, 0x6003, + "FW heartbeat counter is 0xffffffff, " + "returning status=%d.\n", status); return status; + } if (vha->fw_heartbeat_counter == fw_heartbeat_counter) { vha->seconds_since_last_heartbeat++; /* FW not alive after 2 seconds */ @@ -3535,6 +3600,9 @@ qla82xx_check_fw_alive(scsi_qla_host_t *vha) } else vha->seconds_since_last_heartbeat = 0; vha->fw_heartbeat_counter = fw_heartbeat_counter; + if (status) + ql_dbg(ql_dbg_timer, vha, 0x6004, + "Returning status=%d.\n", status); return status; } @@ -3565,8 +3633,10 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha) dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); old_dev_state = dev_state; - qla_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state, - dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); + ql_log(ql_log_info, vha, 0x009b, + "Device state is 0x%x = %s.\n", + dev_state, + dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); /* wait for 30 seconds for device to go ready */ dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ); @@ -3574,9 +3644,8 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha) while (1) { if (time_after_eq(jiffies, dev_init_timeout)) { - DEBUG(qla_printk(KERN_INFO, ha, - "%s: device init failed!\n", - QLA2XXX_DRIVER_NAME)); + ql_log(ql_log_fatal, vha, 0x009c, + "Device init failed.\n"); rval = QLA_FUNCTION_FAILED; break; } @@ -3586,10 +3655,11 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha) old_dev_state = dev_state; } if (loopcount < 5) { - qla_printk(KERN_INFO, ha, - "2:Device state is 0x%x = %s\n", dev_state, - dev_state < MAX_STATES ? - qdev_state[dev_state] : "Unknown"); + ql_log(ql_log_info, vha, 0x009d, + "Device state is 0x%x = %s.\n", + dev_state, + dev_state < MAX_STATES ? 
qdev_state[dev_state] : + "Unknown"); } switch (dev_state) { @@ -3656,29 +3726,26 @@ void qla82xx_watchdog(scsi_qla_host_t *vha) dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); if (dev_state == QLA82XX_DEV_NEED_RESET && !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) { - qla_printk(KERN_WARNING, ha, - "scsi(%ld) %s: Adapter reset needed!\n", - vha->host_no, __func__); + ql_log(ql_log_warn, vha, 0x6001, + "Adapter reset needed.\n"); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT && !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) { - DEBUG(qla_printk(KERN_INFO, ha, - "scsi(%ld) %s - detected quiescence needed\n", - vha->host_no, __func__)); + ql_log(ql_log_warn, vha, 0x6002, + "Quiescent needed.\n"); set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } else { if (qla82xx_check_fw_alive(vha)) { halt_status = qla82xx_rd_32(ha, QLA82XX_PEG_HALT_STATUS1); - qla_printk(KERN_INFO, ha, - "scsi(%ld): %s, Dumping hw/fw registers:\n " - " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n " - " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n " - " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n " - " PEG_NET_4_PC: 0x%x\n", - vha->host_no, __func__, halt_status, + ql_dbg(ql_dbg_timer, vha, 0x6005, + "dumping hw/fw registers:.\n " + " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,.\n " + " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,.\n " + " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,.\n " + " PEG_NET_4_PC: 0x%x.\n", halt_status, qla82xx_rd_32(ha, QLA82XX_PEG_HALT_STATUS2), qla82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c), @@ -3694,9 +3761,8 @@ void qla82xx_watchdog(scsi_qla_host_t *vha) set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags); } else { - qla_printk(KERN_INFO, ha, - "scsi(%ld): %s - detect abort needed\n", - vha->host_no, __func__); + ql_log(ql_log_info, vha, 0x6006, + "Detect abort needed.\n"); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); } @@ -3704,10 +3770,10 @@ void qla82xx_watchdog(scsi_qla_host_t *vha) ha->flags.isp82xx_fw_hung = 1; if (ha->flags.mbox_busy) { ha->flags.mbox_int = 1; - DEBUG2(qla_printk(KERN_ERR, ha, - "scsi(%ld) Due to fw hung, doing " + ql_log(ql_log_warn, vha, 0x6007, + "Due to FW hung, doing " "premature completion of mbx " - "command\n", vha->host_no)); + "command.\n"); if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags)) complete(&ha->mbx_intr_comp); @@ -3742,9 +3808,8 @@ qla82xx_abort_isp(scsi_qla_host_t *vha) uint32_t dev_state; if (vha->device_flags & DFLG_DEV_FAILED) { - qla_printk(KERN_WARNING, ha, - "%s(%ld): Device in failed state, " - "Exiting.\n", __func__, vha->host_no); + ql_log(ql_log_warn, vha, 0x8024, + "Device in failed state, exiting.\n"); return QLA_SUCCESS; } ha->flags.isp82xx_reset_hdlr_active = 1; @@ -3752,13 +3817,14 @@ qla82xx_abort_isp(scsi_qla_host_t *vha) qla82xx_idc_lock(ha); dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); if (dev_state == QLA82XX_DEV_READY) { - qla_printk(KERN_INFO, ha, "HW State: NEED RESET\n"); + ql_log(ql_log_info, vha, 0x8025, + "HW State: NEED RESET.\n"); qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_NEED_RESET); } else - qla_printk(KERN_INFO, ha, "HW State: %s\n", - dev_state < MAX_STATES ? - qdev_state[dev_state] : "Unknown"); + ql_log(ql_log_info, vha, 0x8026, + "Hw State: %s.\n", dev_state < MAX_STATES ? 
+ qdev_state[dev_state] : "Unknown");
 qla82xx_idc_unlock(ha);
 rval = qla82xx_device_state_handler(vha);
@@ -3777,9 +3843,9 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
 vha->flags.online = 1;
 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
 if (ha->isp_abort_cnt == 0) {
- qla_printk(KERN_WARNING, ha,
- "ISP error recovery failed - "
- "board disabled\n");
+ ql_log(ql_log_warn, vha, 0x8027,
+ "ISP error recovery failed - board "
+ "disabled.\n");
 /*
 * The next call disables the board
 * completely.
@@ -3791,16 +3857,16 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
 rval = QLA_SUCCESS;
 } else { /* schedule another ISP abort */
 ha->isp_abort_cnt--;
- DEBUG(qla_printk(KERN_INFO, ha,
- "qla%ld: ISP abort - retry remaining %d\n",
- vha->host_no, ha->isp_abort_cnt));
+ ql_log(ql_log_warn, vha, 0x8036,
+ "ISP abort - retry remaining %d.\n",
+ ha->isp_abort_cnt);
 rval = QLA_FUNCTION_FAILED;
 }
 } else {
 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
- DEBUG(qla_printk(KERN_INFO, ha,
- "(%ld): ISP error recovery - retrying (%d) "
- "more times\n", vha->host_no, ha->isp_abort_cnt));
+ ql_dbg(ql_dbg_taskm, vha, 0x8029,
+ "ISP error recovery - retrying (%d) more times.\n",
+ ha->isp_abort_cnt);
 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
 rval = QLA_FUNCTION_FAILED;
 }
@@ -3872,8 +3938,8 @@ int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
 break;
 }
 }
- DEBUG2(printk(KERN_INFO
- "%s status=%d\n", __func__, status));
+ ql_dbg(ql_dbg_p3p, vha, 0xb027,
+ "%s status=%d.\n", __func__, status);
 return status;
 }
@@ -3902,6 +3968,9 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
 }
 }
 }
+ ql_dbg(ql_dbg_init, vha, 0x00b0,
+ "Entered %s fw_hung=%d.\n",
+ __func__, ha->flags.isp82xx_fw_hung);
 /* Abort all commands gracefully if fw NOT hung */
 if (!ha->flags.isp82xx_fw_hung) {
@@ -3922,13 +3991,13 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
 spin_unlock_irqrestore(
 &ha->hardware_lock, flags);
 if (ha->isp_ops->abort_command(sp)) {
- qla_printk(KERN_INFO, ha,
- "scsi(%ld): mbx abort command failed in %s\n",
- vha->host_no, __func__);
+ ql_log(ql_log_info, vha,
+ 0x00b1,
+ "mbx abort failed.\n");
 } else {
- qla_printk(KERN_INFO, ha,
- "scsi(%ld): mbx abort command success in %s\n",
- vha->host_no, __func__);
+ ql_log(ql_log_info, vha,
+ 0x00b2,
+ "mbx abort success.\n");
 }
 spin_lock_irqsave(&ha->hardware_lock, flags);
 }
@@ -3940,8 +4009,9 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
 /* Wait for pending cmds (physical and virtual) to complete */
 if (!qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
 WAIT_HOST) == QLA_SUCCESS) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "Done wait for pending commands\n"));
+ ql_dbg(ql_dbg_init, vha, 0x00b3,
+ "Done wait for "
+ "pending commands.\n");
 }
 }
 }
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index f461925a9df..e02df276804 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -35,6 +35,10 @@ static struct kmem_cache *srb_cachep;
 * CT6 CTX allocation cache
 */
static struct kmem_cache *ctx_cachep;
+/*
+ * error level for logging
+ */
+int ql_errlev = ql_log_all;
 int ql2xlogintimeout = 20;
 module_param(ql2xlogintimeout, int, S_IRUGO);
@@ -69,8 +73,17 @@ MODULE_PARM_DESC(ql2xallocfwdump,
 int ql2xextended_error_logging;
 module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(ql2xextended_error_logging,
- "Option to enable extended error logging, "
- "Default is 0 - no logging. 1 - log errors.");
+ "Option to enable extended error logging,\n"
+ "\t\tDefault is 0 - no logging.
0x40000000 - Module Init & Probe.\n" + "\t\t0x20000000 - Mailbox Cmnds. 0x10000000 - Device Discovery.\n" + "\t\t0x08000000 - IO tracing. 0x04000000 - DPC Thread.\n" + "\t\t0x02000000 - Async events. 0x01000000 - Timer routines.\n" + "\t\t0x00800000 - User space. 0x00400000 - Task Management.\n" + "\t\t0x00200000 - AER/EEH. 0x00100000 - Multi Q.\n" + "\t\t0x00080000 - P3P Specific. 0x00040000 - Virtual Port.\n" + "\t\t0x00020000 - Buffer Dump. 0x00010000 - Misc.\n" + "\t\t0x7fffffff - For enabling all logs, can be too many logs.\n" + "\t\tDo LOGICAL OR of the value to enable more than one level"); int ql2xshiftctondsd = 6; module_param(ql2xshiftctondsd, int, S_IRUGO); @@ -128,8 +141,8 @@ MODULE_PARM_DESC(ql2xmultique_tag, int ql2xfwloadbin; module_param(ql2xfwloadbin, int, S_IRUGO); MODULE_PARM_DESC(ql2xfwloadbin, - "Option to specify location from which to load ISP firmware:\n" - " 2 -- load firmware via the request_firmware() (hotplug)\n" + "Option to specify location from which to load ISP firmware:.\n" + " 2 -- load firmware via the request_firmware() (hotplug).\n" " interface.\n" " 1 -- load firmware from flash.\n" " 0 -- use default semantics.\n"); @@ -143,7 +156,7 @@ MODULE_PARM_DESC(ql2xetsenable, int ql2xdbwr = 1; module_param(ql2xdbwr, int, S_IRUGO); MODULE_PARM_DESC(ql2xdbwr, - "Option to specify scheme for request queue posting\n" + "Option to specify scheme for request queue posting.\n" " 0 -- Regular doorbell.\n" " 1 -- CAMRAM doorbell (faster).\n"); @@ -168,7 +181,7 @@ MODULE_PARM_DESC(ql2xasynctmfenable, int ql2xdontresethba; module_param(ql2xdontresethba, int, S_IRUGO); MODULE_PARM_DESC(ql2xdontresethba, - "Option to specify reset behaviour\n" + "Option to specify reset behaviour.\n" " 0 (Default) -- Reset on failure.\n" " 1 -- Do not reset on failure.\n"); @@ -247,8 +260,11 @@ static inline void qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval) { /* Currently used for 82XX only. 
*/ - if (vha->device_flags & DFLG_DEV_FAILED) + if (vha->device_flags & DFLG_DEV_FAILED) { + ql_dbg(ql_dbg_timer, vha, 0x600d, + "Device in a failed state, returning.\n"); return; + } mod_timer(&vha->timer, jiffies + interval * HZ); } @@ -273,19 +289,20 @@ static void qla2x00_sp_free_dma(srb_t *); /* -------------------------------------------------------------------------- */ static int qla2x00_alloc_queues(struct qla_hw_data *ha) { + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues, GFP_KERNEL); if (!ha->req_q_map) { - qla_printk(KERN_WARNING, ha, - "Unable to allocate memory for request queue ptrs\n"); + ql_log(ql_log_fatal, vha, 0x003b, + "Unable to allocate memory for request queue ptrs.\n"); goto fail_req_map; } ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues, GFP_KERNEL); if (!ha->rsp_q_map) { - qla_printk(KERN_WARNING, ha, - "Unable to allocate memory for response queue ptrs\n"); + ql_log(ql_log_fatal, vha, 0x003c, + "Unable to allocate memory for response queue ptrs.\n"); goto fail_rsp_map; } set_bit(0, ha->rsp_qid_map); @@ -349,8 +366,8 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha) struct qla_hw_data *ha = vha->hw; if (!(ha->fw_attributes & BIT_6)) { - qla_printk(KERN_INFO, ha, - "Firmware is not multi-queue capable\n"); + ql_log(ql_log_warn, vha, 0x00d8, + "Firmware is not multi-queue capable.\n"); goto fail; } if (ql2xmultique_tag) { @@ -359,8 +376,8 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha) req = qla25xx_create_req_que(ha, options, 0, 0, -1, QLA_DEFAULT_QUE_QOS); if (!req) { - qla_printk(KERN_WARNING, ha, - "Can't create request queue\n"); + ql_log(ql_log_warn, vha, 0x00e0, + "Failed to create request queue.\n"); goto fail; } ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1); @@ -369,17 +386,20 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha) for (ques = 1; ques < ha->max_rsp_queues; ques++) { ret = qla25xx_create_rsp_que(ha, options, 0, 0, req); if (!ret) { - qla_printk(KERN_WARNING, ha, - "Response Queue create failed\n"); + ql_log(ql_log_warn, vha, 0x00e8, + "Failed to create response queue.\n"); goto fail2; } } ha->flags.cpu_affinity_enabled = 1; - - DEBUG2(qla_printk(KERN_INFO, ha, - "CPU affinity mode enabled, no. of response" - " queues:%d, no. of request queues:%d\n", - ha->max_rsp_queues, ha->max_req_queues)); + ql_dbg(ql_dbg_multiq, vha, 0xc007, + "CPU affinity mode enalbed, " + "no. of response queues:%d no. of request queues:%d.\n", + ha->max_rsp_queues, ha->max_req_queues); + ql_dbg(ql_dbg_init, vha, 0x00e9, + "CPU affinity mode enalbed, " + "no. of response queues:%d no. 
of request queues:%d.\n", + ha->max_rsp_queues, ha->max_req_queues); } return 0; fail2: @@ -526,8 +546,11 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport, struct qla_hw_data *ha = vha->hw; sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC); - if (!sp) + if (!sp) { + ql_log(ql_log_warn, vha, 0x3006, + "Memory allocation failed for sp.\n"); return sp; + } atomic_set(&sp->ref_count, 1); sp->fcport = fcport; @@ -551,30 +574,43 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) int rval; if (ha->flags.eeh_busy) { - if (ha->flags.pci_channel_io_perm_failure) + if (ha->flags.pci_channel_io_perm_failure) { + ql_dbg(ql_dbg_io, vha, 0x3001, + "PCI Channel IO permanent failure, exiting " + "cmd=%p.\n", cmd); cmd->result = DID_NO_CONNECT << 16; - else + } else { + ql_dbg(ql_dbg_io, vha, 0x3002, + "EEH_Busy, Requeuing the cmd=%p.\n", cmd); cmd->result = DID_REQUEUE << 16; + } goto qc24_fail_command; } rval = fc_remote_port_chkready(rport); if (rval) { cmd->result = rval; + ql_dbg(ql_dbg_io, vha, 0x3003, + "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n", + cmd, rval); goto qc24_fail_command; } if (!vha->flags.difdix_supported && scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { - DEBUG2(qla_printk(KERN_ERR, ha, - "DIF Cap Not Reg, fail DIF capable cmd's:%x\n", - cmd->cmnd[0])); + ql_dbg(ql_dbg_io, vha, 0x3004, + "DIF Cap not reg, fail DIF capable cmd's:%p.\n", + cmd); cmd->result = DID_NO_CONNECT << 16; goto qc24_fail_command; } if (atomic_read(&fcport->state) != FCS_ONLINE) { if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || atomic_read(&base_vha->loop_state) == LOOP_DEAD) { + ql_dbg(ql_dbg_io, vha, 0x3005, + "Returning DNC, fcport_state=%d loop_state=%d.\n", + atomic_read(&fcport->state), + atomic_read(&base_vha->loop_state)); cmd->result = DID_NO_CONNECT << 16; goto qc24_fail_command; } @@ -586,8 +622,11 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) goto qc24_host_busy; rval = ha->isp_ops->start_scsi(sp); - if (rval != QLA_SUCCESS) + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_io, vha, 0x3013, + "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd); goto qc24_host_busy_free_sp; + } return 0; @@ -630,7 +669,8 @@ qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd) int ret = QLA_SUCCESS; if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) { - DEBUG17(qla_printk(KERN_WARNING, ha, "return:eh_wait\n")); + ql_dbg(ql_dbg_taskm, vha, 0x8005, + "Return:eh_wait.\n"); return ret; } @@ -723,7 +763,8 @@ qla2x00_wait_for_reset_ready(scsi_qla_host_t *vha) else return_status = QLA_FUNCTION_FAILED; - DEBUG2(printk("%s return_status=%d\n", __func__, return_status)); + ql_dbg(ql_dbg_taskm, vha, 0x8019, + "%s return status=%d.\n", __func__, return_status); return return_status; } @@ -831,10 +872,14 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) int wait = 0; struct qla_hw_data *ha = vha->hw; + ql_dbg(ql_dbg_taskm, vha, 0x8000, + "Entered %s for cmd=%p.\n", __func__, cmd); if (!CMD_SP(cmd)) return SUCCESS; ret = fc_block_scsi_eh(cmd); + ql_dbg(ql_dbg_taskm, vha, 0x8001, + "Return value of fc_block_scsi_eh=%d.\n", ret); if (ret != 0) return ret; ret = SUCCESS; @@ -849,20 +894,19 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) return SUCCESS; } - DEBUG2(printk("%s(%ld): aborting sp %p from RISC.", - __func__, vha->host_no, sp)); + ql_dbg(ql_dbg_taskm, vha, 0x8002, + "Aborting sp=%p cmd=%p from RISC ", sp, cmd); /* Get a reference to the sp and drop the lock.*/ sp_get(sp); spin_unlock_irqrestore(&ha->hardware_lock, flags); if (ha->isp_ops->abort_command(sp)) 
{
- DEBUG2(printk("%s(%ld): abort_command "
- "mbx failed.\n", __func__, vha->host_no));
- ret = FAILED;
+ ql_dbg(ql_dbg_taskm, vha, 0x8003,
+ "Abort command mbx failed for cmd=%p.\n", cmd);
 } else {
- DEBUG3(printk("%s(%ld): abort_command "
- "mbx success.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_taskm, vha, 0x8004,
+ "Abort command mbx success.\n");
 wait = 1;
 }
 qla2x00_sp_compl(ha, sp);
@@ -870,16 +914,14 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
 /* Wait for the command to be returned. */
 if (wait) {
 if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
- qla_printk(KERN_ERR, ha,
- "scsi(%ld:%d:%d): Abort handler timed out -- %x.\n",
- vha->host_no, id, lun, ret);
+ ql_log(ql_log_warn, vha, 0x8006,
+ "Abort handler timed out for cmd=%p.\n", cmd);
 ret = FAILED;
 }
 }
- qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d): Abort command issued -- %d %x.\n",
- vha->host_no, id, lun, wait, ret);
+ ql_log(ql_log_info, vha, 0x801c,
+ "Abort command issued -- %d %x.\n", wait, ret);
 return ret;
 }
@@ -947,40 +989,59 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
 int err;
- if (!fcport)
+ if (!fcport) {
+ ql_log(ql_log_warn, vha, 0x8007,
+ "fcport is NULL.\n");
 return FAILED;
+ }
 err = fc_block_scsi_eh(cmd);
+ ql_dbg(ql_dbg_taskm, vha, 0x8008,
+ "fc_block_scsi_eh ret=%d.\n", err);
 if (err != 0)
 return err;
- qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET ISSUED.\n",
- vha->host_no, cmd->device->id, cmd->device->lun, name);
+ ql_log(ql_log_info, vha, 0x8009,
+ "%s RESET ISSUED for id %d lun %d cmd=%p.\n", name,
+ cmd->device->id, cmd->device->lun, cmd);
 err = 0;
- if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
+ if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x800a,
+ "Wait for hba online failed for cmd=%p.\n", cmd);
 goto eh_reset_failed;
+ }
 err = 1;
- if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS)
+ if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x800b,
+ "Wait for loop ready failed for cmd=%p.\n", cmd);
 goto eh_reset_failed;
+ }
 err = 2;
 if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
- != QLA_SUCCESS)
+ != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x800c,
+ "do_reset failed for cmd=%p.\n", cmd);
 goto eh_reset_failed;
+ }
 err = 3;
 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
- cmd->device->lun, type) != QLA_SUCCESS)
+ cmd->device->lun, type) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x800d,
+ "wait for pending cmds failed for cmd=%p.\n", cmd);
 goto eh_reset_failed;
+ }
- qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET SUCCEEDED.\n",
- vha->host_no, cmd->device->id, cmd->device->lun, name);
+ ql_log(ql_log_info, vha, 0x800e,
+ "%s RESET SUCCEEDED for id %d lun %d cmd=%p.\n", name,
+ cmd->device->id, cmd->device->lun, cmd);
 return SUCCESS;
 eh_reset_failed:
- qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET FAILED: %s.\n"
- , vha->host_no, cmd->device->id, cmd->device->lun, name,
- reset_errors[err]);
+ ql_log(ql_log_info, vha, 0x800f,
+ "%s RESET FAILED: %s for id %d lun %d cmd=%p.\n", name,
+ reset_errors[err], cmd->device->id, cmd->device->lun, cmd);
 return FAILED;
 }
@@ -1030,19 +1091,25 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
 id = cmd->device->id;
 lun = cmd->device->lun;
- if (!fcport)
+ if (!fcport) {
+ ql_log(ql_log_warn, vha, 0x8010,
+ "fcport is NULL.\n");
 return ret;
+ }
 ret = fc_block_scsi_eh(cmd);
+ ql_dbg(ql_dbg_taskm, vha, 0x8011,
+ "fc_block_scsi_eh ret=%d.\n",
ret); if (ret != 0) return ret; ret = FAILED; - qla_printk(KERN_INFO, vha->hw, - "scsi(%ld:%d:%d): BUS RESET ISSUED.\n", vha->host_no, id, lun); + ql_log(ql_log_info, vha, 0x8012, + "BUS RESET ISSUED for id %d lun %d.\n", id, lun); if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { - DEBUG2(printk("%s failed:board disabled\n",__func__)); + ql_log(ql_log_fatal, vha, 0x8013, + "Wait for hba online failed board disabled.\n"); goto eh_bus_reset_done; } @@ -1055,12 +1122,15 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) /* Flush outstanding commands. */ if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) != - QLA_SUCCESS) + QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x8014, + "Wait for pending commands failed.\n"); ret = FAILED; + } eh_bus_reset_done: - qla_printk(KERN_INFO, vha->hw, "%s: reset %s\n", __func__, - (ret == FAILED) ? "failed" : "succeeded"); + ql_log(ql_log_warn, vha, 0x802b, + "BUS RESET %s.\n", (ret == FAILED) ? "FAILED" : "SUCCEDED"); return ret; } @@ -1093,16 +1163,21 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) id = cmd->device->id; lun = cmd->device->lun; - if (!fcport) + if (!fcport) { + ql_log(ql_log_warn, vha, 0x8016, + "fcport is NULL.\n"); return ret; + } ret = fc_block_scsi_eh(cmd); + ql_dbg(ql_dbg_taskm, vha, 0x8017, + "fc_block_scsi_eh ret=%d.\n", ret); if (ret != 0) return ret; ret = FAILED; - qla_printk(KERN_INFO, ha, - "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", vha->host_no, id, lun); + ql_log(ql_log_info, vha, 0x8018, + "ADAPTER RESET ISSUED for id %d lun %d.\n", id, lun); if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS) goto eh_host_reset_lock; @@ -1137,8 +1212,11 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) /* failed. schedule dpc to try */ set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); - if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) + if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x802a, + "wait for hba online failed.\n"); goto eh_host_reset_lock; + } } clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); } @@ -1149,7 +1227,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) ret = SUCCESS; eh_host_reset_lock: - qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__, + qla_printk(KERN_INFO, ha, "%s: reset %s.\n", __func__, (ret == FAILED) ? 
"failed" : "succeeded"); return ret; @@ -1179,9 +1257,9 @@ qla2x00_loop_reset(scsi_qla_host_t *vha) ret = ha->isp_ops->target_reset(fcport, 0, 0); if (ret != QLA_SUCCESS) { - DEBUG2_3(printk("%s(%ld): bus_reset failed: " - "target_reset=%d d_id=%x.\n", __func__, - vha->host_no, ret, fcport->d_id.b24)); + ql_dbg(ql_dbg_taskm, vha, 0x802c, + "Bus Reset failed: Target Reset=%d " + "d_id=%x.\n", ret, fcport->d_id.b24); } } } @@ -1189,9 +1267,8 @@ qla2x00_loop_reset(scsi_qla_host_t *vha) if (ha->flags.enable_lip_full_login && !IS_QLA8XXX_TYPE(ha)) { ret = qla2x00_full_login_lip(vha); if (ret != QLA_SUCCESS) { - DEBUG2_3(printk("%s(%ld): failed: " - "full_login_lip=%d.\n", __func__, vha->host_no, - ret)); + ql_dbg(ql_dbg_taskm, vha, 0x802d, + "full_login_lip=%d.\n", ret); } atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); @@ -1202,8 +1279,8 @@ qla2x00_loop_reset(scsi_qla_host_t *vha) if (ha->flags.enable_lip_reset) { ret = qla2x00_lip_reset(vha); if (ret != QLA_SUCCESS) { - DEBUG2_3(printk("%s(%ld): failed: " - "lip_reset=%d.\n", __func__, vha->host_no, ret)); + ql_dbg(ql_dbg_taskm, vha, 0x802e, + "lip_reset failed (%d).\n", ret); } else qla2x00_wait_for_loop_ready(vha); } @@ -1302,17 +1379,17 @@ static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth) if (!scsi_track_queue_full(sdev, qdepth)) return; - DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw, - "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n", - fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun, - sdev->queue_depth)); + ql_dbg(ql_dbg_io, fcport->vha, 0x3029, + "Queue depth adjusted-down " + "to %d for scsi(%ld:%d:%d:%d).\n", + sdev->queue_depth, fcport->vha->host_no, + sdev->channel, sdev->id, sdev->lun); } static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth) { fc_port_t *fcport = sdev->hostdata; struct scsi_qla_host *vha = fcport->vha; - struct qla_hw_data *ha = vha->hw; struct req_que *req = NULL; req = vha->req; @@ -1327,10 +1404,11 @@ static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth) else scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth); - DEBUG2(qla_printk(KERN_INFO, ha, - "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n", - fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun, - sdev->queue_depth)); + ql_dbg(ql_dbg_io, vha, 0x302a, + "Queue depth adjusted-up to %d for " + "scsi(%ld:%d:%d:%d).\n", + sdev->queue_depth, fcport->vha->host_no, + sdev->channel, sdev->id, sdev->lun); } static int @@ -1776,6 +1854,9 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha) ha->flags.port0 = 1; else ha->flags.port0 = 0; + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b, + "device_type=0x%x port=%d fw_srisc_address=%p.\n", + ha->device_type, ha->flags.port0, ha->fw_srisc_address); } static int @@ -1790,10 +1871,9 @@ qla2x00_iospace_config(struct qla_hw_data *ha) if (pci_request_selected_regions(ha->pdev, ha->bars, QLA2XXX_DRIVER_NAME)) { - qla_printk(KERN_WARNING, ha, - "Failed to reserve PIO/MMIO regions (%s)\n", + ql_log_pci(ql_log_fatal, ha->pdev, 0x0011, + "Failed to reserve PIO/MMIO regions (%s), aborting.\n", pci_name(ha->pdev)); - goto iospace_error_exit; } if (!(ha->bars & 1)) @@ -1803,39 +1883,42 @@ qla2x00_iospace_config(struct qla_hw_data *ha) pio = pci_resource_start(ha->pdev, 0); if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) { if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) { - qla_printk(KERN_WARNING, ha, - "Invalid PCI I/O region size (%s)...\n", - pci_name(ha->pdev)); + 
ql_log_pci(ql_log_warn, ha->pdev, 0x0012, + "Invalid pci I/O region size (%s).\n", + pci_name(ha->pdev)); pio = 0; } } else { - qla_printk(KERN_WARNING, ha, - "region #0 not a PIO resource (%s)...\n", + ql_log_pci(ql_log_warn, ha->pdev, 0x0013, + "Region #0 no a PIO resource (%s).\n", pci_name(ha->pdev)); pio = 0; } ha->pio_address = pio; + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014, + "PIO address=%p.\n", + ha->pio_address); skip_pio: /* Use MMIO operations for all accesses. */ if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) { - qla_printk(KERN_ERR, ha, - "region #1 not an MMIO resource (%s), aborting\n", + ql_log_pci(ql_log_fatal, ha->pdev, 0x0015, + "Region #1 not an MMIO resource (%s), aborting.\n", pci_name(ha->pdev)); goto iospace_error_exit; } if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) { - qla_printk(KERN_ERR, ha, - "Invalid PCI mem region size (%s), aborting\n", - pci_name(ha->pdev)); + ql_log_pci(ql_log_fatal, ha->pdev, 0x0016, + "Invalid PCI mem region size (%s), aborting.\n", + pci_name(ha->pdev)); goto iospace_error_exit; } ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN); if (!ha->iobase) { - qla_printk(KERN_ERR, ha, - "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev)); - + ql_log_pci(ql_log_fatal, ha->pdev, 0x0017, + "Cannot remap MMIO (%s), aborting.\n", + pci_name(ha->pdev)); goto iospace_error_exit; } @@ -1849,6 +1932,8 @@ skip_pio: ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3), pci_resource_len(ha->pdev, 3)); if (ha->mqiobase) { + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018, + "MQIO Base=%p.\n", ha->mqiobase); /* Read MSIX vector size of the board */ pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix); ha->msix_count = msix; @@ -1861,17 +1946,24 @@ skip_pio: ha->max_req_queues = 2; } else if (ql2xmaxqueues > 1) { ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ? 
- QLA_MQ_SIZE : ql2xmaxqueues; - DEBUG2(qla_printk(KERN_INFO, ha, "QoS mode set, max no" - " of request queues:%d\n", ha->max_req_queues)); + QLA_MQ_SIZE : ql2xmaxqueues; + ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008, + "QoS mode set, max no of request queues:%d.\n", + ha->max_req_queues); + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019, + "QoS mode set, max no of request queues:%d.\n", + ha->max_req_queues); } - qla_printk(KERN_INFO, ha, - "MSI-X vector count: %d\n", msix); + ql_log_pci(ql_log_info, ha->pdev, 0x001a, + "MSI-X vector count: %d.\n", msix); } else - qla_printk(KERN_INFO, ha, "BAR 3 not enabled\n"); + ql_log_pci(ql_log_info, ha->pdev, 0x001b, + "BAR 3 not enabled.\n"); mqiobase_exit: ha->msix_count = ha->max_rsp_queues + 1; + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c, + "MSIX Count:%d.\n", ha->msix_count); return (0); iospace_error_exit: @@ -1935,7 +2027,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021) { bars = pci_select_bars(pdev, IORESOURCE_MEM); mem_only = 1; + ql_dbg_pci(ql_dbg_init, pdev, 0x0007, + "Mem only adapter.\n"); } + ql_dbg_pci(ql_dbg_init, pdev, 0x0008, + "Bars=%d.\n", bars); if (mem_only) { if (pci_enable_device_mem(pdev)) @@ -1950,9 +2046,12 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL); if (!ha) { - DEBUG(printk("Unable to allocate memory for ha\n")); + ql_log_pci(ql_log_fatal, pdev, 0x0009, + "Unable to allocate memory for ha.\n"); goto probe_out; } + ql_dbg_pci(ql_dbg_init, pdev, 0x000a, + "Memory allocated for ha=%p.\n", ha); ha->pdev = pdev; /* Clear our data area */ @@ -1974,10 +2073,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) goto probe_hw_failed; - qla_printk(KERN_INFO, ha, - "Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq, - ha->iobase); - + ql_log_pci(ql_log_info, pdev, 0x001d, + "Found an ISP%04X irq %d iobase 0x%p.\n", + pdev->device, pdev->irq, ha->iobase); ha->prev_topology = 0; ha->init_cb_size = sizeof(init_cb_t); ha->link_data_rate = PORT_SPEED_UNKNOWN; @@ -2078,7 +2176,18 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; } - + ql_dbg_pci(ql_dbg_init, pdev, 0x001e, + "mbx_count=%d, req_length=%d, " + "rsp_length=%d, max_loop_id=%d, init_cb_size=%d, " + "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, .\n", + ha->mbx_count, req_length, rsp_length, ha->max_loop_id, + ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size, + ha->nvram_npiv_size); + ql_dbg_pci(ql_dbg_init, pdev, 0x001f, + "isp_ops=%p, flash_conf_off=%d, " + "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n", + ha->isp_ops, ha->flash_conf_off, ha->flash_data_off, + ha->nvram_conf_off, ha->nvram_data_off); mutex_init(&ha->vport_lock); init_completion(&ha->mbx_cmd_comp); complete(&ha->mbx_cmd_comp); @@ -2088,10 +2197,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) set_bit(0, (unsigned long *) ha->vp_idx_map); qla2x00_config_dma_addressing(ha); + ql_dbg_pci(ql_dbg_init, pdev, 0x0020, + "64 Bit addressing is %s.\n", + ha->flags.enable_64bit_addressing ? 
"enable" : + "disable"); ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp); if (!ret) { - qla_printk(KERN_WARNING, ha, - "[ERROR] Failed to allocate memory for adapter\n"); + ql_log_pci(ql_log_fatal, pdev, 0x0031, + "Failed to allocate memory for adapter, aborting.\n"); goto probe_hw_failed; } @@ -2103,9 +2216,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) base_vha = qla2x00_create_host(sht, ha); if (!base_vha) { - qla_printk(KERN_WARNING, ha, - "[ERROR] Failed to allocate memory for scsi_host\n"); - ret = -ENOMEM; qla2x00_mem_free(ha); qla2x00_free_req_que(ha, req); @@ -2132,7 +2242,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) if (!IS_QLA82XX(ha)) host->sg_tablesize = QLA_SG_ALL; } - + ql_dbg(ql_dbg_init, base_vha, 0x0032, + "can_queue=%d, req=%p, " + "mgmt_svr_loop_id=%d, sg_tablesize=%d.\n", + host->can_queue, base_vha->req, + base_vha->mgmt_svr_loop_id, host->sg_tablesize); host->max_id = max_id; host->this_id = 255; host->cmd_per_lun = 3; @@ -2146,6 +2260,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) host->transportt = qla2xxx_transport_template; sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC); + ql_dbg(ql_dbg_init, base_vha, 0x0033, + "max_id=%d this_id=%d " + "cmd_per_len=%d unique_id=%d max_cmd_len=%d max_channel=%d " + "max_lun=%d transportt=%p, vendor_id=%d.\n", host->max_id, + host->this_id, host->cmd_per_lun, host->unique_id, + host->max_cmd_len, host->max_channel, host->max_lun, + host->transportt, sht->vendor_id); + /* Set up the irqs */ ret = qla2x00_request_irqs(ha, rsp); if (ret) @@ -2156,9 +2278,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) /* Alloc arrays of request and response ring ptrs */ que_init: if (!qla2x00_alloc_queues(ha)) { - qla_printk(KERN_WARNING, ha, - "[ERROR] Failed to allocate memory for queue" - " pointers\n"); + ql_log(ql_log_fatal, base_vha, 0x003d, + "Failed to allocate memory for queue pointers.. 
aborting.\n"); goto probe_init_failed; } @@ -2186,20 +2307,33 @@ que_init: rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0]; } - if (qla2x00_initialize_adapter(base_vha)) { - qla_printk(KERN_WARNING, ha, - "Failed to initialize adapter\n"); + ql_dbg(ql_dbg_multiq, base_vha, 0xc009, + "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n", + ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp); + ql_dbg(ql_dbg_multiq, base_vha, 0xc00a, + "req->req_q_in=%p req->req_q_out=%p " + "rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n", + req->req_q_in, req->req_q_out, + rsp->rsp_q_in, rsp->rsp_q_out); + ql_dbg(ql_dbg_init, base_vha, 0x003e, + "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n", + ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp); + ql_dbg(ql_dbg_init, base_vha, 0x003f, + "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n", + req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out); - DEBUG2(printk("scsi(%ld): Failed to initialize adapter - " - "Adapter flags %x.\n", - base_vha->host_no, base_vha->device_flags)); + if (qla2x00_initialize_adapter(base_vha)) { + ql_log(ql_log_fatal, base_vha, 0x00d6, + "Failed to initialize adapter - Adapter flags %x.\n", + base_vha->device_flags); if (IS_QLA82XX(ha)) { qla82xx_idc_lock(ha); qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED); qla82xx_idc_unlock(ha); - qla_printk(KERN_INFO, ha, "HW State: FAILED\n"); + ql_log(ql_log_fatal, base_vha, 0x00d7, + "HW State: FAILED.\n"); } ret = -ENODEV; @@ -2208,9 +2342,8 @@ que_init: if (ha->mqenable) { if (qla25xx_setup_mode(base_vha)) { - qla_printk(KERN_WARNING, ha, - "Can't create queues, falling back to single" - " queue mode\n"); + ql_log(ql_log_warn, base_vha, 0x00ec, + "Failed to create queues, falling back to single queue mode.\n"); goto que_init; } } @@ -2222,13 +2355,15 @@ que_init: * Startup the kernel thread for this host adapter */ ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha, - "%s_dpc", base_vha->host_str); + "%s_dpc", base_vha->host_str); if (IS_ERR(ha->dpc_thread)) { - qla_printk(KERN_WARNING, ha, - "Unable to start DPC thread!\n"); + ql_log(ql_log_fatal, base_vha, 0x00ed, + "Failed to start DPC thread.\n"); ret = PTR_ERR(ha->dpc_thread); goto probe_failed; } + ql_dbg(ql_dbg_init, base_vha, 0x00ee, + "DPC thread started successfully.\n"); skip_dpc: list_add_tail(&base_vha->list, &ha->vp_list); @@ -2236,16 +2371,18 @@ skip_dpc: /* Initialized the timer */ qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL); - - DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n", - base_vha->host_no, ha)); + ql_dbg(ql_dbg_init, base_vha, 0x00ef, + "Started qla2x00_timer with " + "interval=%d.\n", WATCH_INTERVAL); + ql_dbg(ql_dbg_init, base_vha, 0x00f0, + "Detected hba at address=%p.\n", + ha); if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) { if (ha->fw_attributes & BIT_4) { base_vha->flags.difdix_supported = 1; - DEBUG18(qla_printk(KERN_INFO, ha, - "Registering for DIF/DIX type 1 and 3" - " protection.\n")); + ql_dbg(ql_dbg_init, base_vha, 0x00f1, + "Registering for DIF/DIX type 1 and 3 protection.\n"); scsi_host_set_prot(host, SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION @@ -2267,6 +2404,9 @@ skip_dpc: base_vha->flags.init_done = 1; base_vha->flags.online = 1; + ql_dbg(ql_dbg_init, base_vha, 0x00f2, + "Init done and hba is online.\n"); + scsi_scan_host(host); qla2x00_alloc_sysfs_attr(base_vha); @@ -2275,14 +2415,17 @@ skip_dpc: qla2x00_dfs_setup(base_vha); - qla_printk(KERN_INFO, ha, "\n" - " QLogic Fibre Channel HBA Driver: %s\n" - " 
QLogic %s - %s\n" - " ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n", - qla2x00_version_str, ha->model_number, - ha->model_desc ? ha->model_desc : "", pdev->device, - ha->isp_ops->pci_info_str(base_vha, pci_info), pci_name(pdev), - ha->flags.enable_64bit_addressing ? '+' : '-', base_vha->host_no, + ql_log(ql_log_info, base_vha, 0x00fa, + "QLogic Fibre Channed HBA Driver: %s.\n", + qla2x00_version_str); + ql_log(ql_log_info, base_vha, 0x00fb, + "QLogic %s - %s.\n", + ha->model_number, ha->model_desc ? ha->model_desc : ""); + ql_log(ql_log_info, base_vha, 0x00fc, + "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n", + pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info), + pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-', + base_vha->host_no, ha->isp_ops->fw_version_str(base_vha, fw_str)); return 0; @@ -2580,20 +2723,15 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport, fcport->login_retry = vha->hw->login_retry_count; set_bit(RELOGIN_NEEDED, &vha->dpc_flags); - DEBUG(printk("scsi(%ld): Port login retry: " + ql_dbg(ql_dbg_disc, vha, 0x2067, + "Port login retry " "%02x%02x%02x%02x%02x%02x%02x%02x, " - "id = 0x%04x retry cnt=%d\n", - vha->host_no, - fcport->port_name[0], - fcport->port_name[1], - fcport->port_name[2], - fcport->port_name[3], - fcport->port_name[4], - fcport->port_name[5], - fcport->port_name[6], - fcport->port_name[7], - fcport->loop_id, - fcport->login_retry)); + "id = 0x%04x retry cnt=%d.\n", + fcport->port_name[0], fcport->port_name[1], + fcport->port_name[2], fcport->port_name[3], + fcport->port_name[4], fcport->port_name[5], + fcport->port_name[6], fcport->port_name[7], + fcport->loop_id, fcport->login_retry); } } @@ -2676,6 +2814,9 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, ctx_cachep); if (!ha->ctx_mempool) goto fail_free_srb_mempool; + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0021, + "ctx_cachep=%p ctx_mempool=%p.\n", + ctx_cachep, ha->ctx_mempool); } /* Get memory for cached NVRAM */ @@ -2690,22 +2831,29 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, if (!ha->s_dma_pool) goto fail_free_nvram; + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0022, + "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n", + ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool); + if (IS_QLA82XX(ha) || ql2xenabledif) { ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev, DSD_LIST_DMA_POOL_SIZE, 8, 0); if (!ha->dl_dma_pool) { - qla_printk(KERN_WARNING, ha, - "Memory Allocation failed - dl_dma_pool\n"); + ql_log_pci(ql_log_fatal, ha->pdev, 0x0023, + "Failed to allocate memory for dl_dma_pool.\n"); goto fail_s_dma_pool; } ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev, FCP_CMND_DMA_POOL_SIZE, 8, 0); if (!ha->fcp_cmnd_dma_pool) { - qla_printk(KERN_WARNING, ha, - "Memory Allocation failed - fcp_cmnd_dma_pool\n"); + ql_log_pci(ql_log_fatal, ha->pdev, 0x0024, + "Failed to allocate memory for fcp_cmnd_dma_pool.\n"); goto fail_dl_dma_pool; } + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025, + "dl_dma_pool=%p fcp_cmnd_dma_pool=%p.\n", + ha->dl_dma_pool, ha->fcp_cmnd_dma_pool); } /* Allocate memory for SNS commands */ @@ -2715,6 +2863,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL); if (!ha->sns_cmd) goto fail_dma_pool; + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026, + "sns_cmd.\n", ha->sns_cmd); } else { /* Get consistent memory allocated for MS IOCB */ ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, 
GFP_KERNEL, @@ -2726,12 +2876,16 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL); if (!ha->ct_sns) goto fail_free_ms_iocb; + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0027, + "ms_iocb=%p ct_sns=%p.\n", + ha->ms_iocb, ha->ct_sns); } /* Allocate memory for request ring */ *req = kzalloc(sizeof(struct req_que), GFP_KERNEL); if (!*req) { - DEBUG(printk("Unable to allocate memory for req\n")); + ql_log_pci(ql_log_fatal, ha->pdev, 0x0028, + "Failed to allocate memory for req.\n"); goto fail_req; } (*req)->length = req_len; @@ -2739,14 +2893,15 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, ((*req)->length + 1) * sizeof(request_t), &(*req)->dma, GFP_KERNEL); if (!(*req)->ring) { - DEBUG(printk("Unable to allocate memory for req_ring\n")); + ql_log_pci(ql_log_fatal, ha->pdev, 0x0029, + "Failed to allocate memory for req_ring.\n"); goto fail_req_ring; } /* Allocate memory for response ring */ *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL); if (!*rsp) { - qla_printk(KERN_WARNING, ha, - "Unable to allocate memory for rsp\n"); + ql_log_pci(ql_log_fatal, ha->pdev, 0x002a, + "Failed to allocate memory for rsp.\n"); goto fail_rsp; } (*rsp)->hw = ha; @@ -2755,19 +2910,24 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, ((*rsp)->length + 1) * sizeof(response_t), &(*rsp)->dma, GFP_KERNEL); if (!(*rsp)->ring) { - qla_printk(KERN_WARNING, ha, - "Unable to allocate memory for rsp_ring\n"); + ql_log_pci(ql_log_fatal, ha->pdev, 0x002b, + "Failed to allocate memory for rsp_ring.\n"); goto fail_rsp_ring; } (*req)->rsp = *rsp; (*rsp)->req = *req; + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002c, + "req=%p req->length=%d req->ring=%p rsp=%p " + "rsp->length=%d rsp->ring=%p.\n", + *req, (*req)->length, (*req)->ring, *rsp, (*rsp)->length, + (*rsp)->ring); /* Allocate memory for NVRAM data for vports */ if (ha->nvram_npiv_size) { ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) * - ha->nvram_npiv_size, GFP_KERNEL); + ha->nvram_npiv_size, GFP_KERNEL); if (!ha->npiv_info) { - qla_printk(KERN_WARNING, ha, - "Unable to allocate memory for npiv info\n"); + ql_log_pci(ql_log_fatal, ha->pdev, 0x002d, + "Failed to allocate memory for npiv_info.\n"); goto fail_npiv_info; } } else @@ -2779,6 +2939,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, &ha->ex_init_cb_dma); if (!ha->ex_init_cb) goto fail_ex_init_cb; + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002e, + "ex_init_cb=%p.\n", ha->ex_init_cb); } INIT_LIST_HEAD(&ha->gbl_dsd_list); @@ -2789,6 +2951,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, &ha->async_pd_dma); if (!ha->async_pd) goto fail_async_pd; + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002f, + "async_pd=%p.\n", ha->async_pd); } INIT_LIST_HEAD(&ha->vp_list); @@ -2854,7 +3018,8 @@ fail_free_init_cb: ha->init_cb = NULL; ha->init_cb_dma = 0; fail: - DEBUG(printk("%s: Memory allocation failure\n", __func__)); + ql_log(ql_log_fatal, NULL, 0x0030, + "Memory allocation failure.\n"); return -ENOMEM; } @@ -3003,8 +3168,8 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t)); if (host == NULL) { - printk(KERN_WARNING - "qla2xxx: Couldn't allocate host from scsi layer!\n"); + ql_log_pci(ql_log_fatal, ha->pdev, 0x0107, + "Failed to allocate host from the scsi layer, aborting.\n"); goto fail; } @@ -3023,6 +3188,11 @@ struct scsi_qla_host 
*qla2x00_create_host(struct scsi_host_template *sht, spin_lock_init(&vha->work_lock); sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no); + ql_dbg(ql_dbg_init, vha, 0x0041, + "Allocated the host=%p hw=%p vha=%p dev_name=%s", + vha->host, vha->hw, vha, + dev_name(&(ha->pdev->dev))); + return vha; fail: @@ -3264,18 +3434,18 @@ void qla2x00_relogin(struct scsi_qla_host *vha) if (status == QLA_SUCCESS) { fcport->old_loop_id = fcport->loop_id; - DEBUG(printk("scsi(%ld): port login OK: logged " - "in ID 0x%x\n", vha->host_no, fcport->loop_id)); + ql_dbg(ql_dbg_disc, vha, 0x2003, + "Port login OK: logged in ID 0x%x.\n", + fcport->loop_id); qla2x00_update_fcport(vha, fcport); } else if (status == 1) { set_bit(RELOGIN_NEEDED, &vha->dpc_flags); /* retry the login again */ - DEBUG(printk("scsi(%ld): Retrying" - " %d login again loop_id 0x%x\n", - vha->host_no, fcport->login_retry, - fcport->loop_id)); + ql_dbg(ql_dbg_disc, vha, 0x2007, + "Retrying %d login again loop_id 0x%x.\n", + fcport->login_retry, fcport->loop_id); } else { fcport->login_retry = 0; } @@ -3315,26 +3485,27 @@ qla2x00_do_dpc(void *data) set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) { - DEBUG3(printk("qla2x00: DPC handler sleeping\n")); + ql_dbg(ql_dbg_dpc, base_vha, 0x4000, + "DPC handler sleeping.\n"); schedule(); __set_current_state(TASK_RUNNING); - DEBUG3(printk("qla2x00: DPC handler waking up\n")); + ql_dbg(ql_dbg_dpc, base_vha, 0x4001, + "DPC handler waking up.\n"); + ql_dbg(ql_dbg_dpc, base_vha, 0x4002, + "dpc_flags=0x%lx.\n", base_vha->dpc_flags); /* Initialization not yet finished. Don't do anything yet. */ if (!base_vha->flags.init_done) continue; if (ha->flags.eeh_busy) { - DEBUG17(qla_printk(KERN_WARNING, ha, - "qla2x00_do_dpc: dpc_flags: %lx\n", - base_vha->dpc_flags)); + ql_dbg(ql_dbg_dpc, base_vha, 0x4003, + "eeh_busy=%d.\n", ha->flags.eeh_busy); continue; } - DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no)); - ha->dpc_active = 1; if (ha->flags.mbox_busy) { @@ -3351,8 +3522,8 @@ qla2x00_do_dpc(void *data) qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED); qla82xx_idc_unlock(ha); - qla_printk(KERN_INFO, ha, - "HW State: FAILED\n"); + ql_log(ql_log_info, base_vha, 0x4004, + "HW State: FAILED.\n"); qla82xx_device_state_handler(base_vha); continue; } @@ -3360,10 +3531,8 @@ qla2x00_do_dpc(void *data) if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED, &base_vha->dpc_flags)) { - DEBUG(printk(KERN_INFO - "scsi(%ld): dpc: sched " - "qla82xx_fcoe_ctx_reset ha = %p\n", - base_vha->host_no, ha)); + ql_dbg(ql_dbg_dpc, base_vha, 0x4005, + "FCoE context reset scheduled.\n"); if (!(test_and_set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags))) { if (qla82xx_fcoe_ctx_reset(base_vha)) { @@ -3377,18 +3546,16 @@ qla2x00_do_dpc(void *data) &base_vha->dpc_flags); } - DEBUG(printk("scsi(%ld): dpc:" - " qla82xx_fcoe_ctx_reset end\n", - base_vha->host_no)); + ql_dbg(ql_dbg_dpc, base_vha, 0x4006, + "FCoE context reset end.\n"); } } if (test_and_clear_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) { - DEBUG(printk("scsi(%ld): dpc: sched " - "qla2x00_abort_isp ha = %p\n", - base_vha->host_no, ha)); + ql_dbg(ql_dbg_dpc, base_vha, 0x4007, + "ISP abort scheduled.\n"); if (!(test_and_set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags))) { @@ -3401,8 +3568,8 @@ qla2x00_do_dpc(void *data) &base_vha->dpc_flags); } - DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n", - base_vha->host_no)); + ql_dbg(ql_dbg_dpc, base_vha, 0x4008, + "ISP abort end.\n"); } if (test_bit(FCPORT_UPDATE_NEEDED, 
&base_vha->dpc_flags)) { @@ -3411,9 +3578,8 @@ qla2x00_do_dpc(void *data) } if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) { - DEBUG(printk(KERN_INFO "scsi(%ld): dpc: sched " - "qla2x00_quiesce_needed ha = %p\n", - base_vha->host_no, ha)); + ql_dbg(ql_dbg_dpc, base_vha, 0x4009, + "Quiescence mode scheduled.\n"); qla82xx_device_state_handler(base_vha); clear_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags); if (!ha->flags.quiesce_owner) { @@ -3423,17 +3589,20 @@ qla2x00_do_dpc(void *data) qla82xx_clear_qsnt_ready(base_vha); qla82xx_idc_unlock(ha); } + ql_dbg(ql_dbg_dpc, base_vha, 0x400a, + "Quiescence mode end.\n"); } if (test_and_clear_bit(RESET_MARKER_NEEDED, &base_vha->dpc_flags) && (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) { - DEBUG(printk("scsi(%ld): qla2x00_reset_marker()\n", - base_vha->host_no)); - + ql_dbg(ql_dbg_dpc, base_vha, 0x400b, + "Reset marker scheduled.\n"); qla2x00_rst_aen(base_vha); clear_bit(RESET_ACTIVE, &base_vha->dpc_flags); + ql_dbg(ql_dbg_dpc, base_vha, 0x400c, + "Reset marker end.\n"); } /* Retry each device up to login retry count */ @@ -3442,19 +3611,18 @@ qla2x00_do_dpc(void *data) !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) && atomic_read(&base_vha->loop_state) != LOOP_DOWN) { - DEBUG(printk("scsi(%ld): qla2x00_port_login()\n", - base_vha->host_no)); + ql_dbg(ql_dbg_dpc, base_vha, 0x400d, + "Relogin scheduled.\n"); qla2x00_relogin(base_vha); - - DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n", - base_vha->host_no)); + ql_dbg(ql_dbg_dpc, base_vha, 0x400e, + "Relogin end.\n"); } if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags)) { - DEBUG(printk("scsi(%ld): qla2x00_loop_resync()\n", - base_vha->host_no)); + ql_dbg(ql_dbg_dpc, base_vha, 0x400f, + "Loop resync scheduled.\n"); if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &base_vha->dpc_flags))) { @@ -3465,8 +3633,8 @@ qla2x00_do_dpc(void *data) &base_vha->dpc_flags); } - DEBUG(printk("scsi(%ld): qla2x00_loop_resync - end\n", - base_vha->host_no)); + ql_dbg(ql_dbg_dpc, base_vha, 0x4010, + "Loop resync end.\n"); } if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) && @@ -3489,7 +3657,8 @@ qla2x00_do_dpc(void *data) } /* End of while(1) */ __set_current_state(TASK_RUNNING); - DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no)); + ql_dbg(ql_dbg_dpc, base_vha, 0x4011, + "DPC handler exiting.\n"); /* * Make sure that nobody tries to wake us up again. 
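Throughout the qla2xxx hunks in this section, the compile-time DEBUG*()/qla_printk() macros are replaced by ql_dbg()/ql_log() calls that take a debug-category mask, the issuing host, a unique hex message id, and a printf-style format, with debug output gated at runtime by the ql2xextended_error_logging mask (visible in the qla2x00_sp_compl hunk below). The following standalone C sketch only illustrates that mask-gated pattern; ql_dbg_sketch, ext_err_logging and the QL_DBG_* values are hypothetical stand-ins, not the driver's implementation.

/*
 * Illustrative sketch only, not the qla2xxx code: it mimics the calling
 * convention seen in the surrounding hunks (category mask, host, unique
 * message id, printf-style format) with userspace stand-ins.
 */
#include <stdarg.h>
#include <stdio.h>

#define QL_DBG_INIT 0x40000000u /* hypothetical category bits */
#define QL_DBG_DPC  0x00080000u

static unsigned int ext_err_logging = QL_DBG_DPC; /* stands in for ql2xextended_error_logging */

static void ql_dbg_sketch(unsigned int level, const char *host, int id, const char *fmt, ...)
{
	va_list ap;

	if (!(level & ext_err_logging)) /* suppressed unless the category bit is enabled */
		return;
	printf("%s: %04x: ", host, id); /* the fixed id keeps every message greppable */
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

int main(void)
{
	ql_dbg_sketch(QL_DBG_DPC, "host0", 0x4000, "DPC handler sleeping.\n"); /* printed */
	ql_dbg_sketch(QL_DBG_INIT, "host0", 0x0041, "Allocated the host.\n"); /* filtered out */
	return 0;
}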
@@ -3596,9 +3765,11 @@ void qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp) { if (atomic_read(&sp->ref_count) == 0) { - DEBUG2(qla_printk(KERN_WARNING, ha, - "SP reference-count to ZERO -- sp=%p\n", sp)); - DEBUG2(BUG()); + ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3015, + "SP reference-count to ZERO -- sp=%p cmd=%p.\n", + sp, sp->cmd); + if (ql2xextended_error_logging & ql_dbg_io) + BUG(); return; } if (!atomic_dec_and_test(&sp->ref_count)) @@ -3626,6 +3797,9 @@ qla2x00_timer(scsi_qla_host_t *vha) struct req_que *req; if (ha->flags.eeh_busy) { + ql_dbg(ql_dbg_timer, vha, 0x6000, + "EEH = %d, restarting timer.\n", + ha->flags.eeh_busy); qla2x00_restart_timer(vha, WATCH_INTERVAL); return; } @@ -3650,9 +3824,8 @@ qla2x00_timer(scsi_qla_host_t *vha) if (atomic_read(&vha->loop_down_timer) == vha->loop_down_abort_time) { - DEBUG(printk("scsi(%ld): Loop Down - aborting the " - "queues before time expire\n", - vha->host_no)); + ql_log(ql_log_info, vha, 0x6008, + "Loop down - aborting the queues before time expires.\n"); if (!IS_QLA2100(ha) && vha->link_down_timeout) atomic_set(&vha->loop_state, LOOP_DEAD); @@ -3697,10 +3870,7 @@ qla2x00_timer(scsi_qla_host_t *vha) /* if the loop has been down for 4 minutes, reinit adapter */ if (atomic_dec_and_test(&vha->loop_down_timer) != 0) { if (!(vha->device_flags & DFLG_NO_CABLE)) { - DEBUG(printk("scsi(%ld): Loop down - " - "aborting ISP.\n", - vha->host_no)); - qla_printk(KERN_WARNING, ha, + ql_log(ql_log_warn, vha, 0x6009, "Loop down - aborting ISP.\n"); if (IS_QLA82XX(ha)) @@ -3711,9 +3881,9 @@ qla2x00_timer(scsi_qla_host_t *vha) &vha->dpc_flags); } } - DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n", - vha->host_no, - atomic_read(&vha->loop_down_timer))); + ql_dbg(ql_dbg_timer, vha, 0x600a, + "Loop down - seconds remaining %d.\n", + atomic_read(&vha->loop_down_timer)); } /* Check if beacon LED needs to be blinked for physical host only */ @@ -3736,8 +3906,27 @@ qla2x00_timer(scsi_qla_host_t *vha) test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) || test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) || test_bit(VP_DPC_NEEDED, &vha->dpc_flags) || - test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) + test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) { + ql_dbg(ql_dbg_timer, vha, 0x600b, + "isp_abort_needed=%d loop_resync_needed=%d " + "fcport_update_needed=%d start_dpc=%d " + "reset_marker_needed=%d", + test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags), + test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags), + test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags), + start_dpc, + test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)); + ql_dbg(ql_dbg_timer, vha, 0x600c, + "beacon_blink_needed=%d isp_unrecoverable=%d " + "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d " + "relogin_needed=%d.\n", + test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags), + test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags), + test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags), + test_bit(VP_DPC_NEEDED, &vha->dpc_flags), + test_bit(RELOGIN_NEEDED, &vha->dpc_flags)); qla2xxx_wake_dpc(vha); + } qla2x00_restart_timer(vha, WATCH_INTERVAL); } @@ -3806,8 +3995,8 @@ qla2x00_request_firmware(scsi_qla_host_t *vha) goto out; if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) { - DEBUG2(printk("scsi(%ld): Failed to load firmware image " - "(%s).\n", vha->host_no, blob->name)); + ql_log(ql_log_warn, vha, 0x0063, + "Failed to load firmware image (%s).\n", blob->name); blob->fw = NULL; blob = NULL; goto out; @@ -3836,8 +4025,8 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) scsi_qla_host_t *vha = 
pci_get_drvdata(pdev); struct qla_hw_data *ha = vha->hw; - DEBUG2(qla_printk(KERN_WARNING, ha, "error_detected:state %x\n", - state)); + ql_dbg(ql_dbg_aer, vha, 0x9000, + "PCI error detected, state %x.\n", state); switch (state) { case pci_channel_io_normal: @@ -3850,9 +4039,9 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) ha->flags.isp82xx_fw_hung = 1; if (ha->flags.mbox_busy) { ha->flags.mbox_int = 1; - DEBUG2(qla_printk(KERN_ERR, ha, - "Due to pci channel io frozen, doing premature " - "completion of mbx command\n")); + ql_dbg(ql_dbg_aer, vha, 0x9001, + "Due to pci channel io frozen, doing premature " + "completion of mbx command.\n"); complete(&ha->mbx_intr_comp); } } @@ -3900,8 +4089,8 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev) spin_unlock_irqrestore(&ha->hardware_lock, flags); if (risc_paused) { - qla_printk(KERN_INFO, ha, "RISC paused -- mmio_enabled, " - "Dumping firmware!\n"); + ql_log(ql_log_info, base_vha, 0x9003, + "RISC paused -- mmio_enabled, Dumping firmware.\n"); ha->isp_ops->fw_dump(base_vha, 0); return PCI_ERS_RESULT_NEED_RESET; @@ -3917,8 +4106,8 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha) int fn; struct pci_dev *other_pdev = NULL; - DEBUG17(qla_printk(KERN_INFO, ha, - "scsi(%ld): In qla82xx_error_recovery\n", base_vha->host_no)); + ql_dbg(ql_dbg_aer, base_vha, 0x9006, + "Entered %s.\n", __func__); set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); @@ -3932,8 +4121,8 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha) fn = PCI_FUNC(ha->pdev->devfn); while (fn > 0) { fn--; - DEBUG17(qla_printk(KERN_INFO, ha, - "Finding pci device at function = 0x%x\n", fn)); + ql_dbg(ql_dbg_aer, base_vha, 0x9007, + "Finding pci device at function = 0x%x.\n", fn); other_pdev = pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus), ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), @@ -3942,9 +4131,9 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha) if (!other_pdev) continue; if (atomic_read(&other_pdev->enable_cnt)) { - DEBUG17(qla_printk(KERN_INFO, ha, - "Found PCI func available and enabled at 0x%x\n", - fn)); + ql_dbg(ql_dbg_aer, base_vha, 0x9008, + "Found PCI func available and enable at 0x%x.\n", + fn); pci_dev_put(other_pdev); break; } @@ -3953,8 +4142,9 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha) if (!fn) { /* Reset owner */ - DEBUG17(qla_printk(KERN_INFO, ha, - "This devfn is reset owner = 0x%x\n", ha->pdev->devfn)); + ql_dbg(ql_dbg_aer, base_vha, 0x9009, + "This devfn is reset owner = 0x%x.\n", + ha->pdev->devfn); qla82xx_idc_lock(ha); qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, @@ -3964,8 +4154,8 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha) QLA82XX_IDC_VERSION); drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); - DEBUG17(qla_printk(KERN_INFO, ha, - "drv_active = 0x%x\n", drv_active)); + ql_dbg(ql_dbg_aer, base_vha, 0x900a, + "drv_active = 0x%x.\n", drv_active); qla82xx_idc_unlock(ha); /* Reset if device is not already reset @@ -3978,12 +4168,14 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha) qla82xx_idc_lock(ha); if (rval != QLA_SUCCESS) { - qla_printk(KERN_INFO, ha, "HW State: FAILED\n"); + ql_log(ql_log_info, base_vha, 0x900b, + "HW State: FAILED.\n"); qla82xx_clear_drv_active(ha); qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED); } else { - qla_printk(KERN_INFO, ha, "HW State: READY\n"); + ql_log(ql_log_info, base_vha, 0x900c, + "HW State: READY.\n"); qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY); 
qla82xx_idc_unlock(ha); @@ -3996,8 +4188,9 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha) } qla82xx_idc_unlock(ha); } else { - DEBUG17(qla_printk(KERN_INFO, ha, - "This devfn is not reset owner = 0x%x\n", ha->pdev->devfn)); + ql_dbg(ql_dbg_aer, base_vha, 0x900d, + "This devfn is not reset owner = 0x%x.\n", + ha->pdev->devfn); if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) == QLA82XX_DEV_READY)) { ha->flags.isp82xx_fw_hung = 0; @@ -4021,7 +4214,8 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev) struct rsp_que *rsp; int rc, retries = 10; - DEBUG17(qla_printk(KERN_WARNING, ha, "slot_reset\n")); + ql_dbg(ql_dbg_aer, base_vha, 0x9004, + "Slot Reset.\n"); /* Workaround: qla2xxx driver which access hardware earlier * needs error state to be pci_channel_io_online. @@ -4042,7 +4236,7 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev) rc = pci_enable_device(pdev); if (rc) { - qla_printk(KERN_WARNING, ha, + ql_log(ql_log_warn, base_vha, 0x9005, "Can't re-enable PCI device after reset.\n"); goto exit_slot_reset; } @@ -4072,8 +4266,8 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev) exit_slot_reset: - DEBUG17(qla_printk(KERN_WARNING, ha, - "slot_reset-return:ret=%x\n", ret)); + ql_dbg(ql_dbg_aer, base_vha, 0x900e, + "slot_reset return %x.\n", ret); return ret; } @@ -4085,13 +4279,13 @@ qla2xxx_pci_resume(struct pci_dev *pdev) struct qla_hw_data *ha = base_vha->hw; int ret; - DEBUG17(qla_printk(KERN_WARNING, ha, "pci_resume\n")); + ql_dbg(ql_dbg_aer, base_vha, 0x900f, + "pci_resume.\n"); ret = qla2x00_wait_for_hba_online(base_vha); if (ret != QLA_SUCCESS) { - qla_printk(KERN_ERR, ha, - "the device failed to resume I/O " - "from slot/link_reset"); + ql_log(ql_log_fatal, base_vha, 0x9002, + "The device failed to resume I/O from slot/link_reset.\n"); } pci_cleanup_aer_uncorrect_error_status(pdev); @@ -4155,8 +4349,8 @@ qla2x00_module_init(void) srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0, SLAB_HWCACHE_ALIGN, NULL); if (srb_cachep == NULL) { - printk(KERN_ERR - "qla2xxx: Unable to allocate SRB cache...Failing load!\n"); + ql_log(ql_log_fatal, NULL, 0x0001, + "Unable to allocate SRB cache...Failing load!.\n"); return -ENOMEM; } @@ -4169,13 +4363,15 @@ qla2x00_module_init(void) fc_attach_transport(&qla2xxx_transport_functions); if (!qla2xxx_transport_template) { kmem_cache_destroy(srb_cachep); + ql_log(ql_log_fatal, NULL, 0x0002, + "fc_attach_transport failed...Failing load!.\n"); return -ENODEV; } apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops); if (apidev_major < 0) { - printk(KERN_WARNING "qla2xxx: Unable to register char device " - "%s\n", QLA2XXX_APIDEV); + ql_log(ql_log_fatal, NULL, 0x0003, + "Unable to register char device %s.\n", QLA2XXX_APIDEV); } qla2xxx_transport_vport_template = @@ -4183,16 +4379,21 @@ qla2x00_module_init(void) if (!qla2xxx_transport_vport_template) { kmem_cache_destroy(srb_cachep); fc_release_transport(qla2xxx_transport_template); + ql_log(ql_log_fatal, NULL, 0x0004, + "fc_attach_transport vport failed...Failing load!.\n"); return -ENODEV; } - - printk(KERN_INFO "QLogic Fibre Channel HBA Driver: %s\n", + ql_log(ql_log_info, NULL, 0x0005, + "QLogic Fibre Channel HBA Driver: %s.\n", qla2x00_version_str); ret = pci_register_driver(&qla2xxx_pci_driver); if (ret) { kmem_cache_destroy(srb_cachep); fc_release_transport(qla2xxx_transport_template); fc_release_transport(qla2xxx_transport_vport_template); + ql_log(ql_log_fatal, NULL, 0x0006, + "pci_register_driver failed...ret=%d Failing load!.\n", + ret); } return ret; } diff --git 
a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 693647661ed..eff13563c82 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -189,6 +189,7 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data) uint16_t word; uint32_t nv_cmd, wait_cnt; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); qla2x00_nv_write(ha, NVR_DATA_OUT); qla2x00_nv_write(ha, 0); @@ -220,8 +221,8 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data) wait_cnt = NVR_WAIT_CNT; do { if (!--wait_cnt) { - DEBUG9_10(qla_printk(KERN_WARNING, ha, - "NVRAM didn't go ready...\n")); + ql_dbg(ql_dbg_user, vha, 0x708d, + "NVRAM didn't go ready...\n"); break; } NVRAM_DELAY(); @@ -308,6 +309,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha) struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; uint32_t word, wait_cnt; uint16_t wprot, wprot_old; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); /* Clear NVRAM write protection. */ ret = QLA_FUNCTION_FAILED; @@ -350,8 +352,8 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha) wait_cnt = NVR_WAIT_CNT; do { if (!--wait_cnt) { - DEBUG9_10(qla_printk(KERN_WARNING, ha, - "NVRAM didn't go ready...\n")); + ql_dbg(ql_dbg_user, vha, 0x708e, + "NVRAM didn't go ready...\n"); break; } NVRAM_DELAY(); @@ -371,6 +373,7 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat) { struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; uint32_t word, wait_cnt; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); if (stat != QLA_SUCCESS) return; @@ -409,8 +412,8 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat) wait_cnt = NVR_WAIT_CNT; do { if (!--wait_cnt) { - DEBUG9_10(qla_printk(KERN_WARNING, ha, - "NVRAM didn't go ready...\n")); + ql_dbg(ql_dbg_user, vha, 0x708f, + "NVRAM didn't go ready...\n"); break; } NVRAM_DELAY(); @@ -607,9 +610,10 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start) for (chksum = 0; cnt; cnt--) chksum += le16_to_cpu(*wptr++); if (chksum) { - qla_printk(KERN_ERR, ha, + ql_log(ql_log_fatal, vha, 0x0045, "Inconsistent FLTL detected: checksum=0x%x.\n", chksum); - qla2x00_dump_buffer(buf, sizeof(struct qla_flt_location)); + ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010e, + buf, sizeof(struct qla_flt_location)); return QLA_FUNCTION_FAILED; } @@ -618,7 +622,9 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start) *start = (le16_to_cpu(fltl->start_hi) << 16 | le16_to_cpu(fltl->start_lo)) >> 2; end: - DEBUG2(qla_printk(KERN_DEBUG, ha, "FLTL[%s] = 0x%x.\n", loc, *start)); + ql_dbg(ql_dbg_init, vha, 0x0046, + "FLTL[%s] = 0x%x.\n", + loc, *start); return QLA_SUCCESS; } @@ -685,10 +691,10 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) if (*wptr == __constant_cpu_to_le16(0xffff)) goto no_flash_data; if (flt->version != __constant_cpu_to_le16(1)) { - DEBUG2(qla_printk(KERN_INFO, ha, "Unsupported FLT detected: " - "version=0x%x length=0x%x checksum=0x%x.\n", + ql_log(ql_log_warn, vha, 0x0047, + "Unsupported FLT detected: version=0x%x length=0x%x checksum=0x%x.\n", le16_to_cpu(flt->version), le16_to_cpu(flt->length), - le16_to_cpu(flt->checksum))); + le16_to_cpu(flt->checksum)); goto no_flash_data; } @@ -696,10 +702,10 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) for (chksum = 0; cnt; cnt--) chksum += le16_to_cpu(*wptr++); if (chksum) { - DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent FLT detected: " - "version=0x%x 
length=0x%x checksum=0x%x.\n", + ql_log(ql_log_fatal, vha, 0x0048, + "Inconsistent FLT detected: version=0x%x length=0x%x checksum=0x%x.\n", le16_to_cpu(flt->version), le16_to_cpu(flt->length), - chksum)); + le16_to_cpu(flt->checksum)); goto no_flash_data; } @@ -708,10 +714,11 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) for ( ; cnt; cnt--, region++) { /* Store addresses as DWORD offsets. */ start = le32_to_cpu(region->start) >> 2; - - DEBUG3(qla_printk(KERN_DEBUG, ha, "FLT[%02x]: start=0x%x " - "end=0x%x size=0x%x.\n", le32_to_cpu(region->code), start, - le32_to_cpu(region->end) >> 2, le32_to_cpu(region->size))); + ql_dbg(ql_dbg_init, vha, 0x0049, + "FLT[%02x]: start=0x%x " + "end=0x%x size=0x%x.\n", le32_to_cpu(region->code), + start, le32_to_cpu(region->end) >> 2, + le32_to_cpu(region->size)); switch (le32_to_cpu(region->code) & 0xff) { case FLT_REG_FW: @@ -796,12 +803,16 @@ no_flash_data: ha->flt_region_npiv_conf = ha->flags.port0 ? def_npiv_conf0[def] : def_npiv_conf1[def]; done: - DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x " - "vpd_nvram=0x%x vpd=0x%x nvram=0x%x fdt=0x%x flt=0x%x " - "npiv=0x%x. fcp_prio_cfg=0x%x\n", loc, ha->flt_region_boot, - ha->flt_region_fw, ha->flt_region_vpd_nvram, ha->flt_region_vpd, - ha->flt_region_nvram, ha->flt_region_fdt, ha->flt_region_flt, - ha->flt_region_npiv_conf, ha->flt_region_fcp_prio)); + ql_dbg(ql_dbg_init, vha, 0x004a, + "FLT[%s]: boot=0x%x fw=0x%x vpd_nvram=0x%x vpd=0x%x.\n", + loc, ha->flt_region_boot, + ha->flt_region_fw, ha->flt_region_vpd_nvram, + ha->flt_region_vpd); + ql_dbg(ql_dbg_init, vha, 0x004b, + "nvram=0x%x fdt=0x%x flt=0x%x npiv=0x%x fcp_prif_cfg=0x%x.\n", + ha->flt_region_nvram, + ha->flt_region_fdt, ha->flt_region_flt, + ha->flt_region_npiv_conf, ha->flt_region_fcp_prio); } static void @@ -833,10 +844,12 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha) cnt++) chksum += le16_to_cpu(*wptr++); if (chksum) { - DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent FDT detected: " - "checksum=0x%x id=%c version=0x%x.\n", chksum, fdt->sig[0], - le16_to_cpu(fdt->version))); - DEBUG9(qla2x00_dump_buffer((uint8_t *)fdt, sizeof(*fdt))); + ql_dbg(ql_dbg_init, vha, 0x004c, + "Inconsistent FDT detected:" + " checksum=0x%x id=%c version0x%x.\n", chksum, + fdt->sig[0], le16_to_cpu(fdt->version)); + ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0113, + (uint8_t *)fdt, sizeof(*fdt)); goto no_flash_data; } @@ -890,11 +903,12 @@ no_flash_data: break; } done: - DEBUG2(qla_printk(KERN_DEBUG, ha, "FDT[%s]: (0x%x/0x%x) erase=0x%x " - "pro=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid, + ql_dbg(ql_dbg_init, vha, 0x004d, + "FDT[%x]: (0x%x/0x%x) erase=0x%x " + "pr=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid, ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd, - ha->fdt_unprotect_sec_cmd, ha->fdt_wrt_disable, - ha->fdt_block_size)); + ha->fdt_wrt_disable, ha->fdt_block_size); + } static void @@ -919,6 +933,10 @@ qla2xxx_get_idc_param(scsi_qla_host_t *vha) ha->nx_dev_init_timeout = le32_to_cpu(*wptr++); ha->nx_reset_timeout = le32_to_cpu(*wptr); } + ql_dbg(ql_dbg_init, vha, 0x004e, + "nx_dev_init_timeout=%d " + "nx_reset_timeout=%d.\n", ha->nx_dev_init_timeout, + ha->nx_reset_timeout); return; } @@ -963,17 +981,18 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha) if (hdr.version == __constant_cpu_to_le16(0xffff)) return; if (hdr.version != __constant_cpu_to_le16(1)) { - DEBUG2(qla_printk(KERN_INFO, ha, "Unsupported NPIV-Config " + ql_dbg(ql_dbg_user, vha, 0x7090, + "Unsupported NPIV-Config " "detected: version=0x%x 
entries=0x%x checksum=0x%x.\n", le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries), - le16_to_cpu(hdr.checksum))); + le16_to_cpu(hdr.checksum)); return; } data = kmalloc(NPIV_CONFIG_SIZE, GFP_KERNEL); if (!data) { - DEBUG2(qla_printk(KERN_INFO, ha, "NPIV-Config: Unable to " - "allocate memory.\n")); + ql_log(ql_log_warn, vha, 0x7091, + "Unable to allocate memory for data.\n"); return; } @@ -985,10 +1004,11 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha) for (wptr = data, chksum = 0; cnt; cnt--) chksum += le16_to_cpu(*wptr++); if (chksum) { - DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent NPIV-Config " + ql_dbg(ql_dbg_user, vha, 0x7092, + "Inconsistent NPIV-Config " "detected: version=0x%x entries=0x%x checksum=0x%x.\n", le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries), - chksum)); + le16_to_cpu(hdr.checksum)); goto done; } @@ -1014,21 +1034,22 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha) vid.port_name = wwn_to_u64(entry->port_name); vid.node_name = wwn_to_u64(entry->node_name); - DEBUG2(qla_printk(KERN_INFO, ha, "NPIV[%02x]: wwpn=%llx " - "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt, - (unsigned long long)vid.port_name, - (unsigned long long)vid.node_name, - le16_to_cpu(entry->vf_id), - entry->q_qos, entry->f_qos)); + ql_dbg(ql_dbg_user, vha, 0x7093, + "NPIV[%02x]: wwpn=%llx " + "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt, + (unsigned long long)vid.port_name, + (unsigned long long)vid.node_name, + le16_to_cpu(entry->vf_id), + entry->q_qos, entry->f_qos); if (i < QLA_PRECONFIG_VPORTS) { vport = fc_vport_create(vha->host, 0, &vid); if (!vport) - qla_printk(KERN_INFO, ha, - "NPIV-Config: Failed to create vport [%02x]: " - "wwpn=%llx wwnn=%llx.\n", cnt, - (unsigned long long)vid.port_name, - (unsigned long long)vid.node_name); + ql_log(ql_log_warn, vha, 0x7094, + "NPIV-Config Failed to create vport [%02x]: " + "wwpn=%llx wwnn=%llx.\n", cnt, + (unsigned long long)vid.port_name, + (unsigned long long)vid.node_name); } } done: @@ -1127,9 +1148,10 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, &optrom_dma, GFP_KERNEL); if (!optrom) { - qla_printk(KERN_DEBUG, ha, - "Unable to allocate memory for optrom burst write " - "(%x KB).\n", OPTROM_BURST_SIZE / 1024); + ql_log(ql_log_warn, vha, 0x7095, + "Unable to allocate " + "memory for optrom burst write (%x KB).\n", + OPTROM_BURST_SIZE / 1024); } } @@ -1138,7 +1160,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, ret = qla24xx_unprotect_flash(vha); if (ret != QLA_SUCCESS) { - qla_printk(KERN_WARNING, ha, + ql_log(ql_log_warn, vha, 0x7096, "Unable to unprotect flash for update.\n"); goto done; } @@ -1156,9 +1178,9 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, 0xff0000) | ((fdata >> 16) & 0xff)); ret = qla24xx_erase_sector(vha, fdata); if (ret != QLA_SUCCESS) { - DEBUG9(qla_printk(KERN_WARNING, ha, - "Unable to erase sector: address=%x.\n", - faddr)); + ql_dbg(ql_dbg_user, vha, 0x7007, + "Unable to erase erase sector: address=%x.\n", + faddr); break; } } @@ -1172,12 +1194,12 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, flash_data_addr(ha, faddr), OPTROM_BURST_DWORDS); if (ret != QLA_SUCCESS) { - qla_printk(KERN_WARNING, ha, + ql_log(ql_log_warn, vha, 0x7097, "Unable to burst-write optrom segment " "(%x/%x/%llx).\n", ret, flash_data_addr(ha, faddr), (unsigned long long)optrom_dma); - qla_printk(KERN_WARNING, ha, + 
ql_log(ql_log_warn, vha, 0x7098, "Reverting to slow-write.\n"); dma_free_coherent(&ha->pdev->dev, @@ -1194,9 +1216,9 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, ret = qla24xx_write_flash_dword(ha, flash_data_addr(ha, faddr), cpu_to_le32(*dwptr)); if (ret != QLA_SUCCESS) { - DEBUG9(printk("%s(%ld) Unable to program flash " - "address=%x data=%x.\n", __func__, - vha->host_no, faddr, *dwptr)); + ql_dbg(ql_dbg_user, vha, 0x7006, + "Unable to program flash address=%x data=%x.\n", + faddr, *dwptr); break; } @@ -1211,7 +1233,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, ret = qla24xx_protect_flash(vha); if (ret != QLA_SUCCESS) - qla_printk(KERN_WARNING, ha, + ql_log(ql_log_warn, vha, 0x7099, "Unable to protect flash after update.\n"); done: if (optrom) @@ -1324,9 +1346,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr, ret = qla24xx_write_flash_dword(ha, nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr)); if (ret != QLA_SUCCESS) { - DEBUG9(qla_printk(KERN_WARNING, ha, + ql_dbg(ql_dbg_user, vha, 0x709a, "Unable to program nvram address=%x data=%x.\n", - naddr, *dwptr)); + naddr, *dwptr); break; } } @@ -1476,7 +1498,7 @@ qla2x00_beacon_on(struct scsi_qla_host *vha) ha->fw_options[1] |= FO1_DISABLE_GPIO6_7; if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) { - qla_printk(KERN_WARNING, ha, + ql_log(ql_log_warn, vha, 0x709b, "Unable to update fw options (beacon on).\n"); return QLA_FUNCTION_FAILED; } @@ -1541,7 +1563,7 @@ qla2x00_beacon_off(struct scsi_qla_host *vha) rval = qla2x00_set_fw_options(vha, ha->fw_options); if (rval != QLA_SUCCESS) - qla_printk(KERN_WARNING, ha, + ql_log(ql_log_warn, vha, 0x709c, "Unable to update fw options (beacon off).\n"); return rval; } @@ -1616,7 +1638,7 @@ qla24xx_beacon_on(struct scsi_qla_host *vha) if (qla2x00_get_fw_options(vha, ha->fw_options) != QLA_SUCCESS) { - qla_printk(KERN_WARNING, ha, + ql_log(ql_log_warn, vha, 0x7009, "Unable to update fw options (beacon on).\n"); return QLA_FUNCTION_FAILED; } @@ -1670,14 +1692,14 @@ qla24xx_beacon_off(struct scsi_qla_host *vha) ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL; if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) { - qla_printk(KERN_WARNING, ha, - "Unable to update fw options (beacon off).\n"); + ql_log(ql_log_warn, vha, 0x704d, + "Unable to update fw options (beacon on).\n"); return QLA_FUNCTION_FAILED; } if (qla2x00_get_fw_options(vha, ha->fw_options) != QLA_SUCCESS) { - qla_printk(KERN_WARNING, ha, - "Unable to get fw options (beacon off).\n"); + ql_log(ql_log_warn, vha, 0x704e, + "Unable to update fw options (beacon on).\n"); return QLA_FUNCTION_FAILED; } @@ -2389,10 +2411,9 @@ try_fast: optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, &optrom_dma, GFP_KERNEL); if (!optrom) { - qla_printk(KERN_DEBUG, ha, - "Unable to allocate memory for optrom burst read " - "(%x KB).\n", OPTROM_BURST_SIZE / 1024); - + ql_log(ql_log_warn, vha, 0x00cc, + "Unable to allocate memory for optrom burst read (%x KB).\n", + OPTROM_BURST_SIZE / 1024); goto slow_read; } @@ -2407,12 +2428,11 @@ try_fast: rval = qla2x00_dump_ram(vha, optrom_dma, flash_data_addr(ha, faddr), burst); if (rval) { - qla_printk(KERN_WARNING, ha, - "Unable to burst-read optrom segment " - "(%x/%x/%llx).\n", rval, - flash_data_addr(ha, faddr), + ql_log(ql_log_warn, vha, 0x00f5, + "Unable to burst-read optrom segment (%x/%x/%llx).\n", + rval, flash_data_addr(ha, faddr), (unsigned long long)optrom_dma); - 
qla_printk(KERN_WARNING, ha, + ql_log(ql_log_warn, vha, 0x00f6, "Reverting to slow-read.\n"); dma_free_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, @@ -2556,8 +2576,8 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf) if (qla2x00_read_flash_byte(ha, pcihdr) != 0x55 || qla2x00_read_flash_byte(ha, pcihdr + 0x01) != 0xaa) { /* No signature */ - DEBUG2(qla_printk(KERN_DEBUG, ha, "No matching ROM " - "signature.\n")); + ql_log(ql_log_fatal, vha, 0x0050, + "No matching ROM signature.\n"); ret = QLA_FUNCTION_FAILED; break; } @@ -2573,8 +2593,8 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf) qla2x00_read_flash_byte(ha, pcids + 0x2) != 'I' || qla2x00_read_flash_byte(ha, pcids + 0x3) != 'R') { /* Incorrect header. */ - DEBUG2(qla_printk(KERN_INFO, ha, "PCI data struct not " - "found pcir_adr=%x.\n", pcids)); + ql_log(ql_log_fatal, vha, 0x0051, + "PCI data struct not found pcir_adr=%x.\n", pcids); ret = QLA_FUNCTION_FAILED; break; } @@ -2588,8 +2608,9 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf) qla2x00_read_flash_byte(ha, pcids + 0x12); ha->bios_revision[1] = qla2x00_read_flash_byte(ha, pcids + 0x13); - DEBUG3(qla_printk(KERN_DEBUG, ha, "read BIOS %d.%d.\n", - ha->bios_revision[1], ha->bios_revision[0])); + ql_dbg(ql_dbg_init, vha, 0x0052, + "Read BIOS %d.%d.\n", + ha->bios_revision[1], ha->bios_revision[0]); break; case ROM_CODE_TYPE_FCODE: /* Open Firmware standard for PCI (FCode). */ @@ -2602,12 +2623,14 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf) qla2x00_read_flash_byte(ha, pcids + 0x12); ha->efi_revision[1] = qla2x00_read_flash_byte(ha, pcids + 0x13); - DEBUG3(qla_printk(KERN_DEBUG, ha, "read EFI %d.%d.\n", - ha->efi_revision[1], ha->efi_revision[0])); + ql_dbg(ql_dbg_init, vha, 0x0053, + "Read EFI %d.%d.\n", + ha->efi_revision[1], ha->efi_revision[0]); break; default: - DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized code " - "type %x at pcids %x.\n", code_type, pcids)); + ql_log(ql_log_warn, vha, 0x0054, + "Unrecognized code type %x at pcids %x.\n", + code_type, pcids); break; } @@ -2627,21 +2650,28 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf) qla2x00_read_flash_data(ha, dbyte, ha->flt_region_fw * 4 + 10, 8); - DEBUG3(qla_printk(KERN_DEBUG, ha, "dumping fw ver from " - "flash:\n")); - DEBUG3(qla2x00_dump_buffer((uint8_t *)dbyte, 8)); + ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010a, + "Dumping fw " + "ver from flash:.\n"); + ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010b, + (uint8_t *)dbyte, 8); if ((dcode[0] == 0xffff && dcode[1] == 0xffff && dcode[2] == 0xffff && dcode[3] == 0xffff) || (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && dcode[3] == 0)) { - DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized fw " - "revision at %x.\n", ha->flt_region_fw * 4)); + ql_log(ql_log_warn, vha, 0x0057, + "Unrecognized fw revision at %x.\n", + ha->flt_region_fw * 4); } else { /* values are in big endian */ ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1]; ha->fw_revision[1] = dbyte[2] << 16 | dbyte[3]; ha->fw_revision[2] = dbyte[4] << 16 | dbyte[5]; + ql_dbg(ql_dbg_init, vha, 0x0058, + "FW Version: " + "%d.%d.%d.\n", ha->fw_revision[0], + ha->fw_revision[1], ha->fw_revision[2]); } } @@ -2683,8 +2713,8 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) bcode = mbuf + (pcihdr % 4); if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) { /* No signature */ - DEBUG2(qla_printk(KERN_DEBUG, ha, "No matching ROM " - "signature.\n")); + ql_log(ql_log_fatal, vha, 0x0059, + "No matching ROM signature.\n"); ret = 
QLA_FUNCTION_FAILED; break; } @@ -2699,8 +2729,8 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) if (bcode[0x0] != 'P' || bcode[0x1] != 'C' || bcode[0x2] != 'I' || bcode[0x3] != 'R') { /* Incorrect header. */ - DEBUG2(qla_printk(KERN_INFO, ha, "PCI data struct not " - "found pcir_adr=%x.\n", pcids)); + ql_log(ql_log_fatal, vha, 0x005a, + "PCI data struct not found pcir_adr=%x.\n", pcids); ret = QLA_FUNCTION_FAILED; break; } @@ -2712,26 +2742,30 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) /* Intel x86, PC-AT compatible. */ ha->bios_revision[0] = bcode[0x12]; ha->bios_revision[1] = bcode[0x13]; - DEBUG3(qla_printk(KERN_DEBUG, ha, "read BIOS %d.%d.\n", - ha->bios_revision[1], ha->bios_revision[0])); + ql_dbg(ql_dbg_init, vha, 0x005b, + "Read BIOS %d.%d.\n", + ha->bios_revision[1], ha->bios_revision[0]); break; case ROM_CODE_TYPE_FCODE: /* Open Firmware standard for PCI (FCode). */ ha->fcode_revision[0] = bcode[0x12]; ha->fcode_revision[1] = bcode[0x13]; - DEBUG3(qla_printk(KERN_DEBUG, ha, "read FCODE %d.%d.\n", - ha->fcode_revision[1], ha->fcode_revision[0])); + ql_dbg(ql_dbg_init, vha, 0x005c, + "Read FCODE %d.%d.\n", + ha->fcode_revision[1], ha->fcode_revision[0]); break; case ROM_CODE_TYPE_EFI: /* Extensible Firmware Interface (EFI). */ ha->efi_revision[0] = bcode[0x12]; ha->efi_revision[1] = bcode[0x13]; - DEBUG3(qla_printk(KERN_DEBUG, ha, "read EFI %d.%d.\n", - ha->efi_revision[1], ha->efi_revision[0])); + ql_dbg(ql_dbg_init, vha, 0x005d, + "Read EFI %d.%d.\n", + ha->efi_revision[1], ha->efi_revision[0]); break; default: - DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized code " - "type %x at pcids %x.\n", code_type, pcids)); + ql_log(ql_log_warn, vha, 0x005e, + "Unrecognized code type %x at pcids %x.\n", + code_type, pcids); break; } @@ -2753,13 +2787,18 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) || (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && dcode[3] == 0)) { - DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized fw " - "revision at %x.\n", ha->flt_region_fw * 4)); + ql_log(ql_log_warn, vha, 0x005f, + "Unrecognized fw revision at %x.\n", + ha->flt_region_fw * 4); } else { ha->fw_revision[0] = dcode[0]; ha->fw_revision[1] = dcode[1]; ha->fw_revision[2] = dcode[2]; ha->fw_revision[3] = dcode[3]; + ql_dbg(ql_dbg_init, vha, 0x0060, + "Firmware revision %d.%d.%d.%d.\n", + ha->fw_revision[0], ha->fw_revision[1], + ha->fw_revision[2], ha->fw_revision[3]); } /* Check for golden firmware and get version if available */ @@ -2775,9 +2814,9 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) if (dcode[4] == 0xFFFFFFFF && dcode[5] == 0xFFFFFFFF && dcode[6] == 0xFFFFFFFF && dcode[7] == 0xFFFFFFFF) { - DEBUG2(qla_printk(KERN_INFO, ha, - "%s(%ld): Unrecognized golden fw at 0x%x.\n", - __func__, vha->host_no, ha->flt_region_gold_fw * 4)); + ql_log(ql_log_warn, vha, 0x0056, + "Unrecognized golden fw at 0x%x.\n", + ha->flt_region_gold_fw * 4); return ret; } @@ -2843,9 +2882,9 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha) if (!ha->fcp_prio_cfg) { ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE); if (!ha->fcp_prio_cfg) { - qla_printk(KERN_WARNING, ha, - "Unable to allocate memory for fcp priority data " - "(%x).\n", FCP_PRIO_CFG_SIZE); + ql_log(ql_log_warn, vha, 0x00d5, + "Unable to allocate memory for fcp priorty data (%x).\n", + FCP_PRIO_CFG_SIZE); return QLA_FUNCTION_FAILED; } } @@ -2857,7 +2896,7 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha) ha->isp_ops->read_optrom(vha, (uint8_t 
*)ha->fcp_prio_cfg, fcp_prio_addr << 2, FCP_PRIO_CFG_HDR_SIZE); - if (!qla24xx_fcp_prio_cfg_valid(ha->fcp_prio_cfg, 0)) + if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 0)) goto fail; /* read remaining FCP CMD config data from flash */ @@ -2869,7 +2908,7 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha) fcp_prio_addr << 2, (len < max_len ? len : max_len)); /* revalidate the entire FCP priority config data, including entries */ - if (!qla24xx_fcp_prio_cfg_valid(ha->fcp_prio_cfg, 1)) + if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1)) goto fail; ha->flags.fcp_prio_enabled = 1; diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 82e9e5c0476..cf8dfab9489 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -197,6 +197,7 @@ static struct { {"IBM", "ProFibre 4000R", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, {"IBM", "2105", NULL, BLIST_RETRY_HWERROR}, {"iomega", "jaz 1GB", "J.86", BLIST_NOTQ | BLIST_NOLUN}, + {"IOMEGA", "ZIP", NULL, BLIST_NOTQ | BLIST_NOLUN}, {"IOMEGA", "Io20S *F", NULL, BLIST_KEY}, {"INSITE", "Floptical F*8I", NULL, BLIST_KEY}, {"INSITE", "I325VM", NULL, BLIST_KEY}, @@ -243,6 +244,7 @@ static struct { {"Tornado-", "F4", "*", BLIST_NOREPORTLUN}, {"TOSHIBA", "CDROM", NULL, BLIST_ISROM}, {"TOSHIBA", "CD-ROM", NULL, BLIST_ISROM}, + {"Traxdata", "CDR4120", NULL, BLIST_NOLUN}, /* locks up */ {"USB2.0", "SMARTMEDIA/XD", NULL, BLIST_FORCELUN | BLIST_INQUIRY_36}, {"WangDAT", "Model 2600", "01.7", BLIST_SELECT_NO_ATN}, {"WangDAT", "Model 3200", "02.2", BLIST_SELECT_NO_ATN}, diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index ec1803a4872..fc3f168decb 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -137,6 +137,7 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy) host->host_blocked = host->max_host_blocked; break; case SCSI_MLQUEUE_DEVICE_BUSY: + case SCSI_MLQUEUE_EH_RETRY: device->device_blocked = device->max_device_blocked; break; case SCSI_MLQUEUE_TARGET_BUSY: @@ -213,6 +214,8 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, int ret = DRIVER_ERROR << 24; req = blk_get_request(sdev->request_queue, write, __GFP_WAIT); + if (!req) + return ret; if (bufflen && blk_rq_map_kern(sdev->request_queue, req, buffer, bufflen, __GFP_WAIT)) diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c index d70e91ae60a..d82a023a901 100644 --- a/drivers/scsi/scsi_pm.c +++ b/drivers/scsi/scsi_pm.c @@ -144,9 +144,9 @@ int scsi_autopm_get_device(struct scsi_device *sdev) int err; err = pm_runtime_get_sync(&sdev->sdev_gendev); - if (err < 0) + if (err < 0 && err !=-EACCES) pm_runtime_put_sync(&sdev->sdev_gendev); - else if (err > 0) + else err = 0; return err; } @@ -173,9 +173,9 @@ int scsi_autopm_get_host(struct Scsi_Host *shost) int err; err = pm_runtime_get_sync(&shost->shost_gendev); - if (err < 0) + if (err < 0 && err !=-EACCES) pm_runtime_put_sync(&shost->shost_gendev); - else if (err > 0) + else err = 0; return err; } diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c index 8a172d4f456..5fbeadd9681 100644 --- a/drivers/scsi/scsi_transport_spi.c +++ b/drivers/scsi/scsi_transport_spi.c @@ -597,6 +597,28 @@ static DEVICE_ATTR(signalling, S_IRUGO, show_spi_host_signalling, store_spi_host_signalling); +static ssize_t show_spi_host_width(struct device *cdev, + struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = transport_class_to_shost(cdev); + + return sprintf(buf, "%s\n", shost->max_id == 16 ? 
"wide" : "narrow"); +} +static DEVICE_ATTR(host_width, S_IRUGO, + show_spi_host_width, NULL); + +static ssize_t show_spi_host_hba_id(struct device *cdev, + struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = transport_class_to_shost(cdev); + + return sprintf(buf, "%d\n", shost->this_id); +} +static DEVICE_ATTR(hba_id, S_IRUGO, + show_spi_host_hba_id, NULL); + #define DV_SET(x, y) \ if(i->f->set_##x) \ i->f->set_##x(sdev->sdev_target, y) @@ -1380,6 +1402,8 @@ static DECLARE_ANON_TRANSPORT_CLASS(spi_device_class, static struct attribute *host_attributes[] = { &dev_attr_signalling.attr, + &dev_attr_host_width.attr, + &dev_attr_hba_id.attr, NULL }; diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index eb7a3e85304..eba183c428c 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c @@ -160,6 +160,10 @@ static unsigned char *ses_get_page2_descriptor(struct enclosure_device *edev, return NULL; } +/* For device slot and array device slot elements, byte 3 bit 6 + * is "fault sensed" while byte 3 bit 5 is "fault reqstd". As this + * code stands these bits are shifted 4 positions right so in + * sysfs they will appear as bits 2 and 1 respectively. Strange. */ static void ses_get_fault(struct enclosure_device *edev, struct enclosure_component *ecomp) { @@ -181,7 +185,7 @@ static int ses_set_fault(struct enclosure_device *edev, /* zero is disabled */ break; case ENCLOSURE_SETTING_ENABLED: - desc[2] = 0x02; + desc[3] = 0x20; break; default: /* SES doesn't do the SGPIO blink settings */ diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 4778e270716..5fc97d2ba2f 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -221,14 +221,33 @@ static unsigned int sr_check_events(struct cdrom_device_info *cdi, return 0; events = sr_get_events(cd->device); + cd->get_event_changed |= events & DISK_EVENT_MEDIA_CHANGE; + + /* + * If earlier GET_EVENT_STATUS_NOTIFICATION and TUR did not agree + * for several times in a row. We rely on TUR only for this likely + * broken device, to prevent generating incorrect media changed + * events for every open(). + */ + if (cd->ignore_get_event) { + events &= ~DISK_EVENT_MEDIA_CHANGE; + goto do_tur; + } + /* * GET_EVENT_STATUS_NOTIFICATION is enough unless MEDIA_CHANGE * is being cleared. Note that there are devices which hang * if asked to execute TUR repeatedly. 
*/ - if (!(clearing & DISK_EVENT_MEDIA_CHANGE)) - goto skip_tur; + if (cd->device->changed) { + events |= DISK_EVENT_MEDIA_CHANGE; + cd->device->changed = 0; + cd->tur_changed = true; + } + if (!(clearing & DISK_EVENT_MEDIA_CHANGE)) + return events; +do_tur: /* let's see whether the media is there with TUR */ last_present = cd->media_present; ret = scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr); @@ -242,12 +261,31 @@ static unsigned int sr_check_events(struct cdrom_device_info *cdi, (scsi_sense_valid(&sshdr) && sshdr.asc != 0x3a); if (last_present != cd->media_present) - events |= DISK_EVENT_MEDIA_CHANGE; -skip_tur: + cd->device->changed = 1; + if (cd->device->changed) { events |= DISK_EVENT_MEDIA_CHANGE; cd->device->changed = 0; + cd->tur_changed = true; + } + + if (cd->ignore_get_event) + return events; + + /* check whether GET_EVENT is reporting spurious MEDIA_CHANGE */ + if (!cd->tur_changed) { + if (cd->get_event_changed) { + if (cd->tur_mismatch++ > 8) { + sdev_printk(KERN_WARNING, cd->device, + "GET_EVENT and TUR disagree continuously, suppress GET_EVENT events\n"); + cd->ignore_get_event = true; + } + } else { + cd->tur_mismatch = 0; + } } + cd->tur_changed = false; + cd->get_event_changed = false; return events; } diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h index e036f1dc83c..37c8f6b1751 100644 --- a/drivers/scsi/sr.h +++ b/drivers/scsi/sr.h @@ -41,6 +41,13 @@ typedef struct scsi_cd { unsigned readcd_known:1; /* drive supports READ_CD (0xbe) */ unsigned readcd_cdda:1; /* reading audio data using READ_CD */ unsigned media_present:1; /* media is present */ + + /* GET_EVENT spurious event handling, blk layer guarantees exclusion */ + int tur_mismatch; /* nr of get_event TUR mismatches */ + bool tur_changed:1; /* changed according to TUR */ + bool get_event_changed:1; /* changed according to GET_EVENT */ + bool ignore_get_event:1; /* GET_EVENT is unreliable, use TUR */ + struct cdrom_device_info cdi; /* We hold gendisk and scsi_device references on probe and use * the refs on this kref to decide when to release them */ diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c index 07eaef1c722..7e12a2e4e0a 100644 --- a/drivers/scsi/sun3_NCR5380.c +++ b/drivers/scsi/sun3_NCR5380.c @@ -49,13 +49,6 @@ * inside the execution of NCR5380_intr(), leading to recursive * calls. * - * - I've added a function merge_contiguous_buffers() that tries to - * merge scatter-gather buffers that are located at contiguous - * physical addresses and can be processed with the same DMA setup. - * Since most scatter-gather operations work on a page (4K) of - * 4 buffers (1K), in more than 90% of all cases three interrupts and - * DMA setup actions are saved. - * * - I've deleted all the stuff for AUTOPROBE_IRQ, REAL_DMA_POLL, PSEUDO_DMA * and USLEEP, because these were messing up readability and will never be * needed for Atari SCSI. 
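The sr.c and sr.h hunks above add per-poll tur_changed/get_event_changed flags plus a tur_mismatch counter: when GET_EVENT_STATUS_NOTIFICATION keeps reporting a media change that TEST UNIT READY never confirms, more than eight consecutive disagreements set ignore_get_event and the driver thereafter trusts TUR alone. Below is a standalone sketch of that heuristic; the field names mirror the patch, but the harness around them is illustrative only.

#include <stdbool.h>
#include <stdio.h>

struct cd_state {
	int tur_mismatch;       /* consecutive GET_EVENT-only "changes"    */
	bool tur_changed;       /* TUR confirmed a media change this poll  */
	bool get_event_changed; /* GET_EVENT reported a change this poll   */
	bool ignore_get_event;  /* GET_EVENT deemed unreliable, use TUR    */
};

static void reconcile(struct cd_state *cd)
{
	if (!cd->ignore_get_event && !cd->tur_changed) {
		if (cd->get_event_changed) {
			/* GET_EVENT claims a change that TUR never saw */
			if (cd->tur_mismatch++ > 8) {
				printf("GET_EVENT and TUR disagree continuously, suppressing GET_EVENT\n");
				cd->ignore_get_event = true;
			}
		} else {
			cd->tur_mismatch = 0; /* both quiet again: reset the streak */
		}
	}
	cd->tur_changed = false;
	cd->get_event_changed = false;
}

int main(void)
{
	struct cd_state cd = { 0 };

	for (int poll = 0; poll < 12; poll++) {
		cd.get_event_changed = true; /* drive keeps reporting a bogus change */
		cd.tur_changed = false;      /* ...which TUR never confirms          */
		reconcile(&cd);
	}
	printf("ignore_get_event = %d\n", cd.ignore_get_event); /* 1 once the streak passes 8 */
	return 0;
}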
@@ -266,8 +259,9 @@ static struct scsi_host_template *the_template = NULL; (struct NCR5380_hostdata *)(in)->hostdata #define HOSTDATA(in) ((struct NCR5380_hostdata *)(in)->hostdata) -#define NEXT(cmd) (*(struct scsi_cmnd **)&((cmd)->host_scribble)) -#define NEXTADDR(cmd) ((struct scsi_cmnd **)&((cmd)->host_scribble)) +#define NEXT(cmd) ((struct scsi_cmnd *)(cmd)->host_scribble) +#define SET_NEXT(cmd, next) ((cmd)->host_scribble = (void *)(next)) +#define NEXTADDR(cmd) ((struct scsi_cmnd **)&((cmd)->host_scribble)) #define HOSTNO instance->host_no #define H_NO(cmd) (cmd)->device->host->host_no @@ -459,47 +453,6 @@ static void free_all_tags( void ) /* - * Function: void merge_contiguous_buffers(struct scsi_cmnd *cmd) - * - * Purpose: Try to merge several scatter-gather requests into one DMA - * transfer. This is possible if the scatter buffers lie on - * physical contiguous addresses. - * - * Parameters: struct scsi_cmnd *cmd - * The command to work on. The first scatter buffer's data are - * assumed to be already transferred into ptr/this_residual. - */ - -static void merge_contiguous_buffers(struct scsi_cmnd *cmd) -{ - unsigned long endaddr; -#if (NDEBUG & NDEBUG_MERGING) - unsigned long oldlen = cmd->SCp.this_residual; - int cnt = 1; -#endif - - for (endaddr = virt_to_phys(cmd->SCp.ptr + cmd->SCp.this_residual - 1) + 1; - cmd->SCp.buffers_residual && - virt_to_phys(SGADDR(&(cmd->SCp.buffer[1]))) == endaddr; ) { - - MER_PRINTK("VTOP(%p) == %08lx -> merging\n", - SGADDR(&(cmd->SCp.buffer[1])), endaddr); -#if (NDEBUG & NDEBUG_MERGING) - ++cnt; -#endif - ++cmd->SCp.buffer; - --cmd->SCp.buffers_residual; - cmd->SCp.this_residual += cmd->SCp.buffer->length; - endaddr += cmd->SCp.buffer->length; - } -#if (NDEBUG & NDEBUG_MERGING) - if (oldlen != cmd->SCp.this_residual) - MER_PRINTK("merged %d buffers from %p, new length %08x\n", - cnt, cmd->SCp.ptr, cmd->SCp.this_residual); -#endif -} - -/* * Function : void initialize_SCp(struct scsi_cmnd *cmd) * * Purpose : initialize the saved data pointers for cmd to point to the @@ -520,11 +473,6 @@ static __inline__ void initialize_SCp(struct scsi_cmnd *cmd) cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1; cmd->SCp.ptr = (char *) SGADDR(cmd->SCp.buffer); cmd->SCp.this_residual = cmd->SCp.buffer->length; - - /* ++roman: Try to merge some scatter-buffers if they are at - * contiguous physical addresses. 
- */ -// merge_contiguous_buffers( cmd ); } else { cmd->SCp.buffer = NULL; cmd->SCp.buffers_residual = 0; @@ -841,7 +789,7 @@ static char *lprint_Scsi_Cmnd(struct scsi_cmnd *cmd, char *pos, char *buffer, * */ -static int NCR5380_init (struct Scsi_Host *instance, int flags) +static int __init NCR5380_init(struct Scsi_Host *instance, int flags) { int i; SETUP_HOSTDATA(instance); @@ -889,6 +837,11 @@ static int NCR5380_init (struct Scsi_Host *instance, int flags) return 0; } +static void NCR5380_exit(struct Scsi_Host *instance) +{ + /* Empty, as we didn't schedule any delayed work */ +} + /* * Function : int NCR5380_queue_command (struct scsi_cmnd *cmd, * void (*done)(struct scsi_cmnd *)) @@ -962,7 +915,7 @@ static int NCR5380_queue_command_lck(struct scsi_cmnd *cmd, * in a queue */ - NEXT(cmd) = NULL; + SET_NEXT(cmd, NULL); cmd->scsi_done = done; cmd->result = 0; @@ -990,14 +943,14 @@ static int NCR5380_queue_command_lck(struct scsi_cmnd *cmd, */ if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) { LIST(cmd, hostdata->issue_queue); - NEXT(cmd) = hostdata->issue_queue; + SET_NEXT(cmd, hostdata->issue_queue); hostdata->issue_queue = cmd; } else { for (tmp = (struct scsi_cmnd *)hostdata->issue_queue; NEXT(tmp); tmp = NEXT(tmp)) ; LIST(cmd, tmp); - NEXT(tmp) = cmd; + SET_NEXT(tmp, cmd); } local_irq_restore(flags); @@ -1105,12 +1058,12 @@ static void NCR5380_main (struct work_struct *bl) local_irq_disable(); if (prev) { REMOVE(prev, NEXT(prev), tmp, NEXT(tmp)); - NEXT(prev) = NEXT(tmp); + SET_NEXT(prev, NEXT(tmp)); } else { REMOVE(-1, hostdata->issue_queue, tmp, NEXT(tmp)); hostdata->issue_queue = NEXT(tmp); } - NEXT(tmp) = NULL; + SET_NEXT(tmp, NULL); /* reenable interrupts after finding one */ local_irq_restore(flags); @@ -1144,7 +1097,7 @@ static void NCR5380_main (struct work_struct *bl) } else { local_irq_disable(); LIST(tmp, hostdata->issue_queue); - NEXT(tmp) = hostdata->issue_queue; + SET_NEXT(tmp, hostdata->issue_queue); hostdata->issue_queue = tmp; #ifdef SUPPORT_TAGS cmd_free_tag( tmp ); @@ -1439,7 +1392,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd, local_irq_restore(flags); /* Wait for arbitration logic to complete */ -#if NCR_TIMEOUT +#ifdef NCR_TIMEOUT { unsigned long timeout = jiffies + 2*NCR_TIMEOUT; @@ -2070,11 +2023,6 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) --cmd->SCp.buffers_residual; cmd->SCp.this_residual = cmd->SCp.buffer->length; cmd->SCp.ptr = SGADDR(cmd->SCp.buffer); - - /* ++roman: Try to merge some scatter-buffers if - * they are at contiguous physical addresses. 
- */ -// merge_contiguous_buffers( cmd ); INF_PRINTK("scsi%d: %d bytes and %d buffers left\n", HOSTNO, cmd->SCp.this_residual, cmd->SCp.buffers_residual); @@ -2274,7 +2222,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) local_irq_save(flags); LIST(cmd,hostdata->issue_queue); - NEXT(cmd) = hostdata->issue_queue; + SET_NEXT(cmd, hostdata->issue_queue); hostdata->issue_queue = (struct scsi_cmnd *) cmd; local_irq_restore(flags); QU_PRINTK("scsi%d: REQUEST SENSE added to head of " @@ -2330,7 +2278,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) local_irq_save(flags); cmd->device->disconnect = 1; LIST(cmd,hostdata->disconnected_queue); - NEXT(cmd) = hostdata->disconnected_queue; + SET_NEXT(cmd, hostdata->disconnected_queue); hostdata->connected = NULL; hostdata->disconnected_queue = cmd; local_irq_restore(flags); @@ -2589,12 +2537,12 @@ static void NCR5380_reselect (struct Scsi_Host *instance) ) { if (prev) { REMOVE(prev, NEXT(prev), tmp, NEXT(tmp)); - NEXT(prev) = NEXT(tmp); + SET_NEXT(prev, NEXT(tmp)); } else { REMOVE(-1, hostdata->disconnected_queue, tmp, NEXT(tmp)); hostdata->disconnected_queue = NEXT(tmp); } - NEXT(tmp) = NULL; + SET_NEXT(tmp, NULL); break; } } @@ -2762,7 +2710,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) if (cmd == tmp) { REMOVE(5, *prev, tmp, NEXT(tmp)); (*prev) = NEXT(tmp); - NEXT(tmp) = NULL; + SET_NEXT(tmp, NULL); tmp->result = DID_ABORT << 16; local_irq_restore(flags); ABRT_PRINTK("scsi%d: abort removed command from issue queue.\n", @@ -2835,7 +2783,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) if (cmd == tmp) { REMOVE(5, *prev, tmp, NEXT(tmp)); *prev = NEXT(tmp); - NEXT(tmp) = NULL; + SET_NEXT(tmp, NULL); tmp->result = DID_ABORT << 16; /* We must unlock the tag/LUN immediately here, since the * target goes to BUS FREE and doesn't send us another @@ -2943,7 +2891,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd) for (i = 0; (cmd = disconnected_queue); ++i) { disconnected_queue = NEXT(cmd); - NEXT(cmd) = NULL; + SET_NEXT(cmd, NULL); cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16); cmd->scsi_done( cmd ); } diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c index 613f5880d13..baf7328de95 100644 --- a/drivers/scsi/sun3_scsi.c +++ b/drivers/scsi/sun3_scsi.c @@ -70,6 +70,12 @@ #include <asm/idprom.h> #include <asm/machines.h> +#define NDEBUG 0 + +#define NDEBUG_ABORT 0x00100000 +#define NDEBUG_TAGS 0x00200000 +#define NDEBUG_MERGING 0x00400000 + /* dma on! */ #define REAL_DMA @@ -86,8 +92,6 @@ static void NCR5380_print(struct Scsi_Host *instance); /*#define RESET_BOOT */ #define DRIVER_SETUP -#define NDEBUG 0 - /* * BUG can be used to trigger a strange code-size related hang on 2.1 kernels */ @@ -195,7 +199,7 @@ static struct Scsi_Host *default_instance; * */ -int sun3scsi_detect(struct scsi_host_template * tpnt) +int __init sun3scsi_detect(struct scsi_host_template * tpnt) { unsigned long ioaddr; static int called = 0; @@ -314,6 +318,7 @@ int sun3scsi_release (struct Scsi_Host *shpnt) iounmap((void *)sun3_scsi_regp); + NCR5380_exit(shpnt); return 0; } diff --git a/drivers/scsi/sun3_scsi_vme.c b/drivers/scsi/sun3_scsi_vme.c index 7c526b8e30a..fbba78e5722 100644 --- a/drivers/scsi/sun3_scsi_vme.c +++ b/drivers/scsi/sun3_scsi_vme.c @@ -39,6 +39,12 @@ /* dma on! 
*/ #define REAL_DMA +#define NDEBUG 0 + +#define NDEBUG_ABORT 0x00100000 +#define NDEBUG_TAGS 0x00200000 +#define NDEBUG_MERGING 0x00400000 + #include "scsi.h" #include "initio.h" #include <scsi/scsi_host.h> @@ -50,8 +56,6 @@ extern int sun3_map_test(unsigned long, char *); /*#define RESET_BOOT */ #define DRIVER_SETUP -#define NDEBUG 0 - /* * BUG can be used to trigger a strange code-size related hang on 2.1 kernels */ @@ -137,7 +141,7 @@ static struct Scsi_Host *default_instance; * */ -static int sun3scsi_detect(struct scsi_host_template * tpnt) +static int __init sun3scsi_detect(struct scsi_host_template * tpnt) { unsigned long ioaddr, irq = 0; static int called = 0; @@ -283,6 +287,7 @@ int sun3scsi_release (struct Scsi_Host *shpnt) iounmap((void *)sun3_scsi_regp); + NCR5380_exit(shpnt); return 0; } |
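The sun3_NCR5380.c hunks above drop the writable NEXT(cmd) macro, which aliased host_scribble through a pointer-to-pointer cast, in favour of a read-only NEXT() plus an explicit SET_NEXT() used wherever the issue and disconnected queues are relinked. The sketch below shows the same accessor idea on a hypothetical command structure standing in for struct scsi_cmnd; it is an illustration, not the driver code.

#include <stdio.h>

struct cmd {
	int id;
	void *scribble; /* opaque per-command pointer, playing the role of host_scribble */
};

#define NEXT(c)           ((struct cmd *)(c)->scribble)
#define SET_NEXT(c, next) ((c)->scribble = (void *)(next))

int main(void)
{
	struct cmd a = { 1, NULL }, b = { 2, NULL }, c = { 3, NULL };
	struct cmd *issue_queue = NULL;

	/* push each command onto the head of the queue, as the driver does */
	SET_NEXT(&c, issue_queue); issue_queue = &c;
	SET_NEXT(&b, issue_queue); issue_queue = &b;
	SET_NEXT(&a, issue_queue); issue_queue = &a;

	for (struct cmd *p = issue_queue; p; p = NEXT(p))
		printf("cmd %d\n", p->id);
	return 0;
}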
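The ses.c hunk earlier in this section corrects ses_set_fault(): as the comment added above ses_get_fault() notes, the device-slot element carries "fault sensed" in byte 3 bit 6 and "fault reqstd" in byte 3 bit 5, so enabling the fault indicator now writes desc[3] = 0x20 instead of the old desc[2] = 0x02. A tiny standalone sketch of that byte/bit layout assumption:

#include <stdio.h>

int main(void)
{
	unsigned char ctrl[4]   = { 0 }; /* one device-slot control element */
	unsigned char status[4] = { 0 }; /* matching status element         */

	ctrl[3] = 0x20;   /* request the fault indicator: byte 3, bit 5 (the fix above) */
	status[3] = 0x40; /* enclosure reporting "fault sensed": byte 3, bit 6          */

	printf("fault requested = %d\n", (ctrl[3] >> 5) & 1);
	printf("fault sensed    = %d\n", (status[3] >> 6) & 1);
	return 0;
}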