Diffstat (limited to 'drivers/net/ethernet/chelsio')
21 files changed, 3472 insertions(+), 1933 deletions(-)
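
Most of the churn in the diff below is a mechanical rename of the firmware-command accessor macros from the old FW_FOO_GET()/V_FW_FOO()/bare-flag spellings to the suffixed _G/_V/_F/_M forms. As a reading aid only (not part of the patch), here is a minimal sketch of that convention using a made-up FOO field and BAR flag; the real shift and mask values for each command field live in t4fw_api.h and t4_regs.h:

/* Illustrative sketch with a hypothetical field FOO and flag BAR:
 *   _S  bit shift of the field within the word
 *   _M  field mask (width), used by the _G getter
 *   _V  place a caller-supplied value into the field (old FW_FOO(x) / V_FW_FOO(x))
 *   _G  extract the field from a register/word       (old FW_FOO_GET(x) / G_FW_FOO(x))
 *   _F  predefined single-bit flag value             (old bare FW_BAR)
 */
#define FOO_S    16
#define FOO_M    0xffU
#define FOO_V(x) ((x) << FOO_S)
#define FOO_G(x) (((x) >> FOO_S) & FOO_M)

#define BAR_S    30
#define BAR_V(x) ((x) << BAR_S)
#define BAR_F    BAR_V(1U)

So, for example, FW_CMD_REQUEST becomes FW_CMD_REQUEST_F and FW_PORT_CMD_PORTID_GET(x) becomes FW_PORT_CMD_PORTID_G(x), with unchanged semantics.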
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c index 4c587938900..babe2a915b0 100644 --- a/drivers/net/ethernet/chelsio/cxgb/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb/sge.c @@ -301,7 +301,7 @@ unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port, struct sched_port *p = &s->p[port]; unsigned int max_avail_segs; - pr_debug("t1_sched_update_params mtu=%d speed=%d\n", mtu, speed); + pr_debug("%s mtu=%d speed=%d\n", __func__, mtu, speed); if (speed) p->speed = speed; if (mtu) @@ -1025,7 +1025,7 @@ MODULE_PARM_DESC(copybreak, "Receive copy threshold"); /** * get_packet - return the next ingress packet buffer - * @pdev: the PCI device that received the packet + * @adapter: the adapter that received the packet * @fl: the SGE free list holding the packet * @len: the actual packet length, excluding any SGE padding * @@ -1037,14 +1037,15 @@ MODULE_PARM_DESC(copybreak, "Receive copy threshold"); * threshold and the packet is too big to copy, or (b) the packet should * be copied but there is no memory for the copy. */ -static inline struct sk_buff *get_packet(struct pci_dev *pdev, +static inline struct sk_buff *get_packet(struct adapter *adapter, struct freelQ *fl, unsigned int len) { - struct sk_buff *skb; const struct freelQ_ce *ce = &fl->centries[fl->cidx]; + struct pci_dev *pdev = adapter->pdev; + struct sk_buff *skb; if (len < copybreak) { - skb = netdev_alloc_skb_ip_align(NULL, len); + skb = napi_alloc_skb(&adapter->napi, len); if (!skb) goto use_orig_buf; @@ -1357,7 +1358,7 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len) struct sge_port_stats *st; struct net_device *dev; - skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad); + skb = get_packet(adapter, fl, len - sge->rx_pkt_pad); if (unlikely(!skb)) { sge->stats.rx_drops++; return; diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile index 1df65c915b9..b8528077599 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/Makefile +++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile @@ -6,3 +6,4 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o +cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 3c481b26074..a18d33fdb27 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -50,13 +50,13 @@ #include "cxgb4_uld.h" #define T4FW_VERSION_MAJOR 0x01 -#define T4FW_VERSION_MINOR 0x0B -#define T4FW_VERSION_MICRO 0x1B +#define T4FW_VERSION_MINOR 0x0C +#define T4FW_VERSION_MICRO 0x19 #define T4FW_VERSION_BUILD 0x00 #define T5FW_VERSION_MAJOR 0x01 -#define T5FW_VERSION_MINOR 0x0B -#define T5FW_VERSION_MICRO 0x1B +#define T5FW_VERSION_MINOR 0x0C +#define T5FW_VERSION_MICRO 0x19 #define T5FW_VERSION_BUILD 0x00 #define CH_WARN(adap, fmt, ...) 
dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__) @@ -222,6 +222,12 @@ struct tp_err_stats { u32 ofldCongDefer; }; +struct sge_params { + u32 hps; /* host page size for our PF/VF */ + u32 eq_qpp; /* egress queues/page for our PF/VF */ + u32 iq_qpp; /* egress queues/page for our PF/VF */ +}; + struct tp_params { unsigned int ntxchan; /* # of Tx channels */ unsigned int tre; /* log2 of core clocks per TP tick */ @@ -285,6 +291,7 @@ enum chip_type { }; struct adapter_params { + struct sge_params sge; struct tp_params tp; struct vpd_params vpd; struct pci_params pci; @@ -318,10 +325,10 @@ struct adapter_params { #include "t4fw_api.h" #define FW_VERSION(chip) ( \ - FW_HDR_FW_VER_MAJOR_GET(chip##FW_VERSION_MAJOR) | \ - FW_HDR_FW_VER_MINOR_GET(chip##FW_VERSION_MINOR) | \ - FW_HDR_FW_VER_MICRO_GET(chip##FW_VERSION_MICRO) | \ - FW_HDR_FW_VER_BUILD_GET(chip##FW_VERSION_BUILD)) + FW_HDR_FW_VER_MAJOR_G(chip##FW_VERSION_MAJOR) | \ + FW_HDR_FW_VER_MINOR_G(chip##FW_VERSION_MINOR) | \ + FW_HDR_FW_VER_MICRO_G(chip##FW_VERSION_MICRO) | \ + FW_HDR_FW_VER_BUILD_G(chip##FW_VERSION_BUILD)) #define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf) struct fw_info { @@ -354,7 +361,7 @@ struct link_config { unsigned char link_ok; /* link up? */ }; -#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16) +#define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16) enum { MAX_ETH_QSETS = 32, /* # of Ethernet Tx/Rx queue sets */ @@ -431,7 +438,8 @@ struct sge_fl { /* SGE free-buffer queue state */ struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */ __be64 *desc; /* address of HW Rx descriptor ring */ dma_addr_t addr; /* bus address of HW ring start */ - u64 udb; /* BAR2 offset of User Doorbell area */ + void __iomem *bar2_addr; /* address of BAR2 Queue registers */ + unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */ }; /* A packet gather list */ @@ -461,7 +469,8 @@ struct sge_rspq { /* state for an SGE response queue */ u16 abs_id; /* absolute SGE id for the response q */ __be64 *desc; /* address of HW response ring */ dma_addr_t phys_addr; /* physical address of the ring */ - u64 udb; /* BAR2 offset of User Doorbell area */ + void __iomem *bar2_addr; /* address of BAR2 Queue registers */ + unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */ unsigned int iqe_len; /* entry size */ unsigned int size; /* capacity of response queue */ struct adapter *adap; @@ -519,7 +528,8 @@ struct sge_txq { int db_disabled; unsigned short db_pidx; unsigned short db_pidx_inc; - u64 udb; /* BAR2 offset of User Doorbell area */ + void __iomem *bar2_addr; /* address of BAR2 Queue registers */ + unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */ }; struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */ @@ -995,6 +1005,15 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, const u8 *fw_data, unsigned int fw_size, struct fw_hdr *card_fw, enum dev_state state, int *reset); int t4_prep_adapter(struct adapter *adapter); + +enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS }; +int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter, + unsigned int qid, + enum t4_bar2_qtype qtype, + u64 *pbar2_qoffset, + unsigned int *pbar2_qid); + +int t4_init_sge_params(struct adapter *adapter); int t4_init_tp_params(struct adapter *adap); int t4_filter_field_shift(const struct adapter *adap, int filter_sel); int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); @@ -1085,4 +1104,5 @@ void t4_db_dropped(struct adapter *adapter); int t4_fwaddrspace_write(struct 
adapter *adap, unsigned int mbox, u32 addr, u32 val); void t4_sge_decode_idma_state(struct adapter *adapter, int state); +void t4_free_mem(void *addr); #endif /* __CXGB4_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c index 4fe33606f37..a35d1ec6950 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c @@ -243,7 +243,7 @@ void cxgb4_dcb_handle_fw_update(struct adapter *adap, const struct fw_port_cmd *pcmd) { const union fw_port_dcb *fwdcb = &pcmd->u.dcb; - int port = FW_PORT_CMD_PORTID_GET(be32_to_cpu(pcmd->op_to_portid)); + int port = FW_PORT_CMD_PORTID_G(be32_to_cpu(pcmd->op_to_portid)); struct net_device *dev = adap->port[port]; struct port_info *pi = netdev_priv(dev); struct port_dcb_info *dcb = &pi->dcb; @@ -256,12 +256,12 @@ void cxgb4_dcb_handle_fw_update(struct adapter *adap, if (dcb_type == FW_PORT_DCB_TYPE_CONTROL) { enum cxgb4_dcb_state_input input = ((pcmd->u.dcb.control.all_syncd_pkd & - FW_PORT_CMD_ALL_SYNCD) + FW_PORT_CMD_ALL_SYNCD_F) ? CXGB4_DCB_STATE_FW_ALLSYNCED : CXGB4_DCB_STATE_FW_INCOMPLETE); if (dcb->dcb_version != FW_PORT_DCB_VER_UNKNOWN) { - dcb_running_version = FW_PORT_CMD_DCB_VERSION_GET( + dcb_running_version = FW_PORT_CMD_DCB_VERSION_G( be16_to_cpu( pcmd->u.dcb.control.dcb_version_to_app_state)); if (dcb_running_version == FW_PORT_DCB_VER_CEE1D01 || @@ -519,7 +519,7 @@ static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc, INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id); if (pi->dcb.state == CXGB4_DCB_STATE_HOST) - pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY); + pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F); err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd); if (err != FW_PORT_DCB_CFG_SUCCESS) @@ -583,7 +583,7 @@ static void cxgb4_setpgbwgcfg_tx(struct net_device *dev, int pgid, INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id); if (pi->dcb.state == CXGB4_DCB_STATE_HOST) - pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY); + pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F); err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd); @@ -623,7 +623,7 @@ static void cxgb4_setpfccfg(struct net_device *dev, int priority, u8 pfccfg) INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id); if (pi->dcb.state == CXGB4_DCB_STATE_HOST) - pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY); + pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F); pcmd.u.dcb.pfc.type = FW_PORT_DCB_TYPE_PFC; pcmd.u.dcb.pfc.pfcen = pi->dcb.pfcen; @@ -842,7 +842,7 @@ static int __cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id, /* write out new app table entry */ INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id); if (pi->dcb.state == CXGB4_DCB_STATE_HOST) - pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY); + pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F); pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID; pcmd.u.dcb.app_priority.protocolid = cpu_to_be16(app_id); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h index 2a6aa88984f..31ce425616c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h @@ -42,12 +42,12 @@ do { \ memset(&(__pcmd), 0, sizeof(__pcmd)); \ (__pcmd).op_to_portid = \ - cpu_to_be32(FW_CMD_OP(FW_PORT_CMD) | \ - FW_CMD_REQUEST | \ - FW_CMD_##__op | \ - FW_PORT_CMD_PORTID(__port)); \ + cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | \ + FW_CMD_REQUEST_F | \ + FW_CMD_##__op##_F | \ + 
FW_PORT_CMD_PORTID_V(__port)); \ (__pcmd).action_to_len16 = \ - cpu_to_be32(FW_PORT_CMD_ACTION(__action) | \ + cpu_to_be32(FW_PORT_CMD_ACTION_V(__action) | \ FW_LEN16(pcmd)); \ } while (0) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c new file mode 100644 index 00000000000..c98a350d857 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -0,0 +1,158 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/seq_file.h> +#include <linux/debugfs.h> +#include <linux/string_helpers.h> +#include <linux/sort.h> + +#include "cxgb4.h" +#include "t4_regs.h" +#include "t4fw_api.h" +#include "cxgb4_debugfs.h" +#include "l2t.h" + +static ssize_t mem_read(struct file *file, char __user *buf, size_t count, + loff_t *ppos) +{ + loff_t pos = *ppos; + loff_t avail = file_inode(file)->i_size; + unsigned int mem = (uintptr_t)file->private_data & 3; + struct adapter *adap = file->private_data - mem; + __be32 *data; + int ret; + + if (pos < 0) + return -EINVAL; + if (pos >= avail) + return 0; + if (count > avail - pos) + count = avail - pos; + + data = t4_alloc_mem(count); + if (!data) + return -ENOMEM; + + spin_lock(&adap->win0_lock); + ret = t4_memory_rw(adap, 0, mem, pos, count, data, T4_MEMORY_READ); + spin_unlock(&adap->win0_lock); + if (ret) { + t4_free_mem(data); + return ret; + } + ret = copy_to_user(buf, data, count); + + t4_free_mem(data); + if (ret) + return -EFAULT; + + *ppos = pos + count; + return count; +} + +static const struct file_operations mem_debugfs_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = mem_read, + .llseek = default_llseek, +}; + +static void add_debugfs_mem(struct adapter *adap, const char *name, + unsigned int idx, unsigned int size_mb) +{ + struct dentry *de; + + de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root, + (void *)adap + idx, &mem_debugfs_fops); + if (de && de->d_inode) + de->d_inode->i_size = size_mb << 20; +} + +/* Add an array of Debug FS files. 
+ */ +void add_debugfs_files(struct adapter *adap, + struct t4_debugfs_entry *files, + unsigned int nfiles) +{ + int i; + + /* debugfs support is best effort */ + for (i = 0; i < nfiles; i++) + debugfs_create_file(files[i].name, files[i].mode, + adap->debugfs_root, + (void *)adap + files[i].data, + files[i].ops); +} + +int t4_setup_debugfs(struct adapter *adap) +{ + int i; + u32 size; + + static struct t4_debugfs_entry t4_debugfs_files[] = { + { "l2t", &t4_l2t_fops, S_IRUSR, 0}, + }; + + add_debugfs_files(adap, + t4_debugfs_files, + ARRAY_SIZE(t4_debugfs_files)); + + i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); + if (i & EDRAM0_ENABLE_F) { + size = t4_read_reg(adap, MA_EDRAM0_BAR_A); + add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM0_SIZE_G(size)); + } + if (i & EDRAM1_ENABLE_F) { + size = t4_read_reg(adap, MA_EDRAM1_BAR_A); + add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM1_SIZE_G(size)); + } + if (is_t4(adap->params.chip)) { + size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A); + if (i & EXT_MEM_ENABLE_F) + add_debugfs_mem(adap, "mc", MEM_MC, + EXT_MEM_SIZE_G(size)); + } else { + if (i & EXT_MEM0_ENABLE_F) { + size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A); + add_debugfs_mem(adap, "mc0", MEM_MC0, + EXT_MEM0_SIZE_G(size)); + } + if (i & EXT_MEM1_ENABLE_F) { + size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); + add_debugfs_mem(adap, "mc1", MEM_MC1, + EXT_MEM1_SIZE_G(size)); + } + } + return 0; +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h new file mode 100644 index 00000000000..a3d8867efd3 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h @@ -0,0 +1,52 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __CXGB4_DEBUGFS_H +#define __CXGB4_DEBUGFS_H + +#include <linux/export.h> + +struct t4_debugfs_entry { + const char *name; + const struct file_operations *ops; + mode_t mode; + unsigned char data; +}; + +int t4_setup_debugfs(struct adapter *adap); +void add_debugfs_files(struct adapter *adap, + struct t4_debugfs_entry *files, + unsigned int nfiles); + +#endif diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 279873cb6e3..973dbb7938c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -61,6 +61,7 @@ #include <net/neighbour.h> #include <net/netevent.h> #include <net/addrconf.h> +#include <net/bonding.h> #include <asm/uaccess.h> #include "cxgb4.h" @@ -68,10 +69,9 @@ #include "t4_msg.h" #include "t4fw_api.h" #include "cxgb4_dcb.h" +#include "cxgb4_debugfs.h" #include "l2t.h" -#include <../drivers/net/bonding/bonding.h> - #ifdef DRV_VERSION #undef DRV_VERSION #endif @@ -141,7 +141,7 @@ static unsigned int pfvfres_pmask(struct adapter *adapter, * Give PF's access to all of the ports. */ if (vf == 0) - return FW_PFVF_CMD_PMASK_MASK; + return FW_PFVF_CMD_PMASK_M; /* * For VFs, we'll assign them access to the ports based purely on the @@ -210,114 +210,25 @@ struct filter_entry { NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\ NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) -#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) } - -static const struct pci_device_id cxgb4_pci_tbl[] = { - CH_DEVICE(0xa000, 0), /* PE10K */ - CH_DEVICE(0x4001, -1), - CH_DEVICE(0x4002, -1), - CH_DEVICE(0x4003, -1), - CH_DEVICE(0x4004, -1), - CH_DEVICE(0x4005, -1), - CH_DEVICE(0x4006, -1), - CH_DEVICE(0x4007, -1), - CH_DEVICE(0x4008, -1), - CH_DEVICE(0x4009, -1), - CH_DEVICE(0x400a, -1), - CH_DEVICE(0x400d, -1), - CH_DEVICE(0x400e, -1), - CH_DEVICE(0x4080, -1), - CH_DEVICE(0x4081, -1), - CH_DEVICE(0x4082, -1), - CH_DEVICE(0x4083, -1), - CH_DEVICE(0x4084, -1), - CH_DEVICE(0x4085, -1), - CH_DEVICE(0x4086, -1), - CH_DEVICE(0x4087, -1), - CH_DEVICE(0x4088, -1), - CH_DEVICE(0x4401, 4), - CH_DEVICE(0x4402, 4), - CH_DEVICE(0x4403, 4), - CH_DEVICE(0x4404, 4), - CH_DEVICE(0x4405, 4), - CH_DEVICE(0x4406, 4), - CH_DEVICE(0x4407, 4), - CH_DEVICE(0x4408, 4), - CH_DEVICE(0x4409, 4), - CH_DEVICE(0x440a, 4), - CH_DEVICE(0x440d, 4), - CH_DEVICE(0x440e, 4), - CH_DEVICE(0x4480, 4), - CH_DEVICE(0x4481, 4), - CH_DEVICE(0x4482, 4), - CH_DEVICE(0x4483, 4), - CH_DEVICE(0x4484, 4), - CH_DEVICE(0x4485, 4), - CH_DEVICE(0x4486, 4), - CH_DEVICE(0x4487, 4), - CH_DEVICE(0x4488, 4), - CH_DEVICE(0x5001, 4), - CH_DEVICE(0x5002, 4), - CH_DEVICE(0x5003, 4), - CH_DEVICE(0x5004, 4), - CH_DEVICE(0x5005, 4), - CH_DEVICE(0x5006, 4), - CH_DEVICE(0x5007, 4), - CH_DEVICE(0x5008, 4), - CH_DEVICE(0x5009, 4), - CH_DEVICE(0x500A, 4), - CH_DEVICE(0x500B, 4), - CH_DEVICE(0x500C, 4), - CH_DEVICE(0x500D, 4), - CH_DEVICE(0x500E, 4), - CH_DEVICE(0x500F, 4), - CH_DEVICE(0x5010, 4), - CH_DEVICE(0x5011, 4), - CH_DEVICE(0x5012, 4), - CH_DEVICE(0x5013, 4), - CH_DEVICE(0x5014, 4), - CH_DEVICE(0x5015, 4), - CH_DEVICE(0x5080, 4), - CH_DEVICE(0x5081, 4), - CH_DEVICE(0x5082, 4), - CH_DEVICE(0x5083, 4), - CH_DEVICE(0x5084, 4), - CH_DEVICE(0x5085, 4), - CH_DEVICE(0x5086, 4), - CH_DEVICE(0x5087, 4), - CH_DEVICE(0x5088, 4), - CH_DEVICE(0x5401, 4), - CH_DEVICE(0x5402, 4), - CH_DEVICE(0x5403, 4), - CH_DEVICE(0x5404, 4), - CH_DEVICE(0x5405, 4), - CH_DEVICE(0x5406, 4), - CH_DEVICE(0x5407, 4), - CH_DEVICE(0x5408, 4), - 
CH_DEVICE(0x5409, 4), - CH_DEVICE(0x540A, 4), - CH_DEVICE(0x540B, 4), - CH_DEVICE(0x540C, 4), - CH_DEVICE(0x540D, 4), - CH_DEVICE(0x540E, 4), - CH_DEVICE(0x540F, 4), - CH_DEVICE(0x5410, 4), - CH_DEVICE(0x5411, 4), - CH_DEVICE(0x5412, 4), - CH_DEVICE(0x5413, 4), - CH_DEVICE(0x5414, 4), - CH_DEVICE(0x5415, 4), - CH_DEVICE(0x5480, 4), - CH_DEVICE(0x5481, 4), - CH_DEVICE(0x5482, 4), - CH_DEVICE(0x5483, 4), - CH_DEVICE(0x5484, 4), - CH_DEVICE(0x5485, 4), - CH_DEVICE(0x5486, 4), - CH_DEVICE(0x5487, 4), - CH_DEVICE(0x5488, 4), - { 0, } -}; +/* Macros needed to support the PCI Device ID Table ... + */ +#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \ + static struct pci_device_id cxgb4_pci_tbl[] = { +#define CH_PCI_DEVICE_ID_FUNCTION 0x4 + +/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is + * called for both. + */ +#define CH_PCI_DEVICE_ID_FUNCTION2 0x0 + +#define CH_PCI_ID_TABLE_ENTRY(devid) \ + {PCI_VDEVICE(CHELSIO, (devid)), 4} + +#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \ + { 0, } \ + } + +#include "t4_pci_id_tbl.h" #define FW4_FNAME "cxgb4/t4fw.bin" #define FW5_FNAME "cxgb4/t5fw.bin" @@ -512,9 +423,10 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable) u32 name, value; int err; - name = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) | - FW_PARAMS_PARAM_YZ(txq->q.cntxt_id)); + name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) | + FW_PARAMS_PARAM_X_V( + FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) | + FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id)); value = enable ? i : 0xffffffff; /* Since we can be called while atomic (from "interrupt @@ -709,7 +621,7 @@ EXPORT_SYMBOL(cxgb4_dcb_enabled); /* Handle a Data Center Bridging update message from the firmware. */ static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd) { - int port = FW_PORT_CMD_PORTID_GET(ntohl(pcmd->op_to_portid)); + int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid)); struct net_device *dev = adap->port[port]; int old_dcb_enabled = cxgb4_dcb_enabled(dev); int new_dcb_enabled; @@ -832,17 +744,17 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp, #ifdef CONFIG_CHELSIO_T4_DCB const struct fw_port_cmd *pcmd = (const void *)p->data; - unsigned int cmd = FW_CMD_OP_GET(ntohl(pcmd->op_to_portid)); + unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid)); unsigned int action = - FW_PORT_CMD_ACTION_GET(ntohl(pcmd->action_to_len16)); + FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16)); if (cmd == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) { - int port = FW_PORT_CMD_PORTID_GET( + int port = FW_PORT_CMD_PORTID_G( be32_to_cpu(pcmd->op_to_portid)); struct net_device *dev = q->adap->port[port]; int state_input = ((pcmd->u.info.dcbxdis_pkd & - FW_PORT_CMD_DCBXDIS) + FW_PORT_CMD_DCBXDIS_F) ? CXGB4_DCB_INPUT_FW_DISABLED : CXGB4_DCB_INPUT_FW_ENABLED); @@ -1287,7 +1199,7 @@ void *t4_alloc_mem(size_t size) /* * Free memory allocated through alloc_mem(). */ -static void t4_free_mem(void *addr) +void t4_free_mem(void *addr) { if (is_vmalloc_addr(addr)) vfree(addr); @@ -1339,52 +1251,52 @@ static int set_filter_wr(struct adapter *adapter, int fidx) * filter specification structure but for now it's easiest to simply * put this fairly direct code in line ... 
*/ - fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR)); - fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16)); + fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR)); + fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr)/16)); fwr->tid_to_iq = - htonl(V_FW_FILTER_WR_TID(ftid) | - V_FW_FILTER_WR_RQTYPE(f->fs.type) | - V_FW_FILTER_WR_NOREPLY(0) | - V_FW_FILTER_WR_IQ(f->fs.iq)); + htonl(FW_FILTER_WR_TID_V(ftid) | + FW_FILTER_WR_RQTYPE_V(f->fs.type) | + FW_FILTER_WR_NOREPLY_V(0) | + FW_FILTER_WR_IQ_V(f->fs.iq)); fwr->del_filter_to_l2tix = - htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) | - V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) | - V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) | - V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) | - V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) | - V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) | - V_FW_FILTER_WR_DMAC(f->fs.newdmac) | - V_FW_FILTER_WR_SMAC(f->fs.newsmac) | - V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT || + htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) | + FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) | + FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) | + FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) | + FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) | + FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) | + FW_FILTER_WR_DMAC_V(f->fs.newdmac) | + FW_FILTER_WR_SMAC_V(f->fs.newsmac) | + FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT || f->fs.newvlan == VLAN_REWRITE) | - V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE || + FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE || f->fs.newvlan == VLAN_REWRITE) | - V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) | - V_FW_FILTER_WR_TXCHAN(f->fs.eport) | - V_FW_FILTER_WR_PRIO(f->fs.prio) | - V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0)); + FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) | + FW_FILTER_WR_TXCHAN_V(f->fs.eport) | + FW_FILTER_WR_PRIO_V(f->fs.prio) | + FW_FILTER_WR_L2TIX_V(f->l2t ? 
f->l2t->idx : 0)); fwr->ethtype = htons(f->fs.val.ethtype); fwr->ethtypem = htons(f->fs.mask.ethtype); fwr->frag_to_ovlan_vldm = - (V_FW_FILTER_WR_FRAG(f->fs.val.frag) | - V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) | - V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) | - V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) | - V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) | - V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld)); + (FW_FILTER_WR_FRAG_V(f->fs.val.frag) | + FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) | + FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) | + FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) | + FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) | + FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld)); fwr->smac_sel = 0; fwr->rx_chan_rx_rpl_iq = - htons(V_FW_FILTER_WR_RX_CHAN(0) | - V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id)); + htons(FW_FILTER_WR_RX_CHAN_V(0) | + FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id)); fwr->maci_to_matchtypem = - htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) | - V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) | - V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) | - V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) | - V_FW_FILTER_WR_PORT(f->fs.val.iport) | - V_FW_FILTER_WR_PORTM(f->fs.mask.iport) | - V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) | - V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype)); + htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) | + FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) | + FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) | + FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) | + FW_FILTER_WR_PORT_V(f->fs.val.iport) | + FW_FILTER_WR_PORTM_V(f->fs.mask.iport) | + FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) | + FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype)); fwr->ptcl = f->fs.val.proto; fwr->ptclm = f->fs.mask.proto; fwr->ttyp = f->fs.val.tos; @@ -1615,14 +1527,14 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) if (adapter->params.fw_vers) snprintf(info->fw_version, sizeof(info->fw_version), "%u.%u.%u.%u, TP %u.%u.%u.%u", - FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers), - FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers), - FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers), - FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers), - FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers), - FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers), - FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers), - FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers)); + FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers), + FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers), + FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers), + FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers), + FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers), + FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers), + FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers), + FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers)); } static void get_strings(struct net_device *dev, u32 stringset, u8 *data) @@ -2721,9 +2633,10 @@ static int set_rspq_intr_params(struct sge_rspq *q, new_idx = closest_thres(&adap->sge, cnt); if (q->desc && q->pktcnt_idx != new_idx) { /* the queue has already been created, update it */ - v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) | - FW_PARAMS_PARAM_YZ(q->cntxt_id); + v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) | + FW_PARAMS_PARAM_X_V( + FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) | + FW_PARAMS_PARAM_YZ_V(q->cntxt_id); err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v, &new_idx); if (err) @@ -2937,7 +2850,7 @@ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef) int 
ret; const struct firmware *fw; struct adapter *adap = netdev2adap(netdev); - unsigned int mbox = FW_PCIE_FW_MASTER_MASK + 1; + unsigned int mbox = PCIE_FW_MASTER_M + 1; ef->data[sizeof(ef->data) - 1] = '\0'; ret = request_firmware(&fw, ef->data, adap->pdev_dev); @@ -3014,21 +2927,35 @@ static u32 get_rss_table_size(struct net_device *dev) return pi->rss_size; } -static int get_rss_table(struct net_device *dev, u32 *p, u8 *key) +static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc) { const struct port_info *pi = netdev_priv(dev); unsigned int n = pi->rss_size; + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!p) + return 0; while (n--) p[n] = pi->rss[n]; return 0; } -static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key) +static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key, + const u8 hfunc) { unsigned int i; struct port_info *pi = netdev_priv(dev); + /* We require at least one supported parameter to be changed and no + * change in any of the unsupported parameters + */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!p) + return 0; + for (i = 0; i < pi->rss_size; i++) pi->rss[i] = p[i]; if (pi->adapter->flags & FULL_INIT_DONE) @@ -3048,45 +2975,45 @@ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, info->data = 0; switch (info->flow_type) { case TCP_V4_FLOW: - if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) + if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) info->data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3; - else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) + else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) info->data = RXH_IP_SRC | RXH_IP_DST; break; case UDP_V4_FLOW: - if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) && - (v & FW_RSS_VI_CONFIG_CMD_UDPEN)) + if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) && + (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F)) info->data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3; - else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) + else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) info->data = RXH_IP_SRC | RXH_IP_DST; break; case SCTP_V4_FLOW: case AH_ESP_V4_FLOW: case IPV4_FLOW: - if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) + if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) info->data = RXH_IP_SRC | RXH_IP_DST; break; case TCP_V6_FLOW: - if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) + if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) info->data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3; - else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) + else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) info->data = RXH_IP_SRC | RXH_IP_DST; break; case UDP_V6_FLOW: - if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) && - (v & FW_RSS_VI_CONFIG_CMD_UDPEN)) + if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) && + (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F)) info->data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3; - else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) + else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) info->data = RXH_IP_SRC | RXH_IP_DST; break; case SCTP_V6_FLOW: case AH_ESP_V6_FLOW: case IPV6_FLOW: - if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) + if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) info->data = RXH_IP_SRC | RXH_IP_DST; break; } @@ -3131,102 +3058,14 @@ static const struct ethtool_ops cxgb_ethtool_ops = { .flash_device = set_flash, }; -/* - * debugfs support - */ -static ssize_t mem_read(struct file *file, char __user *buf, size_t count, - loff_t *ppos) -{ - loff_t pos = *ppos; - loff_t avail = file_inode(file)->i_size; - 
unsigned int mem = (uintptr_t)file->private_data & 3; - struct adapter *adap = file->private_data - mem; - __be32 *data; - int ret; - - if (pos < 0) - return -EINVAL; - if (pos >= avail) - return 0; - if (count > avail - pos) - count = avail - pos; - - data = t4_alloc_mem(count); - if (!data) - return -ENOMEM; - - spin_lock(&adap->win0_lock); - ret = t4_memory_rw(adap, 0, mem, pos, count, data, T4_MEMORY_READ); - spin_unlock(&adap->win0_lock); - if (ret) { - t4_free_mem(data); - return ret; - } - ret = copy_to_user(buf, data, count); - - t4_free_mem(data); - if (ret) - return -EFAULT; - - *ppos = pos + count; - return count; -} - -static const struct file_operations mem_debugfs_fops = { - .owner = THIS_MODULE, - .open = simple_open, - .read = mem_read, - .llseek = default_llseek, -}; - -static void add_debugfs_mem(struct adapter *adap, const char *name, - unsigned int idx, unsigned int size_mb) -{ - struct dentry *de; - - de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root, - (void *)adap + idx, &mem_debugfs_fops); - if (de && de->d_inode) - de->d_inode->i_size = size_mb << 20; -} - static int setup_debugfs(struct adapter *adap) { - int i; - u32 size; - if (IS_ERR_OR_NULL(adap->debugfs_root)) return -1; - i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE); - if (i & EDRAM0_ENABLE) { - size = t4_read_reg(adap, MA_EDRAM0_BAR); - add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size)); - } - if (i & EDRAM1_ENABLE) { - size = t4_read_reg(adap, MA_EDRAM1_BAR); - add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size)); - } - if (is_t4(adap->params.chip)) { - size = t4_read_reg(adap, MA_EXT_MEMORY_BAR); - if (i & EXT_MEM_ENABLE) - add_debugfs_mem(adap, "mc", MEM_MC, - EXT_MEM_SIZE_GET(size)); - } else { - if (i & EXT_MEM_ENABLE) { - size = t4_read_reg(adap, MA_EXT_MEMORY_BAR); - add_debugfs_mem(adap, "mc0", MEM_MC0, - EXT_MEM_SIZE_GET(size)); - } - if (i & EXT_MEM1_ENABLE) { - size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR); - add_debugfs_mem(adap, "mc1", MEM_MC1, - EXT_MEM_SIZE_GET(size)); - } - } - if (adap->l2t) - debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap, - &t4_l2t_fops); +#ifdef CONFIG_DEBUG_FS + t4_setup_debugfs(adap); +#endif return 0; } @@ -3508,9 +3347,9 @@ int cxgb4_clip_get(const struct net_device *dev, adap = netdev2adap(dev); memset(&c, 0, sizeof(c)); - c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) | - FW_CMD_REQUEST | FW_CMD_WRITE); - c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c)); + c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F); + c.alloc_to_len16 = htonl(FW_CLIP_CMD_ALLOC_F | FW_LEN16(c)); c.ip_hi = *(__be64 *)(lip->s6_addr); c.ip_lo = *(__be64 *)(lip->s6_addr + 8); return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false); @@ -3525,9 +3364,9 @@ int cxgb4_clip_release(const struct net_device *dev, adap = netdev2adap(dev); memset(&c, 0, sizeof(c)); - c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) | - FW_CMD_REQUEST | FW_CMD_READ); - c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c)); + c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F); + c.alloc_to_len16 = htonl(FW_CLIP_CMD_FREE_F | FW_LEN16(c)); c.ip_hi = *(__be64 *)(lip->s6_addr); c.ip_lo = *(__be64 *)(lip->s6_addr + 8); return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false); @@ -3568,7 +3407,7 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid, req->local_ip = sip; req->peer_ip = htonl(0); chan = rxq_to_chan(&adap->sge, queue); - req->opt0 = 
cpu_to_be64(TX_CHAN(chan)); + req->opt0 = cpu_to_be64(TX_CHAN_V(chan)); req->opt1 = cpu_to_be64(CONN_POLICY_ASK | SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); ret = t4_mgmt_tx(adap, skb); @@ -3611,7 +3450,7 @@ int cxgb4_create_server6(const struct net_device *dev, unsigned int stid, req->peer_ip_hi = cpu_to_be64(0); req->peer_ip_lo = cpu_to_be64(0); chan = rxq_to_chan(&adap->sge, queue); - req->opt0 = cpu_to_be64(TX_CHAN(chan)); + req->opt0 = cpu_to_be64(TX_CHAN_V(chan)); req->opt1 = cpu_to_be64(CONN_POLICY_ASK | SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); ret = t4_mgmt_tx(adap, skb); @@ -3893,7 +3732,7 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte) { struct adapter *adap; u32 offset, memtype, memaddr; - u32 edc0_size, edc1_size, mc0_size, mc1_size; + u32 edc0_size, edc1_size, mc0_size, mc1_size, size; u32 edc0_end, edc1_end, mc0_end, mc1_end; int ret; @@ -3907,9 +3746,12 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte) * and EDC1. Some cards will have neither MC0 nor MC1, most cards have * MC0, and some have both MC0 and MC1. */ - edc0_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR)) << 20; - edc1_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM1_BAR)) << 20; - mc0_size = EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)) << 20; + size = t4_read_reg(adap, MA_EDRAM0_BAR_A); + edc0_size = EDRAM0_SIZE_G(size) << 20; + size = t4_read_reg(adap, MA_EDRAM1_BAR_A); + edc1_size = EDRAM1_SIZE_G(size) << 20; + size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A); + mc0_size = EXT_MEM0_SIZE_G(size) << 20; edc0_end = edc0_size; edc1_end = edc0_end + edc1_size; @@ -3929,9 +3771,8 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte) /* T4 only has a single memory channel */ goto err; } else { - mc1_size = EXT_MEM_SIZE_GET( - t4_read_reg(adap, - MA_EXT_MEMORY1_BAR)) << 20; + size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); + mc1_size = EXT_MEM1_SIZE_G(size) << 20; mc1_end = mc0_end + mc1_size; if (offset < mc1_end) { memtype = MEM_MC1; @@ -3968,6 +3809,22 @@ u64 cxgb4_read_sge_timestamp(struct net_device *dev) } EXPORT_SYMBOL(cxgb4_read_sge_timestamp); +int cxgb4_bar2_sge_qregs(struct net_device *dev, + unsigned int qid, + enum cxgb4_bar2_qtype qtype, + u64 *pbar2_qoffset, + unsigned int *pbar2_qid) +{ + return cxgb4_t4_bar2_sge_qregs(netdev2adap(dev), + qid, + (qtype == CXGB4_BAR2_QTYPE_EGRESS + ? 
T4_BAR2_QTYPE_EGRESS + : T4_BAR2_QTYPE_INGRESS), + pbar2_qoffset, + pbar2_qid); +} +EXPORT_SYMBOL(cxgb4_bar2_sge_qregs); + static struct pci_driver cxgb4_driver; static void check_neigh_update(struct neighbour *neigh) @@ -4150,31 +4007,18 @@ static void process_db_drop(struct work_struct *work) u32 dropped_db = t4_read_reg(adap, 0x010ac); u16 qid = (dropped_db >> 15) & 0x1ffff; u16 pidx_inc = dropped_db & 0x1fff; - unsigned int s_qpp; - unsigned short udb_density; - unsigned long qpshift; - int page; - u32 udb; - - dev_warn(adap->pdev_dev, - "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n", - dropped_db, qid, - (dropped_db >> 14) & 1, - (dropped_db >> 13) & 1, - pidx_inc); + u64 bar2_qoffset; + unsigned int bar2_qid; + int ret; - drain_db_fifo(adap, 1); - - s_qpp = QUEUESPERPAGEPF1 * adap->fn; - udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap, - SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp); - qpshift = PAGE_SHIFT - ilog2(udb_density); - udb = qid << qpshift; - udb &= PAGE_MASK; - page = udb / PAGE_SIZE; - udb += (qid - (page * udb_density)) * 128; - - writel(PIDX(pidx_inc), adap->bar2 + udb + 8); + ret = cxgb4_t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS, + &bar2_qoffset, &bar2_qid); + if (ret) + dev_err(adap->pdev_dev, "doorbell drop recovery: " + "qid=%d, pidx_inc=%d\n", qid, pidx_inc); + else + writel(PIDX_T5(pidx_inc) | QID(bar2_qid), + adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL); /* Re-enable BAR2 WC */ t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15); @@ -4232,12 +4076,8 @@ static void uld_attach(struct adapter *adap, unsigned int uld) lli.adapter_type = adap->params.chip; lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2)); lli.cclk_ps = 1000000000 / adap->params.vpd.cclk; - lli.udb_density = 1 << QUEUESPERPAGEPF0_GET( - t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >> - (adap->fn * 4)); - lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET( - t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >> - (adap->fn * 4)); + lli.udb_density = 1 << adap->params.sge.eq_qpp; + lli.ucq_density = 1 << adap->params.sge.iq_qpp; lli.filt_mode = adap->params.tp.vlan_pri_map; /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */ for (i = 0; i < NCHAN; i++) @@ -4399,8 +4239,7 @@ static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa, if (cxgb4_netdev(event_dev)) { switch (event) { case NETDEV_UP: - ret = cxgb4_clip_get(event_dev, - (const struct in6_addr *)ifa->addr.s6_addr); + ret = cxgb4_clip_get(event_dev, &ifa->addr); if (ret < 0) { rcu_read_unlock(); return ret; @@ -4408,8 +4247,7 @@ static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa, ret = NOTIFY_OK; break; case NETDEV_DOWN: - cxgb4_clip_release(event_dev, - (const struct in6_addr *)ifa->addr.s6_addr); + cxgb4_clip_release(event_dev, &ifa->addr); ret = NOTIFY_OK; break; default: @@ -4478,8 +4316,7 @@ static int update_dev_clip(struct net_device *root_dev, struct net_device *dev) read_lock_bh(&idev->lock); list_for_each_entry(ifa, &idev->addr_list, if_list) { - ret = cxgb4_clip_get(dev, - (const struct in6_addr *)ifa->addr.s6_addr); + ret = cxgb4_clip_get(dev, &ifa->addr); if (ret < 0) break; } @@ -4960,14 +4797,14 @@ static u32 t4_read_pcie_cfg4(struct adapter *adap, int reg) */ memset(&ldst_cmd, 0, sizeof(ldst_cmd)); ldst_cmd.op_to_addrspace = - htonl(FW_CMD_OP(FW_LDST_CMD) | - FW_CMD_REQUEST | - FW_CMD_READ | - FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE)); + htonl(FW_CMD_OP_V(FW_LDST_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F | + 
FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE)); ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd)); - ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS(1); + ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1); ldst_cmd.u.pcie.ctrl_to_fn = - (FW_LDST_CMD_LC | FW_LDST_CMD_FN(adap->fn)); + (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->fn)); ldst_cmd.u.pcie.r = reg; ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd), &ldst_cmd); @@ -5054,8 +4891,8 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) /* get device capabilities */ memset(c, 0, sizeof(*c)); - c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | - FW_CMD_REQUEST | FW_CMD_READ); + c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F); c->cfvalid_to_len16 = htonl(FW_LEN16(*c)); ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c); if (ret < 0) @@ -5071,16 +4908,16 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) dev_err(adap->pdev_dev, "virtualization ACLs not supported"); return ret; } - c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | - FW_CMD_REQUEST | FW_CMD_WRITE); + c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F); ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL); if (ret < 0) return ret; ret = t4_config_glbl_rss(adap, adap->fn, FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, - FW_RSS_GLB_CONFIG_CMD_TNLMAPEN | - FW_RSS_GLB_CONFIG_CMD_TNLALLLKP); + FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F | + FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F); if (ret < 0) return ret; @@ -5241,8 +5078,8 @@ static int adap_init0_config(struct adapter *adapter, int reset) if (cf->size >= FLASH_CFG_MAX_SIZE) ret = -ENOMEM; else { - params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF)); + params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF)); ret = t4_query_params(adapter, adapter->mbox, adapter->fn, 0, 1, params, val); if (ret == 0) { @@ -5260,8 +5097,8 @@ static int adap_init0_config(struct adapter *adapter, int reset) size_t size = cf->size & ~0x3; __be32 *data = (__be32 *)cf->data; - mtype = FW_PARAMS_PARAM_Y_GET(val[0]); - maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16; + mtype = FW_PARAMS_PARAM_Y_G(val[0]); + maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16; spin_lock(&adapter->win0_lock); ret = t4_memory_rw(adapter, 0, mtype, maddr, @@ -5298,13 +5135,13 @@ static int adap_init0_config(struct adapter *adapter, int reset) */ memset(&caps_cmd, 0, sizeof(caps_cmd)); caps_cmd.op_to_write = - htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | - FW_CMD_REQUEST | - FW_CMD_READ); + htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F); caps_cmd.cfvalid_to_len16 = - htonl(FW_CAPS_CONFIG_CMD_CFVALID | - FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | - FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | + htonl(FW_CAPS_CONFIG_CMD_CFVALID_F | + FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) | + FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) | FW_LEN16(caps_cmd)); ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd); @@ -5318,9 +5155,9 @@ static int adap_init0_config(struct adapter *adapter, int reset) if (ret == -ENOENT) { memset(&caps_cmd, 0, sizeof(caps_cmd)); caps_cmd.op_to_write = - htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | - FW_CMD_REQUEST | - FW_CMD_READ); + htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F); caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); ret = t4_wr_mbox(adapter, 
adapter->mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd); @@ -5343,9 +5180,9 @@ static int adap_init0_config(struct adapter *adapter, int reset) * And now tell the firmware to use the configuration we just loaded. */ caps_cmd.op_to_write = - htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE); + htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F); caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), NULL); @@ -5416,8 +5253,8 @@ static int adap_init0_no_config(struct adapter *adapter, int reset) * Get device capabilities and select which we'll be using. */ memset(&caps_cmd, 0, sizeof(caps_cmd)); - caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | - FW_CMD_REQUEST | FW_CMD_READ); + caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F); caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd); @@ -5433,8 +5270,8 @@ static int adap_init0_no_config(struct adapter *adapter, int reset) dev_err(adapter->pdev_dev, "virtualization ACLs not supported"); goto bye; } - caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | - FW_CMD_REQUEST | FW_CMD_WRITE); + caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F); ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), NULL); if (ret < 0) @@ -5456,10 +5293,10 @@ static int adap_init0_no_config(struct adapter *adapter, int reset) adapter->flags |= RSS_TNLALLLOOKUP; ret = t4_config_glbl_rss(adapter, adapter->mbox, FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, - FW_RSS_GLB_CONFIG_CMD_TNLMAPEN | - FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ | + FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F | + FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F | ((adapter->flags & RSS_TNLALLLOOKUP) ? - FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0)); + FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F : 0)); if (ret < 0) goto bye; @@ -5470,7 +5307,7 @@ static int adap_init0_no_config(struct adapter *adapter, int reset) PFRES_NEQ, PFRES_NETHCTRL, PFRES_NIQFLINT, PFRES_NIQ, PFRES_TC, PFRES_NVI, - FW_PFVF_CMD_CMASK_MASK, + FW_PFVF_CMD_CMASK_M, pfvfres_pmask(adapter, adapter->fn, 0), PFRES_NEXACTF, PFRES_R_CAPS, PFRES_WX_CAPS); @@ -5515,7 +5352,7 @@ static int adap_init0_no_config(struct adapter *adapter, int reset) VFRES_NEQ, VFRES_NETHCTRL, VFRES_NIQFLINT, VFRES_NIQ, VFRES_TC, VFRES_NVI, - FW_PFVF_CMD_CMASK_MASK, + FW_PFVF_CMD_CMASK_M, pfvfres_pmask( adapter, pf, vf), VFRES_NEXACTF, @@ -5687,14 +5524,8 @@ static int adap_init0(struct adapter *adap) struct fw_caps_config_cmd caps_cmd; int reset = 1; - /* - * Contact FW, advertising Master capability (and potentially forcing - * ourselves as the Master PF if our module parameter force_init is - * set). - */ - ret = t4_fw_hello(adap, adap->mbox, adap->fn, - force_init ? MASTER_MUST : MASTER_MAY, - &state); + /* Contact FW, advertising Master capability */ + ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state); if (ret < 0) { dev_err(adap->pdev_dev, "could not connect to FW, error %d\n", ret); @@ -5702,8 +5533,6 @@ static int adap_init0(struct adapter *adap) } if (ret == adap->mbox) adap->flags |= MASTER_PF; - if (force_init && state == DEV_STATE_INIT) - state = DEV_STATE_UNINIT; /* * If we're the Master PF Driver and the device is uninitialized, @@ -5779,8 +5608,8 @@ static int adap_init0(struct adapter *adap) * and portvec ... 
*/ v = - FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC); + FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC); ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec); if (ret < 0) goto bye; @@ -5802,7 +5631,6 @@ static int adap_init0(struct adapter *adap) } else { dev_info(adap->pdev_dev, "Coming up as MASTER: "\ "Initializing adapter\n"); - /* * If the firmware doesn't support Configuration * Files warn user and exit, @@ -5817,8 +5645,9 @@ static int adap_init0(struct adapter *adap) * Find out whether we're dealing with a version of * the firmware which has configuration file support. */ - params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF)); + params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V( + FW_PARAMS_PARAM_DEV_CF)); ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, params, val); @@ -5878,14 +5707,14 @@ static int adap_init0(struct adapter *adap) * Grab some of our basic fundamental operating parameters. */ #define FW_PARAM_DEV(param) \ - (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) + (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \ + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param)) #define FW_PARAM_PFVF(param) \ - FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \ - FW_PARAMS_PARAM_Y(0) | \ - FW_PARAMS_PARAM_Z(0) + FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \ + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \ + FW_PARAMS_PARAM_Y_V(0) | \ + FW_PARAMS_PARAM_Z_V(0) params[0] = FW_PARAM_PFVF(EQ_START); params[1] = FW_PARAM_PFVF(L2T_START); @@ -5945,8 +5774,8 @@ static int adap_init0(struct adapter *adap) * to manage. 
*/ memset(&caps_cmd, 0, sizeof(caps_cmd)); - caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | - FW_CMD_REQUEST | FW_CMD_READ); + caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F); caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd); @@ -6092,6 +5921,7 @@ static int adap_init0(struct adapter *adap) t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, adap->params.b_wnd); } + t4_init_sge_params(adap); t4_init_tp_params(adap); adap->flags |= FW_OK; return 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index 1366ba620c8..152b4c4c780 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -52,10 +52,10 @@ enum { }; #define INIT_TP_WR(w, tid) do { \ - (w)->wr.wr_hi = htonl(FW_WR_OP(FW_TP_WR) | \ - FW_WR_IMMDLEN(sizeof(*w) - sizeof(w->wr))); \ - (w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*w), 16)) | \ - FW_WR_FLOWID(tid)); \ + (w)->wr.wr_hi = htonl(FW_WR_OP_V(FW_TP_WR) | \ + FW_WR_IMMDLEN_V(sizeof(*w) - sizeof(w->wr))); \ + (w)->wr.wr_mid = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*w), 16)) | \ + FW_WR_FLOWID_V(tid)); \ (w)->wr.wr_lo = cpu_to_be64(0); \ } while (0) @@ -65,9 +65,10 @@ enum { } while (0) #define INIT_ULPTX_WR(w, wrlen, atomic, tid) do { \ - (w)->wr.wr_hi = htonl(FW_WR_OP(FW_ULPTX_WR) | FW_WR_ATOMIC(atomic)); \ - (w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(wrlen, 16)) | \ - FW_WR_FLOWID(tid)); \ + (w)->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) | \ + FW_WR_ATOMIC_V(atomic)); \ + (w)->wr.wr_mid = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(wrlen, 16)) | \ + FW_WR_FLOWID_V(tid)); \ (w)->wr.wr_lo = cpu_to_be64(0); \ } while (0) @@ -304,4 +305,11 @@ void cxgb4_enable_db_coalescing(struct net_device *dev); int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte); u64 cxgb4_read_sge_timestamp(struct net_device *dev); +enum cxgb4_bar2_qtype { CXGB4_BAR2_QTYPE_EGRESS, CXGB4_BAR2_QTYPE_INGRESS }; +int cxgb4_bar2_sge_qregs(struct net_device *dev, + unsigned int qid, + enum cxgb4_bar2_qtype qtype, + u64 *pbar2_qoffset, + unsigned int *pbar2_qid); + #endif /* !__CXGB4_OFLD_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index 96041397ee1..a047baa9fd0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -435,9 +435,9 @@ u64 cxgb4_select_ntuple(struct net_device *dev, if (tp->vnic_shift >= 0) { u32 viid = cxgb4_port_viid(dev); - u32 vf = FW_VIID_VIN_GET(viid); - u32 pf = FW_VIID_PFN_GET(viid); - u32 vld = FW_VIID_VIVLD_GET(viid); + u32 vf = FW_VIID_VIN_G(viid); + u32 pf = FW_VIID_PFN_G(viid); + u32 vld = FW_VIID_VIVLD_G(viid); ntuple |= (u64)(V_FT_VNID_ID_VF(vf) | V_FT_VNID_ID_PF(pf) | diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 39f2b13e66c..ebf935a1e35 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -527,14 +527,16 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) val |= DBPRIO(1); wmb(); - /* If we're on T4, use the old doorbell mechanism; otherwise - * use the new BAR2 mechanism. + /* If we don't have access to the new User Doorbell (T5+), use + * the old doorbell mechanism; otherwise use the new BAR2 + * mechanism. 
*/ - if (is_t4(adap->params.chip)) { + if (unlikely(q->bar2_addr == NULL)) { t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), val | QID(q->cntxt_id)); } else { - writel(val, adap->bar2 + q->udb + SGE_UDB_KDOORBELL); + writel(val | QID(q->bar2_qid), + q->bar2_addr + SGE_UDB_KDOORBELL); /* This Write memory Barrier will force the write to * the User Doorbell area to be flushed. @@ -576,7 +578,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, __be64 *d = &q->desc[q->pidx]; struct rx_sw_desc *sd = &q->sdesc[q->pidx]; - gfp |= __GFP_NOWARN | __GFP_COLD; + gfp |= __GFP_NOWARN; if (s->fl_pg_order == 0) goto alloc_small_pages; @@ -585,7 +587,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, * Prefer large buffers */ while (n) { - pg = alloc_pages(gfp | __GFP_COMP, s->fl_pg_order); + pg = __dev_alloc_pages(gfp, s->fl_pg_order); if (unlikely(!pg)) { q->large_alloc_failed++; break; /* fall back to single pages */ @@ -615,7 +617,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, alloc_small_pages: while (n--) { - pg = __skb_alloc_page(gfp, NULL); + pg = __dev_alloc_page(gfp); if (unlikely(!pg)) { q->alloc_failed++; break; @@ -816,7 +818,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q, sgl->addr0 = cpu_to_be64(addr[1]); } - sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags)); + sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags)); if (likely(--nfrags == 0)) return; /* @@ -850,14 +852,13 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q, *end = 0; } -/* This function copies a tx_desc struct to memory mapped BAR2 space(user space - * writes). For coalesced WR SGE, fetches data from the FIFO instead of from - * Host. +/* This function copies 64 byte coalesced work request to + * memory mapped BAR2 space. For coalesced WR SGE fetches + * data from the FIFO instead of from Host. */ -static void cxgb_pio_copy(u64 __iomem *dst, struct tx_desc *desc) +static void cxgb_pio_copy(u64 __iomem *dst, u64 *src) { - int count = sizeof(*desc) / sizeof(u64); - u64 *src = (u64 *)desc; + int count = 8; while (count) { writeq(*src, dst); @@ -879,7 +880,10 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) { wmb(); /* write descriptors before telling HW */ - if (is_t4(adap->params.chip)) { + /* If we don't have access to the new User Doorbell (T5+), use the old + * doorbell mechanism; otherwise use the new BAR2 mechanism. + */ + if (unlikely(q->bar2_addr == NULL)) { u32 val = PIDX(n); unsigned long flags; @@ -905,21 +909,22 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) */ WARN_ON(val & DBPRIO(1)); - /* For T5 and later we use the Write-Combine mapped BAR2 User - * Doorbell mechanism. If we're only writing a single TX - * Descriptor and TX Write Combining hasn't been disabled, we - * can use the Write Combining Gather Buffer; otherwise we use - * the simple doorbell. + /* If we're only writing a single TX Descriptor and we can use + * Inferred QID registers, we can use the Write Combining + * Gather Buffer; otherwise we use the simple doorbell. */ - if (n == 1) { + if (n == 1 && q->bar2_qid == 0) { int index = (q->pidx ? 
(q->pidx - 1) : (q->size - 1)); + u64 *wr = (u64 *)&q->desc[index]; - cxgb_pio_copy(adap->bar2 + q->udb + SGE_UDB_WCDOORBELL, - q->desc + index); + cxgb_pio_copy((u64 __iomem *) + (q->bar2_addr + SGE_UDB_WCDOORBELL), + wr); } else { - writel(val, adap->bar2 + q->udb + SGE_UDB_KDOORBELL); + writel(val | QID(q->bar2_qid), + q->bar2_addr + SGE_UDB_KDOORBELL); } /* This Write Memory Barrier will force the write to the User @@ -1092,10 +1097,10 @@ out_free: dev_kfree_skb_any(skb); goto out_free; } - wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2)); + wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2)); if (unlikely(credits < ETHTXQ_STOP_THRES)) { eth_txq_stop(q); - wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ; + wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; } wr = (void *)&q->q.desc[q->q.pidx]; @@ -1112,8 +1117,8 @@ out_free: dev_kfree_skb_any(skb); int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; len += sizeof(*lso); - wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) | - FW_WR_IMMDLEN(len)); + wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) | + FW_WR_IMMDLEN_V(len)); lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) | LSO_FIRST_SLICE | LSO_LAST_SLICE | LSO_IPV6(v6) | @@ -1135,8 +1140,8 @@ out_free: dev_kfree_skb_any(skb); q->tx_cso += ssi->gso_segs; } else { len += sizeof(*cpl); - wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) | - FW_WR_IMMDLEN(len)); + wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) | + FW_WR_IMMDLEN_V(len)); cpl = (void *)(wr + 1); if (skb->ip_summed == CHECKSUM_PARTIAL) { cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS; @@ -1224,7 +1229,7 @@ static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr) { reclaim_completed_tx_imm(&q->q); if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { - wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ); + wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F); q->q.stops++; q->full = 1; } @@ -1406,7 +1411,7 @@ static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb) { struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data; - wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ); + wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F); q->q.stops++; q->full = 1; } @@ -1997,11 +2002,16 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) params = QINTR_TIMER_IDX(7); val = CIDXINC(work_done) | SEINTARM(params); - if (is_t4(q->adap->params.chip)) { + + /* If we don't have access to the new User GTS (T5+), use the old + * doorbell mechanism; otherwise use the new BAR2 mechanism. + */ + if (unlikely(q->bar2_addr == NULL)) { t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), val | INGRESSQID((u32)q->cntxt_id)); } else { - writel(val, q->adap->bar2 + q->udb + SGE_UDB_GTS); + writel(val | INGRESSQID(q->bar2_qid), + q->bar2_addr + SGE_UDB_GTS); wmb(); } return work_done; @@ -2047,11 +2057,16 @@ static unsigned int process_intrq(struct adapter *adap) } val = CIDXINC(credits) | SEINTARM(q->intr_params); - if (is_t4(adap->params.chip)) { + + /* If we don't have access to the new User GTS (T5+), use the old + * doorbell mechanism; otherwise use the new BAR2 mechanism. 
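The same convention appears in ring_fl_db(), ring_tx_db(), napi_rx_handler() and process_intrq(): a queue whose bar2_addr is NULL has no usable BAR2 mapping (T4), so doorbell and GTS updates go through the per-PF kernel registers; otherwise the write lands in the queue's own BAR2 User Doorbell area, tagged with QID(bar2_qid) in case the hardware cannot infer the queue from the offset alone. Below is a condensed, illustrative sketch of that pattern; the ring_egress_db() wrapper is invented here and is not code from the patch, only the field names and register macros come from the driver.

/* Illustrative sketch only -- not part of the patch. */
static inline void ring_egress_db(struct adapter *adap, struct sge_txq *q,
				  unsigned int ndesc)
{
	u32 val = PIDX(ndesc);

	wmb();		/* descriptors must be visible before the doorbell */
	if (q->bar2_addr == NULL)
		/* T4: no BAR2 Queue registers, use the shared kernel doorbell */
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
			     val | QID(q->cntxt_id));
	else
		/* T5+: per-queue BAR2 doorbell; bar2_qid is 0 when the
		 * hardware infers the queue from the BAR2 offset alone
		 */
		writel(val | QID(q->bar2_qid),
		       q->bar2_addr + SGE_UDB_KDOORBELL);
}

The write-combined fast path (cxgb_pio_copy() of a single 64-byte descriptor into SGE_UDB_WCDOORBELL) additionally requires bar2_qid == 0, i.e. an inferred-QID mapping, which is exactly the "n == 1 && q->bar2_qid == 0" test added above.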
+ */ + if (unlikely(q->bar2_addr == NULL)) { t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), val | INGRESSQID(q->cntxt_id)); } else { - writel(val, adap->bar2 + q->udb + SGE_UDB_GTS); + writel(val | INGRESSQID(q->bar2_qid), + q->bar2_addr + SGE_UDB_GTS); wmb(); } spin_unlock(&adap->sge.intrq_lock); @@ -2235,48 +2250,32 @@ static void sge_tx_timer_cb(unsigned long data) } /** - * udb_address - return the BAR2 User Doorbell address for a Queue - * @adap: the adapter - * @cntxt_id: the Queue Context ID - * @qpp: Queues Per Page (for all PFs) + * bar2_address - return the BAR2 address for an SGE Queue's Registers + * @adapter: the adapter + * @qid: the SGE Queue ID + * @qtype: the SGE Queue Type (Egress or Ingress) + * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues * - * Returns the BAR2 address of the user Doorbell associated with the - * indicated Queue Context ID. Note that this is only applicable - * for T5 and later. - */ -static u64 udb_address(struct adapter *adap, unsigned int cntxt_id, - unsigned int qpp) -{ - u64 udb; - unsigned int s_qpp; - unsigned short udb_density; - unsigned long qpshift; - int page; - - BUG_ON(is_t4(adap->params.chip)); - - s_qpp = (QUEUESPERPAGEPF0 + - (QUEUESPERPAGEPF1 - QUEUESPERPAGEPF0) * adap->fn); - udb_density = 1 << ((qpp >> s_qpp) & QUEUESPERPAGEPF0_MASK); - qpshift = PAGE_SHIFT - ilog2(udb_density); - udb = (u64)cntxt_id << qpshift; - udb &= PAGE_MASK; - page = udb / PAGE_SIZE; - udb += (cntxt_id - (page * udb_density)) * SGE_UDB_SIZE; - - return udb; -} + * Returns the BAR2 address for the SGE Queue Registers associated with + * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also + * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE + * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID" + * Registers are supported (e.g. the Write Combining Doorbell Buffer). + */ +static void __iomem *bar2_address(struct adapter *adapter, + unsigned int qid, + enum t4_bar2_qtype qtype, + unsigned int *pbar2_qid) +{ + u64 bar2_qoffset; + int ret; -static u64 udb_address_eq(struct adapter *adap, unsigned int cntxt_id) -{ - return udb_address(adap, cntxt_id, - t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF)); -} + ret = cxgb4_t4_bar2_sge_qregs(adapter, qid, qtype, + &bar2_qoffset, pbar2_qid); + if (ret) + return NULL; -static u64 udb_address_iq(struct adapter *adap, unsigned int cntxt_id) -{ - return udb_address(adap, cntxt_id, - t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF)); + return adapter->bar2 + bar2_qoffset; } int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, @@ -2297,20 +2296,20 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, return -ENOMEM; memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_CMD_EXEC | - FW_IQ_CMD_PFN(adap->fn) | FW_IQ_CMD_VFN(0)); - c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) | + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_CMD_EXEC_F | + FW_IQ_CMD_PFN_V(adap->fn) | FW_IQ_CMD_VFN_V(0)); + c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F | FW_LEN16(c)); - c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | - FW_IQ_CMD_IQASYNCH(fwevtq) | FW_IQ_CMD_VIID(pi->viid) | - FW_IQ_CMD_IQANDST(intr_idx < 0) | FW_IQ_CMD_IQANUD(1) | - FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? 
intr_idx : + c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) | + FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) | + FW_IQ_CMD_IQANDST_V(intr_idx < 0) | FW_IQ_CMD_IQANUD_V(1) | + FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx : -intr_idx - 1)); - c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH(pi->tx_chan) | - FW_IQ_CMD_IQGTSMODE | - FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) | - FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4)); + c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) | + FW_IQ_CMD_IQGTSMODE_F | + FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) | + FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4)); c.iqsize = htons(iq->size); c.iqaddr = cpu_to_be64(iq->phys_addr); @@ -2323,12 +2322,12 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, goto fl_nomem; flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc); - c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN(1) | - FW_IQ_CMD_FL0FETCHRO(1) | - FW_IQ_CMD_FL0DATARO(1) | - FW_IQ_CMD_FL0PADEN(1)); - c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) | - FW_IQ_CMD_FL0FBMAX(3)); + c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN_F | + FW_IQ_CMD_FL0FETCHRO_F | + FW_IQ_CMD_FL0DATARO_F | + FW_IQ_CMD_FL0PADEN_F); + c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN_V(2) | + FW_IQ_CMD_FL0FBMAX_V(3)); c.fl0size = htons(flsz); c.fl0addr = cpu_to_be64(fl->addr); } @@ -2344,8 +2343,10 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, iq->next_intr_params = iq->intr_params; iq->cntxt_id = ntohs(c.iqid); iq->abs_id = ntohs(c.physiqid); - if (!is_t4(adap->params.chip)) - iq->udb = udb_address_iq(adap, iq->cntxt_id); + iq->bar2_addr = bar2_address(adap, + iq->cntxt_id, + T4_BAR2_QTYPE_INGRESS, + &iq->bar2_qid); iq->size--; /* subtract status entry */ iq->netdev = dev; iq->handler = hnd; @@ -2362,11 +2363,13 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0; adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl; - /* Note, we must initialize the Free List User Doorbell - * address before refilling the Free List! + /* Note, we must initialize the BAR2 Free List User Doorbell + * information before refilling the Free List! 
*/ - if (!is_t4(adap->params.chip)) - fl->udb = udb_address_eq(adap, fl->cntxt_id); + fl->bar2_addr = bar2_address(adap, + fl->cntxt_id, + T4_BAR2_QTYPE_EGRESS, + &fl->bar2_qid); refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL); } return 0; @@ -2392,9 +2395,10 @@ err: static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) { q->cntxt_id = id; - if (!is_t4(adap->params.chip)) - q->udb = udb_address_eq(adap, q->cntxt_id); - + q->bar2_addr = bar2_address(adap, + q->cntxt_id, + T4_BAR2_QTYPE_EGRESS, + &q->bar2_qid); q->in_use = 0; q->cidx = q->pidx = 0; q->stops = q->restarts = 0; @@ -2423,21 +2427,22 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, return -ENOMEM; memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_CMD_EXEC | - FW_EQ_ETH_CMD_PFN(adap->fn) | FW_EQ_ETH_CMD_VFN(0)); - c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC | - FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); - c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE | - FW_EQ_ETH_CMD_VIID(pi->viid)); - c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) | - FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) | - FW_EQ_ETH_CMD_FETCHRO(1) | - FW_EQ_ETH_CMD_IQID(iqid)); - c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN(2) | - FW_EQ_ETH_CMD_FBMAX(3) | - FW_EQ_ETH_CMD_CIDXFTHRESH(5) | - FW_EQ_ETH_CMD_EQSIZE(nentries)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_CMD_EXEC_F | + FW_EQ_ETH_CMD_PFN_V(adap->fn) | + FW_EQ_ETH_CMD_VFN_V(0)); + c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F | + FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c)); + c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F | + FW_EQ_ETH_CMD_VIID_V(pi->viid)); + c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(2) | + FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) | + FW_EQ_ETH_CMD_FETCHRO_V(1) | + FW_EQ_ETH_CMD_IQID_V(iqid)); + c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN_V(2) | + FW_EQ_ETH_CMD_FBMAX_V(3) | + FW_EQ_ETH_CMD_CIDXFTHRESH_V(5) | + FW_EQ_ETH_CMD_EQSIZE_V(nentries)); c.eqaddr = cpu_to_be64(txq->q.phys_addr); ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); @@ -2451,7 +2456,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, return ret; } - init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_GET(ntohl(c.eqid_pkd))); + init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd))); txq->txq = netdevq; txq->tso = txq->tx_cso = txq->vlan_ins = 0; txq->mapping_err = 0; @@ -2476,22 +2481,22 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, if (!txq->q.desc) return -ENOMEM; - c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_CMD_EXEC | - FW_EQ_CTRL_CMD_PFN(adap->fn) | - FW_EQ_CTRL_CMD_VFN(0)); - c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC | - FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c)); - c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_CMD_EXEC_F | + FW_EQ_CTRL_CMD_PFN_V(adap->fn) | + FW_EQ_CTRL_CMD_VFN_V(0)); + c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F | + FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c)); + c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid)); c.physeqid_pkd = htonl(0); - c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE(2) | - FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) | - FW_EQ_CTRL_CMD_FETCHRO | - FW_EQ_CTRL_CMD_IQID(iqid)); - c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN(2) | - FW_EQ_CTRL_CMD_FBMAX(3) | - FW_EQ_CTRL_CMD_CIDXFTHRESH(5) | - 
FW_EQ_CTRL_CMD_EQSIZE(nentries)); + c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(2) | + FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) | + FW_EQ_CTRL_CMD_FETCHRO_F | + FW_EQ_CTRL_CMD_IQID_V(iqid)); + c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN_V(2) | + FW_EQ_CTRL_CMD_FBMAX_V(3) | + FW_EQ_CTRL_CMD_CIDXFTHRESH_V(5) | + FW_EQ_CTRL_CMD_EQSIZE_V(nentries)); c.eqaddr = cpu_to_be64(txq->q.phys_addr); ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); @@ -2503,7 +2508,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, return ret; } - init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_GET(ntohl(c.cmpliqid_eqid))); + init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid))); txq->adap = adap; skb_queue_head_init(&txq->sendq); tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq); @@ -2530,20 +2535,20 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, return -ENOMEM; memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_CMD_EXEC | - FW_EQ_OFLD_CMD_PFN(adap->fn) | - FW_EQ_OFLD_CMD_VFN(0)); - c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC | - FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c)); - c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) | - FW_EQ_OFLD_CMD_PCIECHN(pi->tx_chan) | - FW_EQ_OFLD_CMD_FETCHRO(1) | - FW_EQ_OFLD_CMD_IQID(iqid)); - c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN(2) | - FW_EQ_OFLD_CMD_FBMAX(3) | - FW_EQ_OFLD_CMD_CIDXFTHRESH(5) | - FW_EQ_OFLD_CMD_EQSIZE(nentries)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_CMD_EXEC_F | + FW_EQ_OFLD_CMD_PFN_V(adap->fn) | + FW_EQ_OFLD_CMD_VFN_V(0)); + c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F | + FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c)); + c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(2) | + FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) | + FW_EQ_OFLD_CMD_FETCHRO_F | + FW_EQ_OFLD_CMD_IQID_V(iqid)); + c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN_V(2) | + FW_EQ_OFLD_CMD_FBMAX_V(3) | + FW_EQ_OFLD_CMD_CIDXFTHRESH_V(5) | + FW_EQ_OFLD_CMD_EQSIZE_V(nentries)); c.eqaddr = cpu_to_be64(txq->q.phys_addr); ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); @@ -2557,7 +2562,7 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, return ret; } - init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_GET(ntohl(c.eqid_pkd))); + init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd))); txq->adap = adap; skb_queue_head_init(&txq->sendq); tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 163a2a14948..28d04153f99 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -188,9 +188,9 @@ static void t4_report_fw_error(struct adapter *adap) u32 pcie_fw; pcie_fw = t4_read_reg(adap, MA_PCIE_FW); - if (pcie_fw & FW_PCIE_FW_ERR) + if (pcie_fw & PCIE_FW_ERR) dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n", - reason[FW_PCIE_FW_EVAL_GET(pcie_fw)]); + reason[PCIE_FW_EVAL_G(pcie_fw)]); } /* @@ -310,16 +310,17 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, } res = t4_read_reg64(adap, data_reg); - if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) { + if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) { fw_asrt(adap, data_reg); - res = FW_CMD_RETVAL(EIO); - } else if (rpl) + res = FW_CMD_RETVAL_V(EIO); + } else if (rpl) { get_mbox_rpl(adap, rpl, size / 8, 
data_reg); + } - if (FW_CMD_RETVAL_GET((int)res)) + if (FW_CMD_RETVAL_G((int)res)) dump_mbox(adap, mbox, data_reg); t4_write_reg(adap, ctl_reg, 0); - return -FW_CMD_RETVAL_GET((int)res); + return -FW_CMD_RETVAL_G((int)res); } } @@ -483,12 +484,12 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, * MEM_MC0 = 2 -- For T5 * MEM_MC1 = 3 -- For T5 */ - edc_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR)); + edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A)); if (mtype != MEM_MC1) memoffset = (mtype * (edc_size * 1024 * 1024)); else { - mc_size = EXT_MEM_SIZE_GET(t4_read_reg(adap, - MA_EXT_MEMORY_BAR)); + mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap, + MA_EXT_MEMORY1_BAR_A)); memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024; } @@ -710,8 +711,8 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p) * Ask firmware for the Core Clock since it knows how to translate the * Reference Clock ('V2') VPD field into a Core Clock value ... */ - cclk_param = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK)); + cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK)); ret = t4_query_params(adapter, adapter->mbox, 0, 0, 1, &cclk_param, &cclk_val); @@ -992,10 +993,10 @@ static int should_install_fs_fw(struct adapter *adap, int card_fw_usable, install: dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, " "installing firmware %u.%u.%u.%u on card.\n", - FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c), - FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), reason, - FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k), - FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k)); + FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c), + FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason, + FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k), + FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k)); return 1; } @@ -1067,12 +1068,12 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, "driver compiled with %d.%d.%d.%d, " "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n", state, - FW_HDR_FW_VER_MAJOR_GET(d), FW_HDR_FW_VER_MINOR_GET(d), - FW_HDR_FW_VER_MICRO_GET(d), FW_HDR_FW_VER_BUILD_GET(d), - FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c), - FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), - FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k), - FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k)); + FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d), + FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d), + FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c), + FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), + FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k), + FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k)); ret = EINVAL; goto bye; } @@ -1131,6 +1132,27 @@ unsigned int t4_flash_cfg_addr(struct adapter *adapter) return FLASH_CFG_START; } +/* Return TRUE if the specified firmware matches the adapter. I.e. T4 + * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead + * and emit an error message for mismatched firmware to save our caller the + * effort ... + */ +static bool t4_fw_matches_chip(const struct adapter *adap, + const struct fw_hdr *hdr) +{ + /* The expression below will return FALSE for any unsupported adapter + * which will keep us "honest" in the future ... 
+ */ + if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) || + (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5)) + return true; + + dev_err(adap->pdev_dev, + "FW image (%d) is not suitable for this adapter (%d)\n", + hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip)); + return false; +} + /** * t4_load_fw - download firmware * @adap: the adapter @@ -1170,6 +1192,8 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size) FW_MAX_SIZE); return -EFBIG; } + if (!t4_fw_matches_chip(adap, hdr)) + return -EINVAL; for (csum = 0, i = 0; i < size / sizeof(csum); i++) csum += ntohl(p[i]); @@ -1212,6 +1236,8 @@ out: if (ret) dev_err(adap->pdev_dev, "firmware download failed, error %d\n", ret); + else + ret = t4_get_fw_version(adap, &adap->params.fw_vers); return ret; } @@ -1236,7 +1262,7 @@ int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, struct link_config *lc) { struct fw_port_cmd c; - unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO); + unsigned int fc = 0, mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO); lc->link_ok = 0; if (lc->requested_fc & PAUSE_RX) @@ -1245,9 +1271,9 @@ int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, fc |= FW_PORT_CAP_FC_TX; memset(&c, 0, sizeof(c)); - c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST | - FW_CMD_EXEC | FW_PORT_CMD_PORTID(port)); - c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) | + c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | FW_PORT_CMD_PORTID_V(port)); + c.action_to_len16 = htonl(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) | FW_LEN16(c)); if (!(lc->supported & FW_PORT_CAP_ANEG)) { @@ -1275,9 +1301,9 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port) struct fw_port_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST | - FW_CMD_EXEC | FW_PORT_CMD_PORTID(port)); - c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) | + c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | FW_PORT_CMD_PORTID_V(port)); + c.action_to_len16 = htonl(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) | FW_LEN16(c)); c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); @@ -1563,7 +1589,7 @@ static void cim_intr_handler(struct adapter *adapter) int fat; - if (t4_read_reg(adapter, MA_PCIE_FW) & FW_PCIE_FW_ERR) + if (t4_read_reg(adapter, MA_PCIE_FW) & PCIE_FW_ERR) t4_report_fw_error(adapter); fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE, @@ -2071,9 +2097,9 @@ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, struct fw_rss_ind_tbl_cmd cmd; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) | - FW_CMD_REQUEST | FW_CMD_WRITE | - FW_RSS_IND_TBL_CMD_VIID(viid)); + cmd.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F | + FW_RSS_IND_TBL_CMD_VIID_V(viid)); cmd.retval_len16 = htonl(FW_LEN16(cmd)); /* each fw_rss_ind_tbl_cmd takes up to 32 entries */ @@ -2090,13 +2116,13 @@ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, while (nq > 0) { unsigned int v; - v = FW_RSS_IND_TBL_CMD_IQ0(*rsp); + v = FW_RSS_IND_TBL_CMD_IQ0_V(*rsp); if (++rsp >= rsp_end) rsp = rspq; - v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp); + v |= FW_RSS_IND_TBL_CMD_IQ1_V(*rsp); if (++rsp >= rsp_end) rsp = rspq; - v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp); + v |= 
FW_RSS_IND_TBL_CMD_IQ2_V(*rsp); if (++rsp >= rsp_end) rsp = rspq; @@ -2126,14 +2152,14 @@ int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, struct fw_rss_glb_config_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) | - FW_CMD_REQUEST | FW_CMD_WRITE); + c.op_to_write = htonl(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F); c.retval_len16 = htonl(FW_LEN16(c)); if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) { - c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode)); + c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode)); } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) { c.u.basicvirtual.mode_pkd = - htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode)); + htonl(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode)); c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags); } else return -EINVAL; @@ -2553,18 +2579,18 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid) { memset(wr, 0, sizeof(*wr)); - wr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR)); - wr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*wr) / 16)); - wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) | - V_FW_FILTER_WR_NOREPLY(qid < 0)); - wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER); + wr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR)); + wr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*wr) / 16)); + wr->tid_to_iq = htonl(FW_FILTER_WR_TID_V(ftid) | + FW_FILTER_WR_NOREPLY_V(qid < 0)); + wr->del_filter_to_l2tix = htonl(FW_FILTER_WR_DEL_FILTER_F); if (qid >= 0) - wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid)); + wr->rx_chan_rx_rpl_iq = htons(FW_FILTER_WR_RX_RPL_IQ_V(qid)); } #define INIT_CMD(var, cmd, rd_wr) do { \ - (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \ - FW_CMD_REQUEST | FW_CMD_##rd_wr); \ + (var).op_to_write = htonl(FW_CMD_OP_V(FW_##cmd##_CMD) | \ + FW_CMD_REQUEST_F | FW_CMD_##rd_wr##_F); \ (var).retval_len16 = htonl(FW_LEN16(var)); \ } while (0) @@ -2574,9 +2600,9 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, struct fw_ldst_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | - FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE)); + c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE)); c.cycles_to_len16 = htonl(FW_LEN16(c)); c.u.addrval.addr = htonl(addr); c.u.addrval.val = htonl(val); @@ -2602,11 +2628,11 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, struct fw_ldst_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST | - FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO)); + c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F | + FW_CMD_READ_F | FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO)); c.cycles_to_len16 = htonl(FW_LEN16(c)); - c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) | - FW_LDST_CMD_MMD(mmd)); + c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR_V(phy_addr) | + FW_LDST_CMD_MMD_V(mmd)); c.u.mdio.raddr = htons(reg); ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); @@ -2632,11 +2658,11 @@ int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, struct fw_ldst_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | 
FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO)); + c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO)); c.cycles_to_len16 = htonl(FW_LEN16(c)); - c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) | - FW_LDST_CMD_MMD(mmd)); + c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR_V(phy_addr) | + FW_LDST_CMD_MMD_V(mmd)); c.u.mdio.raddr = htons(reg); c.u.mdio.rval = htons(val); @@ -2773,13 +2799,13 @@ retry: memset(&c, 0, sizeof(c)); INIT_CMD(c, HELLO, WRITE); c.err_to_clearinit = htonl( - FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) | - FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) | - FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : - FW_HELLO_CMD_MBMASTER_MASK) | - FW_HELLO_CMD_MBASYNCNOT(evt_mbox) | - FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) | - FW_HELLO_CMD_CLEARINIT); + FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) | + FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) | + FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ? mbox : + FW_HELLO_CMD_MBMASTER_M) | + FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) | + FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) | + FW_HELLO_CMD_CLEARINIT_F); /* * Issue the HELLO command to the firmware. If it's not successful @@ -2792,17 +2818,17 @@ retry: if (ret < 0) { if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0) goto retry; - if (t4_read_reg(adap, MA_PCIE_FW) & FW_PCIE_FW_ERR) + if (t4_read_reg(adap, MA_PCIE_FW) & PCIE_FW_ERR) t4_report_fw_error(adap); return ret; } v = ntohl(c.err_to_clearinit); - master_mbox = FW_HELLO_CMD_MBMASTER_GET(v); + master_mbox = FW_HELLO_CMD_MBMASTER_G(v); if (state) { - if (v & FW_HELLO_CMD_ERR) + if (v & FW_HELLO_CMD_ERR_F) *state = DEV_STATE_ERR; - else if (v & FW_HELLO_CMD_INIT) + else if (v & FW_HELLO_CMD_INIT_F) *state = DEV_STATE_INIT; else *state = DEV_STATE_UNINIT; @@ -2817,9 +2843,9 @@ retry: * and we wouldn't want to fail pointlessly. (This can happen when an * OS loads lots of different drivers rapidly at the same time). In * this case, the Master PF returned by the firmware will be - * FW_PCIE_FW_MASTER_MASK so the test below will work ... + * PCIE_FW_MASTER_M so the test below will work ... */ - if ((v & (FW_HELLO_CMD_ERR|FW_HELLO_CMD_INIT)) == 0 && + if ((v & (FW_HELLO_CMD_ERR_F|FW_HELLO_CMD_INIT_F)) == 0 && master_mbox != mbox) { int waiting = FW_CMD_HELLO_TIMEOUT; @@ -2843,7 +2869,7 @@ retry: * our retries ... */ pcie_fw = t4_read_reg(adap, MA_PCIE_FW); - if (!(pcie_fw & (FW_PCIE_FW_ERR|FW_PCIE_FW_INIT))) { + if (!(pcie_fw & (PCIE_FW_ERR|PCIE_FW_INIT))) { if (waiting <= 0) { if (retries-- > 0) goto retry; @@ -2858,9 +2884,9 @@ retry: * report errors preferentially. */ if (state) { - if (pcie_fw & FW_PCIE_FW_ERR) + if (pcie_fw & PCIE_FW_ERR) *state = DEV_STATE_ERR; - else if (pcie_fw & FW_PCIE_FW_INIT) + else if (pcie_fw & PCIE_FW_INIT) *state = DEV_STATE_INIT; } @@ -2869,9 +2895,9 @@ retry: * there's not a valid Master PF, grab its identity * for our caller. */ - if (master_mbox == FW_PCIE_FW_MASTER_MASK && - (pcie_fw & FW_PCIE_FW_MASTER_VLD)) - master_mbox = FW_PCIE_FW_MASTER_GET(pcie_fw); + if (master_mbox == PCIE_FW_MASTER_M && + (pcie_fw & PCIE_FW_MASTER_VLD)) + master_mbox = PCIE_FW_MASTER_G(pcie_fw); break; } } @@ -2939,7 +2965,7 @@ int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset) * Issues a RESET command to firmware (if desired) with a HALT indication * and then puts the microprocessor into RESET state. 
The RESET command * will only be issued if a legitimate mailbox is provided (mbox <= - * FW_PCIE_FW_MASTER_MASK). + * PCIE_FW_MASTER_M). * * This is generally used in order for the host to safely manipulate the * adapter without fear of conflicting with whatever the firmware might @@ -2954,13 +2980,13 @@ static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force) * If a legitimate mailbox is provided, issue a RESET command * with a HALT indication. */ - if (mbox <= FW_PCIE_FW_MASTER_MASK) { + if (mbox <= PCIE_FW_MASTER_M) { struct fw_reset_cmd c; memset(&c, 0, sizeof(c)); INIT_CMD(c, RESET, WRITE); c.val = htonl(PIORST | PIORSTMODE); - c.halt_pkd = htonl(FW_RESET_CMD_HALT(1U)); + c.halt_pkd = htonl(FW_RESET_CMD_HALT_F); ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } @@ -2979,8 +3005,8 @@ static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force) */ if (ret == 0 || force) { t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST); - t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, - FW_PCIE_FW_HALT); + t4_set_reg_field(adap, PCIE_FW, PCIE_FW_HALT_F, + PCIE_FW_HALT_F); } /* @@ -3019,7 +3045,7 @@ static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset) * doing it automatically, we need to clear the PCIE_FW.HALT * bit. */ - t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, 0); + t4_set_reg_field(adap, PCIE_FW, PCIE_FW_HALT_F, 0); /* * If we've been given a valid mailbox, first try to get the @@ -3028,7 +3054,7 @@ static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset) * valid mailbox or the RESET command failed, fall back to * hitting the chip with a hammer. */ - if (mbox <= FW_PCIE_FW_MASTER_MASK) { + if (mbox <= PCIE_FW_MASTER_M) { t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0); msleep(100); if (t4_fw_reset(adap, mbox, @@ -3043,7 +3069,7 @@ static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset) t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0); for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) { - if (!(t4_read_reg(adap, PCIE_FW) & FW_PCIE_FW_HALT)) + if (!(t4_read_reg(adap, PCIE_FW) & PCIE_FW_HALT_F)) return 0; msleep(100); ms += 100; @@ -3080,6 +3106,9 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data; int reset, ret; + if (!t4_fw_matches_chip(adap, fw_hdr)) + return -EINVAL; + ret = t4_fw_halt(adap, mbox, force); if (ret < 0 && !force) return ret; @@ -3250,9 +3279,9 @@ int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, return -EINVAL; memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST | - FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) | - FW_PARAMS_CMD_VFN(vf)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F | + FW_CMD_READ_F | FW_PARAMS_CMD_PFN_V(pf) | + FW_PARAMS_CMD_VFN_V(vf)); c.retval_len16 = htonl(FW_LEN16(c)); for (i = 0; i < nparams; i++, p += 2) *p = htonl(*params++); @@ -3290,10 +3319,10 @@ int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox, return -EINVAL; memset(&c, 0, sizeof(c)); - c.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) | - FW_CMD_REQUEST | FW_CMD_WRITE | - FW_PARAMS_CMD_PFN(pf) | - FW_PARAMS_CMD_VFN(vf)); + c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F | + FW_PARAMS_CMD_PFN_V(pf) | + FW_PARAMS_CMD_VFN_V(vf)); c.retval_len16 = cpu_to_be32(FW_LEN16(c)); while (nparams--) { @@ -3328,9 +3357,9 @@ int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, return 
-EINVAL; memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) | - FW_PARAMS_CMD_VFN(vf)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_PARAMS_CMD_PFN_V(pf) | + FW_PARAMS_CMD_VFN_V(vf)); c.retval_len16 = htonl(FW_LEN16(c)); while (nparams--) { *p++ = htonl(*params++); @@ -3370,20 +3399,20 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, struct fw_pfvf_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) | - FW_PFVF_CMD_VFN(vf)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) | + FW_PFVF_CMD_VFN_V(vf)); c.retval_len16 = htonl(FW_LEN16(c)); - c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) | - FW_PFVF_CMD_NIQ(rxq)); - c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) | - FW_PFVF_CMD_PMASK(pmask) | - FW_PFVF_CMD_NEQ(txq)); - c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) | - FW_PFVF_CMD_NEXACTF(nexact)); - c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) | - FW_PFVF_CMD_WX_CAPS(wxcaps) | - FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl)); + c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT_V(rxqi) | + FW_PFVF_CMD_NIQ_V(rxq)); + c.type_to_neq = htonl(FW_PFVF_CMD_CMASK_V(cmask) | + FW_PFVF_CMD_PMASK_V(pmask) | + FW_PFVF_CMD_NEQ_V(txq)); + c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC_V(tc) | FW_PFVF_CMD_NVI_V(vi) | + FW_PFVF_CMD_NEXACTF_V(nexact)); + c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS_V(rcaps) | + FW_PFVF_CMD_WX_CAPS_V(wxcaps) | + FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl)); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } @@ -3412,11 +3441,11 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, struct fw_vi_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_CMD_EXEC | - FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf)); - c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c)); - c.portid_pkd = FW_VI_CMD_PORTID(port); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_CMD_EXEC_F | + FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf)); + c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC_F | FW_LEN16(c)); + c.portid_pkd = FW_VI_CMD_PORTID_V(port); c.nmac = nmac - 1; ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); @@ -3437,8 +3466,8 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, } } if (rss_size) - *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd)); - return FW_VI_CMD_VIID_GET(ntohs(c.type_viid)); + *rss_size = FW_VI_CMD_RSSSIZE_G(ntohs(c.rsssize_pkd)); + return FW_VI_CMD_VIID_G(ntohs(c.type_viid)); } /** @@ -3465,23 +3494,23 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, if (mtu < 0) mtu = FW_RXMODE_MTU_NO_CHG; if (promisc < 0) - promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK; + promisc = FW_VI_RXMODE_CMD_PROMISCEN_M; if (all_multi < 0) - all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK; + all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M; if (bcast < 0) - bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK; + bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M; if (vlanex < 0) - vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK; + vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M; memset(&c, 0, sizeof(c)); - c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid)); + c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST_F 
| + FW_CMD_WRITE_F | FW_VI_RXMODE_CMD_VIID_V(viid)); c.retval_len16 = htonl(FW_LEN16(c)); - c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) | - FW_VI_RXMODE_CMD_PROMISCEN(promisc) | - FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) | - FW_VI_RXMODE_CMD_BROADCASTEN(bcast) | - FW_VI_RXMODE_CMD_VLANEXEN(vlanex)); + c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU_V(mtu) | + FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) | + FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) | + FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) | + FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex)); return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); } @@ -3522,15 +3551,15 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, return -EINVAL; memset(&c, 0, sizeof(c)); - c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) | - FW_VI_MAC_CMD_VIID(viid)); - c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) | - FW_CMD_LEN16((naddr + 2) / 2)); + c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | (free ? FW_CMD_EXEC_F : 0) | + FW_VI_MAC_CMD_VIID_V(viid)); + c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS_V(free) | + FW_CMD_LEN16_V((naddr + 2) / 2)); for (i = 0, p = c.u.exact; i < naddr; i++, p++) { - p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID | - FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC)); + p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID_F | + FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC)); memcpy(p->macaddr, addr[i], sizeof(p->macaddr)); } @@ -3539,7 +3568,7 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, return ret; for (i = 0, p = c.u.exact; i < naddr; i++, p++) { - u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx)); + u16 index = FW_VI_MAC_CMD_IDX_G(ntohs(p->valid_to_idx)); if (idx) idx[i] = index >= max_naddr ? 0xffff : index; @@ -3585,17 +3614,17 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, mode = add_smt ? 
FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY; memset(&c, 0, sizeof(c)); - c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid)); - c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1)); - p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID | - FW_VI_MAC_CMD_SMAC_RESULT(mode) | - FW_VI_MAC_CMD_IDX(idx)); + c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_VI_MAC_CMD_VIID_V(viid)); + c.freemacs_to_len16 = htonl(FW_CMD_LEN16_V(1)); + p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID_F | + FW_VI_MAC_CMD_SMAC_RESULT_V(mode) | + FW_VI_MAC_CMD_IDX_V(idx)); memcpy(p->macaddr, addr, sizeof(p->macaddr)); ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); if (ret == 0) { - ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx)); + ret = FW_VI_MAC_CMD_IDX_G(ntohs(p->valid_to_idx)); if (ret >= max_mac_addr) ret = -ENOMEM; } @@ -3619,11 +3648,11 @@ int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, struct fw_vi_mac_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid)); - c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN | - FW_VI_MAC_CMD_HASHUNIEN(ucast) | - FW_CMD_LEN16(1)); + c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_VI_ENABLE_CMD_VIID_V(viid)); + c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN_F | + FW_VI_MAC_CMD_HASHUNIEN_V(ucast) | + FW_CMD_LEN16_V(1)); c.u.hash.hashvec = cpu_to_be64(vec); return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); } @@ -3646,12 +3675,12 @@ int t4_enable_vi_params(struct adapter *adap, unsigned int mbox, struct fw_vi_enable_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST | - FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid)); + c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID_V(viid)); - c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) | - FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c) | - FW_VI_ENABLE_CMD_DCB_INFO(dcb_en)); + c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN_V(rx_en) | + FW_VI_ENABLE_CMD_EEN_V(tx_en) | FW_LEN16(c) | + FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en)); return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL); } @@ -3686,9 +3715,9 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, struct fw_vi_enable_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST | - FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid)); - c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c)); + c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID_V(viid)); + c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c)); c.blinkdur = htons(nblinks); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } @@ -3713,11 +3742,11 @@ int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, struct fw_iq_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST | - FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) | - FW_IQ_CMD_VFN(vf)); - c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c)); - c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) | + FW_IQ_CMD_VFN_V(vf)); + c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE_F | FW_LEN16(c)); + c.type_to_iqandstindex = 
htonl(FW_IQ_CMD_TYPE_V(iqtype)); c.iqid = htons(iqid); c.fl0id = htons(fl0id); c.fl1id = htons(fl1id); @@ -3740,11 +3769,11 @@ int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, struct fw_eq_eth_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST | - FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) | - FW_EQ_ETH_CMD_VFN(vf)); - c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c)); - c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | FW_EQ_ETH_CMD_PFN_V(pf) | + FW_EQ_ETH_CMD_VFN_V(vf)); + c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c)); + c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID_V(eqid)); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } @@ -3764,11 +3793,11 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, struct fw_eq_ctrl_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST | - FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) | - FW_EQ_CTRL_CMD_VFN(vf)); - c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c)); - c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | FW_EQ_CTRL_CMD_PFN_V(pf) | + FW_EQ_CTRL_CMD_VFN_V(vf)); + c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c)); + c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID_V(eqid)); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } @@ -3788,11 +3817,11 @@ int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, struct fw_eq_ofld_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST | - FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) | - FW_EQ_OFLD_CMD_VFN(vf)); - c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c)); - c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | FW_EQ_OFLD_CMD_PFN_V(pf) | + FW_EQ_OFLD_CMD_VFN_V(vf)); + c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c)); + c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID_V(eqid)); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } @@ -3810,25 +3839,25 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl) if (opcode == FW_PORT_CMD) { /* link/module state change message */ int speed = 0, fc = 0; const struct fw_port_cmd *p = (void *)rpl; - int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid)); + int chan = FW_PORT_CMD_PORTID_G(ntohl(p->op_to_portid)); int port = adap->chan_map[chan]; struct port_info *pi = adap2pinfo(adap, port); struct link_config *lc = &pi->link_cfg; u32 stat = ntohl(p->u.info.lstatus_to_modtype); - int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0; - u32 mod = FW_PORT_CMD_MODTYPE_GET(stat); + int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0; + u32 mod = FW_PORT_CMD_MODTYPE_G(stat); - if (stat & FW_PORT_CMD_RXPAUSE) + if (stat & FW_PORT_CMD_RXPAUSE_F) fc |= PAUSE_RX; - if (stat & FW_PORT_CMD_TXPAUSE) + if (stat & FW_PORT_CMD_TXPAUSE_F) fc |= PAUSE_TX; - if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M)) + if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M)) speed = 100; - else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G)) + else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G)) speed = 1000; - else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G)) + else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) speed = 10000; - else if (stat & 
FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G)) + else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) speed = 40000; if (link_ok != lc->link_ok || speed != lc->speed || @@ -4002,6 +4031,126 @@ int t4_prep_adapter(struct adapter *adapter) } /** + * cxgb4_t4_bar2_sge_qregs - return BAR2 SGE Queue register information + * @adapter: the adapter + * @qid: the Queue ID + * @qtype: the Ingress or Egress type for @qid + * @pbar2_qoffset: BAR2 Queue Offset + * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues + * + * Returns the BAR2 SGE Queue Registers information associated with the + * indicated Absolute Queue ID. These are passed back in return value + * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue + * and T4_BAR2_QTYPE_INGRESS for Ingress Queues. + * + * This may return an error which indicates that BAR2 SGE Queue + * registers aren't available. If an error is not returned, then the + * following values are returned: + * + * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers + * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid + * + * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which + * require the "Inferred Queue ID" ability may be used. E.g. the + * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0, + * then these "Inferred Queue ID" registers may not be used. + */ +int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter, + unsigned int qid, + enum t4_bar2_qtype qtype, + u64 *pbar2_qoffset, + unsigned int *pbar2_qid) +{ + unsigned int page_shift, page_size, qpp_shift, qpp_mask; + u64 bar2_page_offset, bar2_qoffset; + unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred; + + /* T4 doesn't support BAR2 SGE Queue registers. + */ + if (is_t4(adapter->params.chip)) + return -EINVAL; + + /* Get our SGE Page Size parameters. + */ + page_shift = adapter->params.sge.hps + 10; + page_size = 1 << page_shift; + + /* Get the right Queues per Page parameters for our Queue. + */ + qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS + ? adapter->params.sge.eq_qpp + : adapter->params.sge.iq_qpp); + qpp_mask = (1 << qpp_shift) - 1; + + /* Calculate the basics of the BAR2 SGE Queue register area: + * o The BAR2 page the Queue registers will be in. + * o The BAR2 Queue ID. + * o The BAR2 Queue ID Offset into the BAR2 page. + */ + bar2_page_offset = ((qid >> qpp_shift) << page_shift); + bar2_qid = qid & qpp_mask; + bar2_qid_offset = bar2_qid * SGE_UDB_SIZE; + + /* If the BAR2 Queue ID Offset is less than the Page Size, then the + * hardware will infer the Absolute Queue ID simply from the writes to + * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a + * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply + * write to the first BAR2 SGE Queue Area within the BAR2 Page with + * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID + * from the BAR2 Page and BAR2 Queue ID. + * + * One important consequence of this is that some BAR2 SGE registers + * have a "Queue ID" field and we can write the BAR2 SGE Queue ID + * there. But other registers synthesize the SGE Queue ID purely + * from the writes to the registers -- the Write Combined Doorbell + * Buffer is a good example. These BAR2 SGE Registers are only + * available for those BAR2 SGE Register areas where the SGE Absolute + * Queue ID can be inferred from simple writes.
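A worked example may make the arithmetic above concrete. The numbers are illustrative assumptions, not values from the patch: a 4KB SGE host page (hps encoded so that page_shift = hps + 10 = 12) and SGE_UDB_SIZE = 128 bytes per User Doorbell area.

/* Illustrative arithmetic only -- not part of the patch.
 *
 * Case 1: eq_qpp = 4 (16 egress queues per page), qid = 17:
 *   bar2_page_offset = (17 >> 4) << 12 = 0x1000
 *   bar2_qid         = 17 & 15         = 1
 *   bar2_qid_offset  = 1 * 128         = 128   (< 4096, so inferred)
 *   => bar2_qoffset = 0x1000 + 128 = 0x1080 and the returned bar2_qid
 *      is 0; inferred-QID registers such as the WC Doorbell Buffer
 *      may be used.
 *
 * Case 2: eq_qpp = 6 (64 egress queues per page), qid = 40:
 *   bar2_page_offset = (40 >> 6) << 12 = 0x0
 *   bar2_qid         = 40 & 63         = 40
 *   bar2_qid_offset  = 40 * 128        = 5120  (>= 4096, not inferred)
 *   => bar2_qoffset = 0x0 and the returned bar2_qid stays 40, which
 *      must be written into the QID field of every doorbell access.
 */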
+ */ + bar2_qoffset = bar2_page_offset; + bar2_qinferred = (bar2_qid_offset < page_size); + if (bar2_qinferred) { + bar2_qoffset += bar2_qid_offset; + bar2_qid = 0; + } + + *pbar2_qoffset = bar2_qoffset; + *pbar2_qid = bar2_qid; + return 0; +} + +/** + * t4_init_sge_params - initialize adap->params.sge + * @adapter: the adapter + * + * Initialize various fields of the adapter's SGE Parameters structure. + */ +int t4_init_sge_params(struct adapter *adapter) +{ + struct sge_params *sge_params = &adapter->params.sge; + u32 hps, qpp; + unsigned int s_hps, s_qpp; + + /* Extract the SGE Page Size for our PF. + */ + hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE); + s_hps = (HOSTPAGESIZEPF0_S + + (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->fn); + sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M); + + /* Extract the SGE Egress and Ingress Queues Per Page for our PF. + */ + s_qpp = (QUEUESPERPAGEPF0_S + + (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->fn); + qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF); + sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_MASK); + qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF); + sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_MASK); + + return 0; +} + +/** * t4_init_tp_params - initialize adap->params.tp * @adap: the adapter * @@ -4121,11 +4270,11 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) while ((adap->params.portvec & (1 << j)) == 0) j++; - c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | - FW_CMD_REQUEST | FW_CMD_READ | - FW_PORT_CMD_PORTID(j)); + c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F | + FW_PORT_CMD_PORTID_V(j)); c.action_to_len16 = htonl( - FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) | + FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) | FW_LEN16(c)); ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); if (ret) @@ -4143,13 +4292,13 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) adap->port[i]->dev_port = j; ret = ntohl(c.u.info.lstatus_to_modtype); - p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ? - FW_PORT_CMD_MDIOADDR_GET(ret) : -1; - p->port_type = FW_PORT_CMD_PTYPE_GET(ret); + p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ?
+ FW_PORT_CMD_MDIOADDR_G(ret) : -1; + p->port_type = FW_PORT_CMD_PTYPE_G(ret); p->mod_type = FW_PORT_MOD_TYPE_NA; - rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) | - FW_CMD_REQUEST | FW_CMD_READ | + rvc.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F | FW_RSS_VI_CONFIG_CMD_VIID(p->viid)); rvc.retval_len16 = htonl(FW_LEN16(rvc)); ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index 5f4db2398c7..0f89f68948a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -205,16 +205,62 @@ struct work_request_hdr { #define WR_HDR struct work_request_hdr wr /* option 0 fields */ -#define S_MSS_IDX 60 -#define M_MSS_IDX 0xF -#define V_MSS_IDX(x) ((__u64)(x) << S_MSS_IDX) -#define G_MSS_IDX(x) (((x) >> S_MSS_IDX) & M_MSS_IDX) +#define TX_CHAN_S 2 +#define TX_CHAN_V(x) ((x) << TX_CHAN_S) + +#define ULP_MODE_S 8 +#define ULP_MODE_V(x) ((x) << ULP_MODE_S) + +#define RCV_BUFSIZ_S 12 +#define RCV_BUFSIZ_M 0x3FFU +#define RCV_BUFSIZ_V(x) ((x) << RCV_BUFSIZ_S) + +#define SMAC_SEL_S 28 +#define SMAC_SEL_V(x) ((__u64)(x) << SMAC_SEL_S) + +#define L2T_IDX_S 36 +#define L2T_IDX_V(x) ((__u64)(x) << L2T_IDX_S) + +#define WND_SCALE_S 50 +#define WND_SCALE_V(x) ((__u64)(x) << WND_SCALE_S) + +#define KEEP_ALIVE_S 54 +#define KEEP_ALIVE_V(x) ((__u64)(x) << KEEP_ALIVE_S) +#define KEEP_ALIVE_F KEEP_ALIVE_V(1ULL) + +#define MSS_IDX_S 60 +#define MSS_IDX_M 0xF +#define MSS_IDX_V(x) ((__u64)(x) << MSS_IDX_S) +#define MSS_IDX_G(x) (((x) >> MSS_IDX_S) & MSS_IDX_M) /* option 2 fields */ -#define S_RSS_QUEUE 0 -#define M_RSS_QUEUE 0x3FF -#define V_RSS_QUEUE(x) ((x) << S_RSS_QUEUE) -#define G_RSS_QUEUE(x) (((x) >> S_RSS_QUEUE) & M_RSS_QUEUE) +#define RSS_QUEUE_S 0 +#define RSS_QUEUE_M 0x3FF +#define RSS_QUEUE_V(x) ((x) << RSS_QUEUE_S) +#define RSS_QUEUE_G(x) (((x) >> RSS_QUEUE_S) & RSS_QUEUE_M) + +#define RSS_QUEUE_VALID_S 10 +#define RSS_QUEUE_VALID_V(x) ((x) << RSS_QUEUE_VALID_S) +#define RSS_QUEUE_VALID_F RSS_QUEUE_VALID_V(1U) + +#define RX_FC_DISABLE_S 20 +#define RX_FC_DISABLE_V(x) ((x) << RX_FC_DISABLE_S) +#define RX_FC_DISABLE_F RX_FC_DISABLE_V(1U) + +#define RX_FC_VALID_S 22 +#define RX_FC_VALID_V(x) ((x) << RX_FC_VALID_S) +#define RX_FC_VALID_F RX_FC_VALID_V(1U) + +#define RX_CHANNEL_S 26 +#define RX_CHANNEL_V(x) ((x) << RX_CHANNEL_S) + +#define WND_SCALE_EN_S 28 +#define WND_SCALE_EN_V(x) ((x) << WND_SCALE_EN_S) +#define WND_SCALE_EN_F WND_SCALE_EN_V(1U) + +#define T5_OPT_2_VALID_S 31 +#define T5_OPT_2_VALID_V(x) ((x) << T5_OPT_2_VALID_S) +#define T5_OPT_2_VALID_F T5_OPT_2_VALID_V(1U) struct cpl_pass_open_req { WR_HDR; @@ -224,20 +270,11 @@ struct cpl_pass_open_req { __be32 local_ip; __be32 peer_ip; __be64 opt0; -#define TX_CHAN(x) ((x) << 2) #define NO_CONG(x) ((x) << 4) #define DELACK(x) ((x) << 5) -#define ULP_MODE(x) ((x) << 8) -#define RCV_BUFSIZ(x) ((x) << 12) -#define RCV_BUFSIZ_MASK 0x3FFU #define DSCP(x) ((x) << 22) -#define SMAC_SEL(x) ((u64)(x) << 28) -#define L2T_IDX(x) ((u64)(x) << 36) #define TCAM_BYPASS(x) ((u64)(x) << 48) #define NAGLE(x) ((u64)(x) << 49) -#define WND_SCALE(x) ((u64)(x) << 50) -#define KEEP_ALIVE(x) ((u64)(x) << 54) -#define MSS_IDX(x) ((u64)(x) << 60) __be64 opt1; #define SYN_RSS_ENABLE (1 << 0) #define SYN_RSS_QUEUE(x) ((x) << 2) @@ -267,20 +304,13 @@ struct cpl_pass_accept_rpl { WR_HDR; union opcode_tid ot; __be32 opt2; -#define RSS_QUEUE(x) ((x) << 0) -#define 
RSS_QUEUE_VALID (1 << 10) #define RX_COALESCE_VALID(x) ((x) << 11) #define RX_COALESCE(x) ((x) << 12) #define PACE(x) ((x) << 16) -#define RX_FC_VALID ((1U) << 19) -#define RX_FC_DISABLE ((1U) << 20) #define TX_QUEUE(x) ((x) << 23) -#define RX_CHANNEL(x) ((x) << 26) #define CCTRL_ECN(x) ((x) << 27) -#define WND_SCALE_EN(x) ((x) << 28) #define TSTAMPS_EN(x) ((x) << 29) #define SACK_EN(x) ((x) << 30) -#define T5_OPT_2_VALID ((1U) << 31) __be64 opt0; }; @@ -305,10 +335,10 @@ struct cpl_act_open_req { __be32 opt2; }; -#define S_FILTER_TUPLE 24 -#define M_FILTER_TUPLE 0xFFFFFFFFFF -#define V_FILTER_TUPLE(x) ((x) << S_FILTER_TUPLE) -#define G_FILTER_TUPLE(x) (((x) >> S_FILTER_TUPLE) & M_FILTER_TUPLE) +#define FILTER_TUPLE_S 24 +#define FILTER_TUPLE_M 0xFFFFFFFFFF +#define FILTER_TUPLE_V(x) ((x) << FILTER_TUPLE_S) +#define FILTER_TUPLE_G(x) (((x) >> FILTER_TUPLE_S) & FILTER_TUPLE_M) struct cpl_t5_act_open_req { WR_HDR; union opcode_tid ot; @@ -579,10 +609,16 @@ struct cpl_rx_data_ack { WR_HDR; union opcode_tid ot; __be32 credit_dack; -#define RX_CREDITS(x) ((x) << 0) -#define RX_FORCE_ACK(x) ((x) << 28) }; +/* cpl_rx_data_ack.ack_seq fields */ +#define RX_CREDITS_S 0 +#define RX_CREDITS_V(x) ((x) << RX_CREDITS_S) + +#define RX_FORCE_ACK_S 28 +#define RX_FORCE_ACK_V(x) ((x) << RX_FORCE_ACK_S) +#define RX_FORCE_ACK_F RX_FORCE_ACK_V(1U) + struct cpl_rx_pkt { struct rss_header rsshdr; u8 opcode; @@ -803,6 +839,9 @@ enum { ULP_TX_SC_ISGL = 0x83 }; +#define ULPTX_CMD_S 24 +#define ULPTX_CMD_V(x) ((x) << ULPTX_CMD_S) + struct ulptx_sge_pair { __be32 len[2]; __be64 addr[2]; @@ -810,7 +849,6 @@ struct ulptx_sge_pair { struct ulptx_sgl { __be32 cmd_nsge; -#define ULPTX_CMD(x) ((x) << 24) #define ULPTX_NSGE(x) ((x) << 0) #define ULPTX_MORE (1U << 23) __be32 len0; @@ -821,15 +859,21 @@ struct ulptx_sgl { struct ulp_mem_io { WR_HDR; __be32 cmd; -#define ULP_MEMIO_ORDER(x) ((x) << 23) __be32 len16; /* command length */ __be32 dlen; /* data length in 32-byte units */ -#define ULP_MEMIO_DATA_LEN(x) ((x) << 0) __be32 lock_addr; -#define ULP_MEMIO_ADDR(x) ((x) << 0) #define ULP_MEMIO_LOCK(x) ((x) << 31) }; +/* additional ulp_mem_io.cmd fields */ +#define ULP_MEMIO_ORDER_S 23 +#define ULP_MEMIO_ORDER_V(x) ((x) << ULP_MEMIO_ORDER_S) +#define ULP_MEMIO_ORDER_F ULP_MEMIO_ORDER_V(1U) + +#define T5_ULP_MEMIO_IMM_S 23 +#define T5_ULP_MEMIO_IMM_V(x) ((x) << T5_ULP_MEMIO_IMM_S) +#define T5_ULP_MEMIO_IMM_F T5_ULP_MEMIO_IMM_V(1U) + #define S_T5_ULP_MEMIO_IMM 23 #define V_T5_ULP_MEMIO_IMM(x) ((x) << S_T5_ULP_MEMIO_IMM) #define F_T5_ULP_MEMIO_IMM V_T5_ULP_MEMIO_IMM(1U) @@ -838,4 +882,12 @@ struct ulp_mem_io { #define V_T5_ULP_MEMIO_ORDER(x) ((x) << S_T5_ULP_MEMIO_ORDER) #define F_T5_ULP_MEMIO_ORDER V_T5_ULP_MEMIO_ORDER(1U) +/* ulp_mem_io.lock_addr fields */ +#define ULP_MEMIO_ADDR_S 0 +#define ULP_MEMIO_ADDR_V(x) ((x) << ULP_MEMIO_ADDR_S) + +/* ulp_mem_io.dlen fields */ +#define ULP_MEMIO_DATA_LEN_S 0 +#define ULP_MEMIO_DATA_LEN_V(x) ((x) << ULP_MEMIO_DATA_LEN_S) + #endif /* __T4_MSG_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h new file mode 100644 index 00000000000..9e4f95a91fb --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -0,0 +1,160 @@ +/* + * This file is part of the Chelsio T4/T5 Ethernet driver for Linux. + * + * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __T4_PCI_ID_TBL_H__ +#define __T4_PCI_ID_TBL_H__ + +/* The code can define cpp macros for creating a PCI Device ID Table. This is + * useful because it allows the PCI ID Table to be maintained in a single place. + * + * The macros are: + * + * CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN + * -- Used to start the definition of the PCI ID Table. + * + * CH_PCI_DEVICE_ID_FUNCTION + * -- The PCI Function Number to use in the PCI Device ID Table. "0" + * -- for drivers attaching to PF0-3, "4" for drivers attaching to PF4, + * -- "8" for drivers attaching to SR-IOV Virtual Functions, etc. + * + * CH_PCI_DEVICE_ID_FUNCTION2 [optional] + * -- If defined, create a PCI Device ID Table with both + * -- CH_PCI_DEVICE_ID_FUNCTION and CH_PCI_DEVICE_ID_FUNCTION2 populated. + * + * CH_PCI_ID_TABLE_ENTRY(DeviceID) + * -- Used for the individual PCI Device ID entries. Note that we will + * -- be adding a trailing comma (",") after all of the entries (and + * -- between the pairs of entries if CH_PCI_DEVICE_ID_FUNCTION2 is defined). + * + * CH_PCI_DEVICE_ID_TABLE_DEFINE_END + * -- Used to finish the definition of the PCI ID Table. Note that we + * -- will be adding a trailing semi-colon (";") here. + */ +#ifdef CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN + +#ifndef CH_PCI_DEVICE_ID_FUNCTION +#error CH_PCI_DEVICE_ID_FUNCTION not defined! +#endif +#ifndef CH_PCI_ID_TABLE_ENTRY +#error CH_PCI_ID_TABLE_ENTRY not defined! +#endif +#ifndef CH_PCI_DEVICE_ID_TABLE_DEFINE_END +#error CH_PCI_DEVICE_ID_TABLE_DEFINE_END not defined! +#endif + +/* T4 and later ASICs use a PCI Device ID scheme of 0xVFPP where: + * + * V = "4" for T4; "5" for T5, etc. + * F = "0" for PF 0..3; "4".."7" for PF4..7; and "8" for VFs + * PP = adapter product designation + * + * We use this consistency in order to create the proper PCI Device IDs + * for the specified CH_PCI_DEVICE_ID_FUNCTION. 
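+ *
+ * For illustration only (an assumption of this note, not something this
+ * header mandates), a driver attaching to PF4 might instantiate its PCI
+ * Device ID Table roughly like this, where the table name "my_pci_tbl",
+ * the PCI_VDEVICE() form of the entries and the trailing driver_data
+ * value are illustrative choices:
+ *
+ *	#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
+ *		static const struct pci_device_id my_pci_tbl[] = {
+ *	#define CH_PCI_DEVICE_ID_FUNCTION 0x4
+ *	#define CH_PCI_ID_TABLE_ENTRY(devid) \
+ *		{ PCI_VDEVICE(CHELSIO, (devid)), 0 }
+ *	#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } }
+ *	#include "t4_pci_id_tbl.h"
+ *
+ * With CH_PCI_DEVICE_ID_FUNCTION set to 0x4, CH_PCI_ID_TABLE_FENTRY(0x4001)
+ * below expands to an entry for Device ID 0x4401 (the T420-cr as seen from
+ * PF4), following the 0xVFPP scheme described above.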
+ */ +#ifndef CH_PCI_DEVICE_ID_FUNCTION2 +#define CH_PCI_ID_TABLE_FENTRY(devid) \ + CH_PCI_ID_TABLE_ENTRY((devid) | \ + ((CH_PCI_DEVICE_ID_FUNCTION) << 8)) +#else +#define CH_PCI_ID_TABLE_FENTRY(devid) \ + CH_PCI_ID_TABLE_ENTRY((devid) | \ + ((CH_PCI_DEVICE_ID_FUNCTION) << 8)), \ + CH_PCI_ID_TABLE_ENTRY((devid) | \ + ((CH_PCI_DEVICE_ID_FUNCTION2) << 8)) +#endif + +CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN + /* T4 adapters: + */ + CH_PCI_ID_TABLE_FENTRY(0x4000), /* T440-dbg */ + CH_PCI_ID_TABLE_FENTRY(0x4001), /* T420-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4002), /* T422-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4003), /* T440-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4004), /* T420-bch */ + CH_PCI_ID_TABLE_FENTRY(0x4005), /* T440-bch */ + CH_PCI_ID_TABLE_FENTRY(0x4006), /* T440-ch */ + CH_PCI_ID_TABLE_FENTRY(0x4007), /* T420-so */ + CH_PCI_ID_TABLE_FENTRY(0x4008), /* T420-cx */ + CH_PCI_ID_TABLE_FENTRY(0x4009), /* T420-bt */ + CH_PCI_ID_TABLE_FENTRY(0x400a), /* T404-bt */ + CH_PCI_ID_TABLE_FENTRY(0x400b), /* B420-sr */ + CH_PCI_ID_TABLE_FENTRY(0x400c), /* B404-bt */ + CH_PCI_ID_TABLE_FENTRY(0x400d), /* T480-cr */ + CH_PCI_ID_TABLE_FENTRY(0x400e), /* T440-LP-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4080), /* Custom T480-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4081), /* Custom T440-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4082), /* Custom T420-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4083), /* Custom T420-xaui */ + CH_PCI_ID_TABLE_FENTRY(0x4084), /* Custom T440-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4085), /* Custom T420-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4086), /* Custom T440-bt */ + CH_PCI_ID_TABLE_FENTRY(0x4087), /* Custom T440-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4088), /* Custom T440 2-xaui, 2-xfi */ + + /* T5 adapters: + */ + CH_PCI_ID_TABLE_FENTRY(0x5000), /* T580-dbg */ + CH_PCI_ID_TABLE_FENTRY(0x5001), /* T520-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5002), /* T522-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5003), /* T540-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5004), /* T520-bch */ + CH_PCI_ID_TABLE_FENTRY(0x5005), /* T540-bch */ + CH_PCI_ID_TABLE_FENTRY(0x5006), /* T540-ch */ + CH_PCI_ID_TABLE_FENTRY(0x5007), /* T520-so */ + CH_PCI_ID_TABLE_FENTRY(0x5008), /* T520-cx */ + CH_PCI_ID_TABLE_FENTRY(0x5009), /* T520-bt */ + CH_PCI_ID_TABLE_FENTRY(0x500a), /* T504-bt */ + CH_PCI_ID_TABLE_FENTRY(0x500b), /* B520-sr */ + CH_PCI_ID_TABLE_FENTRY(0x500c), /* B504-bt */ + CH_PCI_ID_TABLE_FENTRY(0x500d), /* T580-cr */ + CH_PCI_ID_TABLE_FENTRY(0x500e), /* T540-LP-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5010), /* T580-LP-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5011), /* T520-LL-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5012), /* T560-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5013), /* T580-chr */ + CH_PCI_ID_TABLE_FENTRY(0x5014), /* T580-so */ + CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */ + CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5083), /* Custom T540-LP-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5084), /* Custom T580-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5085), /* Custom 3x T580-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5086), /* Custom 2x T580-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5087), /* Custom T580-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5088), /* Custom T570-CR */ +CH_PCI_DEVICE_ID_TABLE_DEFINE_END; + +#endif /* CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN */ + +#endif /* __T4_PCI_ID_TBL_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 8d2de1006b0..d7bd34ee65b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ 
b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -155,17 +155,22 @@ #define HOSTPAGESIZEPF2_SHIFT 8 #define HOSTPAGESIZEPF2(x) ((x) << HOSTPAGESIZEPF2_SHIFT) -#define HOSTPAGESIZEPF1_MASK 0x0000000fU -#define HOSTPAGESIZEPF1_SHIFT 4 -#define HOSTPAGESIZEPF1(x) ((x) << HOSTPAGESIZEPF1_SHIFT) +#define HOSTPAGESIZEPF1_M 0x0000000fU +#define HOSTPAGESIZEPF1_S 4 +#define HOSTPAGESIZEPF1(x) ((x) << HOSTPAGESIZEPF1_S) -#define HOSTPAGESIZEPF0_MASK 0x0000000fU -#define HOSTPAGESIZEPF0_SHIFT 0 -#define HOSTPAGESIZEPF0(x) ((x) << HOSTPAGESIZEPF0_SHIFT) +#define HOSTPAGESIZEPF0_M 0x0000000fU +#define HOSTPAGESIZEPF0_S 0 +#define HOSTPAGESIZEPF0(x) ((x) << HOSTPAGESIZEPF0_S) #define SGE_EGRESS_QUEUES_PER_PAGE_PF 0x1010 -#define QUEUESPERPAGEPF0_MASK 0x0000000fU -#define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK) +#define SGE_EGRESS_QUEUES_PER_PAGE_VF_A 0x1014 + +#define QUEUESPERPAGEPF1_S 4 + +#define QUEUESPERPAGEPF0_S 0 +#define QUEUESPERPAGEPF0_MASK 0x0000000fU +#define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK) #define QUEUESPERPAGEPF0 0 #define QUEUESPERPAGEPF1 4 @@ -323,6 +328,7 @@ #define SGE_DEBUG_DATA_LOW_INDEX_3 0x12cc #define SGE_DEBUG_DATA_HIGH_INDEX_10 0x12a8 #define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4 +#define SGE_INGRESS_QUEUES_PER_PAGE_VF_A 0x10f8 #define S_HP_INT_THRESH 28 #define M_HP_INT_THRESH 0xfU @@ -511,21 +517,62 @@ #define MC_BIST_STATUS_RDATA 0x7688 -#define MA_EDRAM0_BAR 0x77c0 -#define MA_EDRAM1_BAR 0x77c4 -#define EDRAM_SIZE_MASK 0xfffU -#define EDRAM_SIZE_GET(x) ((x) & EDRAM_SIZE_MASK) +#define MA_EDRAM0_BAR_A 0x77c0 + +#define EDRAM0_SIZE_S 0 +#define EDRAM0_SIZE_M 0xfffU +#define EDRAM0_SIZE_V(x) ((x) << EDRAM0_SIZE_S) +#define EDRAM0_SIZE_G(x) (((x) >> EDRAM0_SIZE_S) & EDRAM0_SIZE_M) + +#define MA_EDRAM1_BAR_A 0x77c4 + +#define EDRAM1_SIZE_S 0 +#define EDRAM1_SIZE_M 0xfffU +#define EDRAM1_SIZE_V(x) ((x) << EDRAM1_SIZE_S) +#define EDRAM1_SIZE_G(x) (((x) >> EDRAM1_SIZE_S) & EDRAM1_SIZE_M) + +#define MA_EXT_MEMORY_BAR_A 0x77c8 + +#define EXT_MEM_SIZE_S 0 +#define EXT_MEM_SIZE_M 0xfffU +#define EXT_MEM_SIZE_V(x) ((x) << EXT_MEM_SIZE_S) +#define EXT_MEM_SIZE_G(x) (((x) >> EXT_MEM_SIZE_S) & EXT_MEM_SIZE_M) + +#define MA_EXT_MEMORY1_BAR_A 0x7808 + +#define EXT_MEM1_SIZE_S 0 +#define EXT_MEM1_SIZE_M 0xfffU +#define EXT_MEM1_SIZE_V(x) ((x) << EXT_MEM1_SIZE_S) +#define EXT_MEM1_SIZE_G(x) (((x) >> EXT_MEM1_SIZE_S) & EXT_MEM1_SIZE_M) + +#define MA_EXT_MEMORY0_BAR_A 0x77c8 + +#define EXT_MEM0_SIZE_S 0 +#define EXT_MEM0_SIZE_M 0xfffU +#define EXT_MEM0_SIZE_V(x) ((x) << EXT_MEM0_SIZE_S) +#define EXT_MEM0_SIZE_G(x) (((x) >> EXT_MEM0_SIZE_S) & EXT_MEM0_SIZE_M) + +#define MA_TARGET_MEM_ENABLE_A 0x77d8 + +#define EXT_MEM_ENABLE_S 2 +#define EXT_MEM_ENABLE_V(x) ((x) << EXT_MEM_ENABLE_S) +#define EXT_MEM_ENABLE_F EXT_MEM_ENABLE_V(1U) + +#define EDRAM1_ENABLE_S 1 +#define EDRAM1_ENABLE_V(x) ((x) << EDRAM1_ENABLE_S) +#define EDRAM1_ENABLE_F EDRAM1_ENABLE_V(1U) + +#define EDRAM0_ENABLE_S 0 +#define EDRAM0_ENABLE_V(x) ((x) << EDRAM0_ENABLE_S) +#define EDRAM0_ENABLE_F EDRAM0_ENABLE_V(1U) -#define MA_EXT_MEMORY_BAR 0x77c8 -#define EXT_MEM_SIZE_MASK 0x00000fffU -#define EXT_MEM_SIZE_SHIFT 0 -#define EXT_MEM_SIZE_GET(x) (((x) & EXT_MEM_SIZE_MASK) >> EXT_MEM_SIZE_SHIFT) +#define EXT_MEM1_ENABLE_S 4 +#define EXT_MEM1_ENABLE_V(x) ((x) << EXT_MEM1_ENABLE_S) +#define EXT_MEM1_ENABLE_F EXT_MEM1_ENABLE_V(1U) -#define MA_TARGET_MEM_ENABLE 0x77d8 -#define EXT_MEM1_ENABLE 0x00000010U -#define EXT_MEM_ENABLE 0x00000004U -#define EDRAM1_ENABLE 0x00000002U -#define EDRAM0_ENABLE 
0x00000001U +#define EXT_MEM0_ENABLE_S 2 +#define EXT_MEM0_ENABLE_V(x) ((x) << EXT_MEM0_ENABLE_S) +#define EXT_MEM0_ENABLE_F EXT_MEM0_ENABLE_V(1U) #define MA_INT_CAUSE 0x77e0 #define MEM_PERR_INT_CAUSE 0x00000002U @@ -542,7 +589,6 @@ #define MA_PARITY_ERROR_STATUS 0x77f4 #define MA_PARITY_ERROR_STATUS2 0x7804 -#define MA_EXT_MEMORY1_BAR 0x7808 #define EDC_0_BASE_ADDR 0x7900 #define EDC_BIST_CMD 0x7904 diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 3409756a85b..beaf80a6214 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -109,18 +109,49 @@ struct fw_wr_hdr { __be32 lo; }; -#define FW_WR_OP(x) ((x) << 24) -#define FW_WR_OP_GET(x) (((x) >> 24) & 0xff) -#define FW_WR_ATOMIC(x) ((x) << 23) -#define FW_WR_FLUSH(x) ((x) << 22) -#define FW_WR_COMPL(x) ((x) << 21) -#define FW_WR_IMMDLEN_MASK 0xff -#define FW_WR_IMMDLEN(x) ((x) << 0) - -#define FW_WR_EQUIQ (1U << 31) -#define FW_WR_EQUEQ (1U << 30) -#define FW_WR_FLOWID(x) ((x) << 8) -#define FW_WR_LEN16(x) ((x) << 0) +/* work request opcode (hi) */ +#define FW_WR_OP_S 24 +#define FW_WR_OP_M 0xff +#define FW_WR_OP_V(x) ((x) << FW_WR_OP_S) +#define FW_WR_OP_G(x) (((x) >> FW_WR_OP_S) & FW_WR_OP_M) + +/* atomic flag (hi) - firmware encapsulates CPLs in CPL_BARRIER */ +#define FW_WR_ATOMIC_S 23 +#define FW_WR_ATOMIC_V(x) ((x) << FW_WR_ATOMIC_S) + +/* flush flag (hi) - firmware flushes flushable work request buffered + * in the flow context. + */ +#define FW_WR_FLUSH_S 22 +#define FW_WR_FLUSH_V(x) ((x) << FW_WR_FLUSH_S) + +/* completion flag (hi) - firmware generates a cpl_fw6_ack */ +#define FW_WR_COMPL_S 21 +#define FW_WR_COMPL_V(x) ((x) << FW_WR_COMPL_S) +#define FW_WR_COMPL_F FW_WR_COMPL_V(1U) + +/* work request immediate data length (hi) */ +#define FW_WR_IMMDLEN_S 0 +#define FW_WR_IMMDLEN_M 0xff +#define FW_WR_IMMDLEN_V(x) ((x) << FW_WR_IMMDLEN_S) + +/* egress queue status update to associated ingress queue entry (lo) */ +#define FW_WR_EQUIQ_S 31 +#define FW_WR_EQUIQ_V(x) ((x) << FW_WR_EQUIQ_S) +#define FW_WR_EQUIQ_F FW_WR_EQUIQ_V(1U) + +/* egress queue status update to egress queue status entry (lo) */ +#define FW_WR_EQUEQ_S 30 +#define FW_WR_EQUEQ_V(x) ((x) << FW_WR_EQUEQ_S) +#define FW_WR_EQUEQ_F FW_WR_EQUEQ_V(1U) + +/* flow context identifier (lo) */ +#define FW_WR_FLOWID_S 8 +#define FW_WR_FLOWID_V(x) ((x) << FW_WR_FLOWID_S) + +/* length in units of 16-bytes (lo) */ +#define FW_WR_LEN16_S 0 +#define FW_WR_LEN16_V(x) ((x) << FW_WR_LEN16_S) #define HW_TPL_FR_MT_PR_IV_P_FC 0X32B #define HW_TPL_FR_MT_PR_OV_P_FC 0X327 @@ -166,239 +197,239 @@ struct fw_filter_wr { __u8 sma[6]; }; -#define S_FW_FILTER_WR_TID 12 -#define M_FW_FILTER_WR_TID 0xfffff -#define V_FW_FILTER_WR_TID(x) ((x) << S_FW_FILTER_WR_TID) -#define G_FW_FILTER_WR_TID(x) \ - (((x) >> S_FW_FILTER_WR_TID) & M_FW_FILTER_WR_TID) - -#define S_FW_FILTER_WR_RQTYPE 11 -#define M_FW_FILTER_WR_RQTYPE 0x1 -#define V_FW_FILTER_WR_RQTYPE(x) ((x) << S_FW_FILTER_WR_RQTYPE) -#define G_FW_FILTER_WR_RQTYPE(x) \ - (((x) >> S_FW_FILTER_WR_RQTYPE) & M_FW_FILTER_WR_RQTYPE) -#define F_FW_FILTER_WR_RQTYPE V_FW_FILTER_WR_RQTYPE(1U) - -#define S_FW_FILTER_WR_NOREPLY 10 -#define M_FW_FILTER_WR_NOREPLY 0x1 -#define V_FW_FILTER_WR_NOREPLY(x) ((x) << S_FW_FILTER_WR_NOREPLY) -#define G_FW_FILTER_WR_NOREPLY(x) \ - (((x) >> S_FW_FILTER_WR_NOREPLY) & M_FW_FILTER_WR_NOREPLY) -#define F_FW_FILTER_WR_NOREPLY V_FW_FILTER_WR_NOREPLY(1U) - -#define S_FW_FILTER_WR_IQ 0 -#define M_FW_FILTER_WR_IQ 
0x3ff -#define V_FW_FILTER_WR_IQ(x) ((x) << S_FW_FILTER_WR_IQ) -#define G_FW_FILTER_WR_IQ(x) \ - (((x) >> S_FW_FILTER_WR_IQ) & M_FW_FILTER_WR_IQ) - -#define S_FW_FILTER_WR_DEL_FILTER 31 -#define M_FW_FILTER_WR_DEL_FILTER 0x1 -#define V_FW_FILTER_WR_DEL_FILTER(x) ((x) << S_FW_FILTER_WR_DEL_FILTER) -#define G_FW_FILTER_WR_DEL_FILTER(x) \ - (((x) >> S_FW_FILTER_WR_DEL_FILTER) & M_FW_FILTER_WR_DEL_FILTER) -#define F_FW_FILTER_WR_DEL_FILTER V_FW_FILTER_WR_DEL_FILTER(1U) - -#define S_FW_FILTER_WR_RPTTID 25 -#define M_FW_FILTER_WR_RPTTID 0x1 -#define V_FW_FILTER_WR_RPTTID(x) ((x) << S_FW_FILTER_WR_RPTTID) -#define G_FW_FILTER_WR_RPTTID(x) \ - (((x) >> S_FW_FILTER_WR_RPTTID) & M_FW_FILTER_WR_RPTTID) -#define F_FW_FILTER_WR_RPTTID V_FW_FILTER_WR_RPTTID(1U) - -#define S_FW_FILTER_WR_DROP 24 -#define M_FW_FILTER_WR_DROP 0x1 -#define V_FW_FILTER_WR_DROP(x) ((x) << S_FW_FILTER_WR_DROP) -#define G_FW_FILTER_WR_DROP(x) \ - (((x) >> S_FW_FILTER_WR_DROP) & M_FW_FILTER_WR_DROP) -#define F_FW_FILTER_WR_DROP V_FW_FILTER_WR_DROP(1U) - -#define S_FW_FILTER_WR_DIRSTEER 23 -#define M_FW_FILTER_WR_DIRSTEER 0x1 -#define V_FW_FILTER_WR_DIRSTEER(x) ((x) << S_FW_FILTER_WR_DIRSTEER) -#define G_FW_FILTER_WR_DIRSTEER(x) \ - (((x) >> S_FW_FILTER_WR_DIRSTEER) & M_FW_FILTER_WR_DIRSTEER) -#define F_FW_FILTER_WR_DIRSTEER V_FW_FILTER_WR_DIRSTEER(1U) - -#define S_FW_FILTER_WR_MASKHASH 22 -#define M_FW_FILTER_WR_MASKHASH 0x1 -#define V_FW_FILTER_WR_MASKHASH(x) ((x) << S_FW_FILTER_WR_MASKHASH) -#define G_FW_FILTER_WR_MASKHASH(x) \ - (((x) >> S_FW_FILTER_WR_MASKHASH) & M_FW_FILTER_WR_MASKHASH) -#define F_FW_FILTER_WR_MASKHASH V_FW_FILTER_WR_MASKHASH(1U) - -#define S_FW_FILTER_WR_DIRSTEERHASH 21 -#define M_FW_FILTER_WR_DIRSTEERHASH 0x1 -#define V_FW_FILTER_WR_DIRSTEERHASH(x) ((x) << S_FW_FILTER_WR_DIRSTEERHASH) -#define G_FW_FILTER_WR_DIRSTEERHASH(x) \ - (((x) >> S_FW_FILTER_WR_DIRSTEERHASH) & M_FW_FILTER_WR_DIRSTEERHASH) -#define F_FW_FILTER_WR_DIRSTEERHASH V_FW_FILTER_WR_DIRSTEERHASH(1U) - -#define S_FW_FILTER_WR_LPBK 20 -#define M_FW_FILTER_WR_LPBK 0x1 -#define V_FW_FILTER_WR_LPBK(x) ((x) << S_FW_FILTER_WR_LPBK) -#define G_FW_FILTER_WR_LPBK(x) \ - (((x) >> S_FW_FILTER_WR_LPBK) & M_FW_FILTER_WR_LPBK) -#define F_FW_FILTER_WR_LPBK V_FW_FILTER_WR_LPBK(1U) - -#define S_FW_FILTER_WR_DMAC 19 -#define M_FW_FILTER_WR_DMAC 0x1 -#define V_FW_FILTER_WR_DMAC(x) ((x) << S_FW_FILTER_WR_DMAC) -#define G_FW_FILTER_WR_DMAC(x) \ - (((x) >> S_FW_FILTER_WR_DMAC) & M_FW_FILTER_WR_DMAC) -#define F_FW_FILTER_WR_DMAC V_FW_FILTER_WR_DMAC(1U) - -#define S_FW_FILTER_WR_SMAC 18 -#define M_FW_FILTER_WR_SMAC 0x1 -#define V_FW_FILTER_WR_SMAC(x) ((x) << S_FW_FILTER_WR_SMAC) -#define G_FW_FILTER_WR_SMAC(x) \ - (((x) >> S_FW_FILTER_WR_SMAC) & M_FW_FILTER_WR_SMAC) -#define F_FW_FILTER_WR_SMAC V_FW_FILTER_WR_SMAC(1U) - -#define S_FW_FILTER_WR_INSVLAN 17 -#define M_FW_FILTER_WR_INSVLAN 0x1 -#define V_FW_FILTER_WR_INSVLAN(x) ((x) << S_FW_FILTER_WR_INSVLAN) -#define G_FW_FILTER_WR_INSVLAN(x) \ - (((x) >> S_FW_FILTER_WR_INSVLAN) & M_FW_FILTER_WR_INSVLAN) -#define F_FW_FILTER_WR_INSVLAN V_FW_FILTER_WR_INSVLAN(1U) - -#define S_FW_FILTER_WR_RMVLAN 16 -#define M_FW_FILTER_WR_RMVLAN 0x1 -#define V_FW_FILTER_WR_RMVLAN(x) ((x) << S_FW_FILTER_WR_RMVLAN) -#define G_FW_FILTER_WR_RMVLAN(x) \ - (((x) >> S_FW_FILTER_WR_RMVLAN) & M_FW_FILTER_WR_RMVLAN) -#define F_FW_FILTER_WR_RMVLAN V_FW_FILTER_WR_RMVLAN(1U) - -#define S_FW_FILTER_WR_HITCNTS 15 -#define M_FW_FILTER_WR_HITCNTS 0x1 -#define V_FW_FILTER_WR_HITCNTS(x) ((x) << S_FW_FILTER_WR_HITCNTS) -#define G_FW_FILTER_WR_HITCNTS(x) \ 
- (((x) >> S_FW_FILTER_WR_HITCNTS) & M_FW_FILTER_WR_HITCNTS) -#define F_FW_FILTER_WR_HITCNTS V_FW_FILTER_WR_HITCNTS(1U) - -#define S_FW_FILTER_WR_TXCHAN 13 -#define M_FW_FILTER_WR_TXCHAN 0x3 -#define V_FW_FILTER_WR_TXCHAN(x) ((x) << S_FW_FILTER_WR_TXCHAN) -#define G_FW_FILTER_WR_TXCHAN(x) \ - (((x) >> S_FW_FILTER_WR_TXCHAN) & M_FW_FILTER_WR_TXCHAN) - -#define S_FW_FILTER_WR_PRIO 12 -#define M_FW_FILTER_WR_PRIO 0x1 -#define V_FW_FILTER_WR_PRIO(x) ((x) << S_FW_FILTER_WR_PRIO) -#define G_FW_FILTER_WR_PRIO(x) \ - (((x) >> S_FW_FILTER_WR_PRIO) & M_FW_FILTER_WR_PRIO) -#define F_FW_FILTER_WR_PRIO V_FW_FILTER_WR_PRIO(1U) - -#define S_FW_FILTER_WR_L2TIX 0 -#define M_FW_FILTER_WR_L2TIX 0xfff -#define V_FW_FILTER_WR_L2TIX(x) ((x) << S_FW_FILTER_WR_L2TIX) -#define G_FW_FILTER_WR_L2TIX(x) \ - (((x) >> S_FW_FILTER_WR_L2TIX) & M_FW_FILTER_WR_L2TIX) - -#define S_FW_FILTER_WR_FRAG 7 -#define M_FW_FILTER_WR_FRAG 0x1 -#define V_FW_FILTER_WR_FRAG(x) ((x) << S_FW_FILTER_WR_FRAG) -#define G_FW_FILTER_WR_FRAG(x) \ - (((x) >> S_FW_FILTER_WR_FRAG) & M_FW_FILTER_WR_FRAG) -#define F_FW_FILTER_WR_FRAG V_FW_FILTER_WR_FRAG(1U) - -#define S_FW_FILTER_WR_FRAGM 6 -#define M_FW_FILTER_WR_FRAGM 0x1 -#define V_FW_FILTER_WR_FRAGM(x) ((x) << S_FW_FILTER_WR_FRAGM) -#define G_FW_FILTER_WR_FRAGM(x) \ - (((x) >> S_FW_FILTER_WR_FRAGM) & M_FW_FILTER_WR_FRAGM) -#define F_FW_FILTER_WR_FRAGM V_FW_FILTER_WR_FRAGM(1U) - -#define S_FW_FILTER_WR_IVLAN_VLD 5 -#define M_FW_FILTER_WR_IVLAN_VLD 0x1 -#define V_FW_FILTER_WR_IVLAN_VLD(x) ((x) << S_FW_FILTER_WR_IVLAN_VLD) -#define G_FW_FILTER_WR_IVLAN_VLD(x) \ - (((x) >> S_FW_FILTER_WR_IVLAN_VLD) & M_FW_FILTER_WR_IVLAN_VLD) -#define F_FW_FILTER_WR_IVLAN_VLD V_FW_FILTER_WR_IVLAN_VLD(1U) - -#define S_FW_FILTER_WR_OVLAN_VLD 4 -#define M_FW_FILTER_WR_OVLAN_VLD 0x1 -#define V_FW_FILTER_WR_OVLAN_VLD(x) ((x) << S_FW_FILTER_WR_OVLAN_VLD) -#define G_FW_FILTER_WR_OVLAN_VLD(x) \ - (((x) >> S_FW_FILTER_WR_OVLAN_VLD) & M_FW_FILTER_WR_OVLAN_VLD) -#define F_FW_FILTER_WR_OVLAN_VLD V_FW_FILTER_WR_OVLAN_VLD(1U) - -#define S_FW_FILTER_WR_IVLAN_VLDM 3 -#define M_FW_FILTER_WR_IVLAN_VLDM 0x1 -#define V_FW_FILTER_WR_IVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_IVLAN_VLDM) -#define G_FW_FILTER_WR_IVLAN_VLDM(x) \ - (((x) >> S_FW_FILTER_WR_IVLAN_VLDM) & M_FW_FILTER_WR_IVLAN_VLDM) -#define F_FW_FILTER_WR_IVLAN_VLDM V_FW_FILTER_WR_IVLAN_VLDM(1U) - -#define S_FW_FILTER_WR_OVLAN_VLDM 2 -#define M_FW_FILTER_WR_OVLAN_VLDM 0x1 -#define V_FW_FILTER_WR_OVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_OVLAN_VLDM) -#define G_FW_FILTER_WR_OVLAN_VLDM(x) \ - (((x) >> S_FW_FILTER_WR_OVLAN_VLDM) & M_FW_FILTER_WR_OVLAN_VLDM) -#define F_FW_FILTER_WR_OVLAN_VLDM V_FW_FILTER_WR_OVLAN_VLDM(1U) - -#define S_FW_FILTER_WR_RX_CHAN 15 -#define M_FW_FILTER_WR_RX_CHAN 0x1 -#define V_FW_FILTER_WR_RX_CHAN(x) ((x) << S_FW_FILTER_WR_RX_CHAN) -#define G_FW_FILTER_WR_RX_CHAN(x) \ - (((x) >> S_FW_FILTER_WR_RX_CHAN) & M_FW_FILTER_WR_RX_CHAN) -#define F_FW_FILTER_WR_RX_CHAN V_FW_FILTER_WR_RX_CHAN(1U) - -#define S_FW_FILTER_WR_RX_RPL_IQ 0 -#define M_FW_FILTER_WR_RX_RPL_IQ 0x3ff -#define V_FW_FILTER_WR_RX_RPL_IQ(x) ((x) << S_FW_FILTER_WR_RX_RPL_IQ) -#define G_FW_FILTER_WR_RX_RPL_IQ(x) \ - (((x) >> S_FW_FILTER_WR_RX_RPL_IQ) & M_FW_FILTER_WR_RX_RPL_IQ) - -#define S_FW_FILTER_WR_MACI 23 -#define M_FW_FILTER_WR_MACI 0x1ff -#define V_FW_FILTER_WR_MACI(x) ((x) << S_FW_FILTER_WR_MACI) -#define G_FW_FILTER_WR_MACI(x) \ - (((x) >> S_FW_FILTER_WR_MACI) & M_FW_FILTER_WR_MACI) - -#define S_FW_FILTER_WR_MACIM 14 -#define M_FW_FILTER_WR_MACIM 0x1ff -#define V_FW_FILTER_WR_MACIM(x) ((x) << 
S_FW_FILTER_WR_MACIM) -#define G_FW_FILTER_WR_MACIM(x) \ - (((x) >> S_FW_FILTER_WR_MACIM) & M_FW_FILTER_WR_MACIM) - -#define S_FW_FILTER_WR_FCOE 13 -#define M_FW_FILTER_WR_FCOE 0x1 -#define V_FW_FILTER_WR_FCOE(x) ((x) << S_FW_FILTER_WR_FCOE) -#define G_FW_FILTER_WR_FCOE(x) \ - (((x) >> S_FW_FILTER_WR_FCOE) & M_FW_FILTER_WR_FCOE) -#define F_FW_FILTER_WR_FCOE V_FW_FILTER_WR_FCOE(1U) - -#define S_FW_FILTER_WR_FCOEM 12 -#define M_FW_FILTER_WR_FCOEM 0x1 -#define V_FW_FILTER_WR_FCOEM(x) ((x) << S_FW_FILTER_WR_FCOEM) -#define G_FW_FILTER_WR_FCOEM(x) \ - (((x) >> S_FW_FILTER_WR_FCOEM) & M_FW_FILTER_WR_FCOEM) -#define F_FW_FILTER_WR_FCOEM V_FW_FILTER_WR_FCOEM(1U) - -#define S_FW_FILTER_WR_PORT 9 -#define M_FW_FILTER_WR_PORT 0x7 -#define V_FW_FILTER_WR_PORT(x) ((x) << S_FW_FILTER_WR_PORT) -#define G_FW_FILTER_WR_PORT(x) \ - (((x) >> S_FW_FILTER_WR_PORT) & M_FW_FILTER_WR_PORT) - -#define S_FW_FILTER_WR_PORTM 6 -#define M_FW_FILTER_WR_PORTM 0x7 -#define V_FW_FILTER_WR_PORTM(x) ((x) << S_FW_FILTER_WR_PORTM) -#define G_FW_FILTER_WR_PORTM(x) \ - (((x) >> S_FW_FILTER_WR_PORTM) & M_FW_FILTER_WR_PORTM) - -#define S_FW_FILTER_WR_MATCHTYPE 3 -#define M_FW_FILTER_WR_MATCHTYPE 0x7 -#define V_FW_FILTER_WR_MATCHTYPE(x) ((x) << S_FW_FILTER_WR_MATCHTYPE) -#define G_FW_FILTER_WR_MATCHTYPE(x) \ - (((x) >> S_FW_FILTER_WR_MATCHTYPE) & M_FW_FILTER_WR_MATCHTYPE) - -#define S_FW_FILTER_WR_MATCHTYPEM 0 -#define M_FW_FILTER_WR_MATCHTYPEM 0x7 -#define V_FW_FILTER_WR_MATCHTYPEM(x) ((x) << S_FW_FILTER_WR_MATCHTYPEM) -#define G_FW_FILTER_WR_MATCHTYPEM(x) \ - (((x) >> S_FW_FILTER_WR_MATCHTYPEM) & M_FW_FILTER_WR_MATCHTYPEM) +#define FW_FILTER_WR_TID_S 12 +#define FW_FILTER_WR_TID_M 0xfffff +#define FW_FILTER_WR_TID_V(x) ((x) << FW_FILTER_WR_TID_S) +#define FW_FILTER_WR_TID_G(x) \ + (((x) >> FW_FILTER_WR_TID_S) & FW_FILTER_WR_TID_M) + +#define FW_FILTER_WR_RQTYPE_S 11 +#define FW_FILTER_WR_RQTYPE_M 0x1 +#define FW_FILTER_WR_RQTYPE_V(x) ((x) << FW_FILTER_WR_RQTYPE_S) +#define FW_FILTER_WR_RQTYPE_G(x) \ + (((x) >> FW_FILTER_WR_RQTYPE_S) & FW_FILTER_WR_RQTYPE_M) +#define FW_FILTER_WR_RQTYPE_F FW_FILTER_WR_RQTYPE_V(1U) + +#define FW_FILTER_WR_NOREPLY_S 10 +#define FW_FILTER_WR_NOREPLY_M 0x1 +#define FW_FILTER_WR_NOREPLY_V(x) ((x) << FW_FILTER_WR_NOREPLY_S) +#define FW_FILTER_WR_NOREPLY_G(x) \ + (((x) >> FW_FILTER_WR_NOREPLY_S) & FW_FILTER_WR_NOREPLY_M) +#define FW_FILTER_WR_NOREPLY_F FW_FILTER_WR_NOREPLY_V(1U) + +#define FW_FILTER_WR_IQ_S 0 +#define FW_FILTER_WR_IQ_M 0x3ff +#define FW_FILTER_WR_IQ_V(x) ((x) << FW_FILTER_WR_IQ_S) +#define FW_FILTER_WR_IQ_G(x) \ + (((x) >> FW_FILTER_WR_IQ_S) & FW_FILTER_WR_IQ_M) + +#define FW_FILTER_WR_DEL_FILTER_S 31 +#define FW_FILTER_WR_DEL_FILTER_M 0x1 +#define FW_FILTER_WR_DEL_FILTER_V(x) ((x) << FW_FILTER_WR_DEL_FILTER_S) +#define FW_FILTER_WR_DEL_FILTER_G(x) \ + (((x) >> FW_FILTER_WR_DEL_FILTER_S) & FW_FILTER_WR_DEL_FILTER_M) +#define FW_FILTER_WR_DEL_FILTER_F FW_FILTER_WR_DEL_FILTER_V(1U) + +#define FW_FILTER_WR_RPTTID_S 25 +#define FW_FILTER_WR_RPTTID_M 0x1 +#define FW_FILTER_WR_RPTTID_V(x) ((x) << FW_FILTER_WR_RPTTID_S) +#define FW_FILTER_WR_RPTTID_G(x) \ + (((x) >> FW_FILTER_WR_RPTTID_S) & FW_FILTER_WR_RPTTID_M) +#define FW_FILTER_WR_RPTTID_F FW_FILTER_WR_RPTTID_V(1U) + +#define FW_FILTER_WR_DROP_S 24 +#define FW_FILTER_WR_DROP_M 0x1 +#define FW_FILTER_WR_DROP_V(x) ((x) << FW_FILTER_WR_DROP_S) +#define FW_FILTER_WR_DROP_G(x) \ + (((x) >> FW_FILTER_WR_DROP_S) & FW_FILTER_WR_DROP_M) +#define FW_FILTER_WR_DROP_F FW_FILTER_WR_DROP_V(1U) + +#define FW_FILTER_WR_DIRSTEER_S 23 +#define 
FW_FILTER_WR_DIRSTEER_M 0x1 +#define FW_FILTER_WR_DIRSTEER_V(x) ((x) << FW_FILTER_WR_DIRSTEER_S) +#define FW_FILTER_WR_DIRSTEER_G(x) \ + (((x) >> FW_FILTER_WR_DIRSTEER_S) & FW_FILTER_WR_DIRSTEER_M) +#define FW_FILTER_WR_DIRSTEER_F FW_FILTER_WR_DIRSTEER_V(1U) + +#define FW_FILTER_WR_MASKHASH_S 22 +#define FW_FILTER_WR_MASKHASH_M 0x1 +#define FW_FILTER_WR_MASKHASH_V(x) ((x) << FW_FILTER_WR_MASKHASH_S) +#define FW_FILTER_WR_MASKHASH_G(x) \ + (((x) >> FW_FILTER_WR_MASKHASH_S) & FW_FILTER_WR_MASKHASH_M) +#define FW_FILTER_WR_MASKHASH_F FW_FILTER_WR_MASKHASH_V(1U) + +#define FW_FILTER_WR_DIRSTEERHASH_S 21 +#define FW_FILTER_WR_DIRSTEERHASH_M 0x1 +#define FW_FILTER_WR_DIRSTEERHASH_V(x) ((x) << FW_FILTER_WR_DIRSTEERHASH_S) +#define FW_FILTER_WR_DIRSTEERHASH_G(x) \ + (((x) >> FW_FILTER_WR_DIRSTEERHASH_S) & FW_FILTER_WR_DIRSTEERHASH_M) +#define FW_FILTER_WR_DIRSTEERHASH_F FW_FILTER_WR_DIRSTEERHASH_V(1U) + +#define FW_FILTER_WR_LPBK_S 20 +#define FW_FILTER_WR_LPBK_M 0x1 +#define FW_FILTER_WR_LPBK_V(x) ((x) << FW_FILTER_WR_LPBK_S) +#define FW_FILTER_WR_LPBK_G(x) \ + (((x) >> FW_FILTER_WR_LPBK_S) & FW_FILTER_WR_LPBK_M) +#define FW_FILTER_WR_LPBK_F FW_FILTER_WR_LPBK_V(1U) + +#define FW_FILTER_WR_DMAC_S 19 +#define FW_FILTER_WR_DMAC_M 0x1 +#define FW_FILTER_WR_DMAC_V(x) ((x) << FW_FILTER_WR_DMAC_S) +#define FW_FILTER_WR_DMAC_G(x) \ + (((x) >> FW_FILTER_WR_DMAC_S) & FW_FILTER_WR_DMAC_M) +#define FW_FILTER_WR_DMAC_F FW_FILTER_WR_DMAC_V(1U) + +#define FW_FILTER_WR_SMAC_S 18 +#define FW_FILTER_WR_SMAC_M 0x1 +#define FW_FILTER_WR_SMAC_V(x) ((x) << FW_FILTER_WR_SMAC_S) +#define FW_FILTER_WR_SMAC_G(x) \ + (((x) >> FW_FILTER_WR_SMAC_S) & FW_FILTER_WR_SMAC_M) +#define FW_FILTER_WR_SMAC_F FW_FILTER_WR_SMAC_V(1U) + +#define FW_FILTER_WR_INSVLAN_S 17 +#define FW_FILTER_WR_INSVLAN_M 0x1 +#define FW_FILTER_WR_INSVLAN_V(x) ((x) << FW_FILTER_WR_INSVLAN_S) +#define FW_FILTER_WR_INSVLAN_G(x) \ + (((x) >> FW_FILTER_WR_INSVLAN_S) & FW_FILTER_WR_INSVLAN_M) +#define FW_FILTER_WR_INSVLAN_F FW_FILTER_WR_INSVLAN_V(1U) + +#define FW_FILTER_WR_RMVLAN_S 16 +#define FW_FILTER_WR_RMVLAN_M 0x1 +#define FW_FILTER_WR_RMVLAN_V(x) ((x) << FW_FILTER_WR_RMVLAN_S) +#define FW_FILTER_WR_RMVLAN_G(x) \ + (((x) >> FW_FILTER_WR_RMVLAN_S) & FW_FILTER_WR_RMVLAN_M) +#define FW_FILTER_WR_RMVLAN_F FW_FILTER_WR_RMVLAN_V(1U) + +#define FW_FILTER_WR_HITCNTS_S 15 +#define FW_FILTER_WR_HITCNTS_M 0x1 +#define FW_FILTER_WR_HITCNTS_V(x) ((x) << FW_FILTER_WR_HITCNTS_S) +#define FW_FILTER_WR_HITCNTS_G(x) \ + (((x) >> FW_FILTER_WR_HITCNTS_S) & FW_FILTER_WR_HITCNTS_M) +#define FW_FILTER_WR_HITCNTS_F FW_FILTER_WR_HITCNTS_V(1U) + +#define FW_FILTER_WR_TXCHAN_S 13 +#define FW_FILTER_WR_TXCHAN_M 0x3 +#define FW_FILTER_WR_TXCHAN_V(x) ((x) << FW_FILTER_WR_TXCHAN_S) +#define FW_FILTER_WR_TXCHAN_G(x) \ + (((x) >> FW_FILTER_WR_TXCHAN_S) & FW_FILTER_WR_TXCHAN_M) + +#define FW_FILTER_WR_PRIO_S 12 +#define FW_FILTER_WR_PRIO_M 0x1 +#define FW_FILTER_WR_PRIO_V(x) ((x) << FW_FILTER_WR_PRIO_S) +#define FW_FILTER_WR_PRIO_G(x) \ + (((x) >> FW_FILTER_WR_PRIO_S) & FW_FILTER_WR_PRIO_M) +#define FW_FILTER_WR_PRIO_F FW_FILTER_WR_PRIO_V(1U) + +#define FW_FILTER_WR_L2TIX_S 0 +#define FW_FILTER_WR_L2TIX_M 0xfff +#define FW_FILTER_WR_L2TIX_V(x) ((x) << FW_FILTER_WR_L2TIX_S) +#define FW_FILTER_WR_L2TIX_G(x) \ + (((x) >> FW_FILTER_WR_L2TIX_S) & FW_FILTER_WR_L2TIX_M) + +#define FW_FILTER_WR_FRAG_S 7 +#define FW_FILTER_WR_FRAG_M 0x1 +#define FW_FILTER_WR_FRAG_V(x) ((x) << FW_FILTER_WR_FRAG_S) +#define FW_FILTER_WR_FRAG_G(x) \ + (((x) >> FW_FILTER_WR_FRAG_S) & FW_FILTER_WR_FRAG_M) +#define 
FW_FILTER_WR_FRAG_F FW_FILTER_WR_FRAG_V(1U) + +#define FW_FILTER_WR_FRAGM_S 6 +#define FW_FILTER_WR_FRAGM_M 0x1 +#define FW_FILTER_WR_FRAGM_V(x) ((x) << FW_FILTER_WR_FRAGM_S) +#define FW_FILTER_WR_FRAGM_G(x) \ + (((x) >> FW_FILTER_WR_FRAGM_S) & FW_FILTER_WR_FRAGM_M) +#define FW_FILTER_WR_FRAGM_F FW_FILTER_WR_FRAGM_V(1U) + +#define FW_FILTER_WR_IVLAN_VLD_S 5 +#define FW_FILTER_WR_IVLAN_VLD_M 0x1 +#define FW_FILTER_WR_IVLAN_VLD_V(x) ((x) << FW_FILTER_WR_IVLAN_VLD_S) +#define FW_FILTER_WR_IVLAN_VLD_G(x) \ + (((x) >> FW_FILTER_WR_IVLAN_VLD_S) & FW_FILTER_WR_IVLAN_VLD_M) +#define FW_FILTER_WR_IVLAN_VLD_F FW_FILTER_WR_IVLAN_VLD_V(1U) + +#define FW_FILTER_WR_OVLAN_VLD_S 4 +#define FW_FILTER_WR_OVLAN_VLD_M 0x1 +#define FW_FILTER_WR_OVLAN_VLD_V(x) ((x) << FW_FILTER_WR_OVLAN_VLD_S) +#define FW_FILTER_WR_OVLAN_VLD_G(x) \ + (((x) >> FW_FILTER_WR_OVLAN_VLD_S) & FW_FILTER_WR_OVLAN_VLD_M) +#define FW_FILTER_WR_OVLAN_VLD_F FW_FILTER_WR_OVLAN_VLD_V(1U) + +#define FW_FILTER_WR_IVLAN_VLDM_S 3 +#define FW_FILTER_WR_IVLAN_VLDM_M 0x1 +#define FW_FILTER_WR_IVLAN_VLDM_V(x) ((x) << FW_FILTER_WR_IVLAN_VLDM_S) +#define FW_FILTER_WR_IVLAN_VLDM_G(x) \ + (((x) >> FW_FILTER_WR_IVLAN_VLDM_S) & FW_FILTER_WR_IVLAN_VLDM_M) +#define FW_FILTER_WR_IVLAN_VLDM_F FW_FILTER_WR_IVLAN_VLDM_V(1U) + +#define FW_FILTER_WR_OVLAN_VLDM_S 2 +#define FW_FILTER_WR_OVLAN_VLDM_M 0x1 +#define FW_FILTER_WR_OVLAN_VLDM_V(x) ((x) << FW_FILTER_WR_OVLAN_VLDM_S) +#define FW_FILTER_WR_OVLAN_VLDM_G(x) \ + (((x) >> FW_FILTER_WR_OVLAN_VLDM_S) & FW_FILTER_WR_OVLAN_VLDM_M) +#define FW_FILTER_WR_OVLAN_VLDM_F FW_FILTER_WR_OVLAN_VLDM_V(1U) + +#define FW_FILTER_WR_RX_CHAN_S 15 +#define FW_FILTER_WR_RX_CHAN_M 0x1 +#define FW_FILTER_WR_RX_CHAN_V(x) ((x) << FW_FILTER_WR_RX_CHAN_S) +#define FW_FILTER_WR_RX_CHAN_G(x) \ + (((x) >> FW_FILTER_WR_RX_CHAN_S) & FW_FILTER_WR_RX_CHAN_M) +#define FW_FILTER_WR_RX_CHAN_F FW_FILTER_WR_RX_CHAN_V(1U) + +#define FW_FILTER_WR_RX_RPL_IQ_S 0 +#define FW_FILTER_WR_RX_RPL_IQ_M 0x3ff +#define FW_FILTER_WR_RX_RPL_IQ_V(x) ((x) << FW_FILTER_WR_RX_RPL_IQ_S) +#define FW_FILTER_WR_RX_RPL_IQ_G(x) \ + (((x) >> FW_FILTER_WR_RX_RPL_IQ_S) & FW_FILTER_WR_RX_RPL_IQ_M) + +#define FW_FILTER_WR_MACI_S 23 +#define FW_FILTER_WR_MACI_M 0x1ff +#define FW_FILTER_WR_MACI_V(x) ((x) << FW_FILTER_WR_MACI_S) +#define FW_FILTER_WR_MACI_G(x) \ + (((x) >> FW_FILTER_WR_MACI_S) & FW_FILTER_WR_MACI_M) + +#define FW_FILTER_WR_MACIM_S 14 +#define FW_FILTER_WR_MACIM_M 0x1ff +#define FW_FILTER_WR_MACIM_V(x) ((x) << FW_FILTER_WR_MACIM_S) +#define FW_FILTER_WR_MACIM_G(x) \ + (((x) >> FW_FILTER_WR_MACIM_S) & FW_FILTER_WR_MACIM_M) + +#define FW_FILTER_WR_FCOE_S 13 +#define FW_FILTER_WR_FCOE_M 0x1 +#define FW_FILTER_WR_FCOE_V(x) ((x) << FW_FILTER_WR_FCOE_S) +#define FW_FILTER_WR_FCOE_G(x) \ + (((x) >> FW_FILTER_WR_FCOE_S) & FW_FILTER_WR_FCOE_M) +#define FW_FILTER_WR_FCOE_F FW_FILTER_WR_FCOE_V(1U) + +#define FW_FILTER_WR_FCOEM_S 12 +#define FW_FILTER_WR_FCOEM_M 0x1 +#define FW_FILTER_WR_FCOEM_V(x) ((x) << FW_FILTER_WR_FCOEM_S) +#define FW_FILTER_WR_FCOEM_G(x) \ + (((x) >> FW_FILTER_WR_FCOEM_S) & FW_FILTER_WR_FCOEM_M) +#define FW_FILTER_WR_FCOEM_F FW_FILTER_WR_FCOEM_V(1U) + +#define FW_FILTER_WR_PORT_S 9 +#define FW_FILTER_WR_PORT_M 0x7 +#define FW_FILTER_WR_PORT_V(x) ((x) << FW_FILTER_WR_PORT_S) +#define FW_FILTER_WR_PORT_G(x) \ + (((x) >> FW_FILTER_WR_PORT_S) & FW_FILTER_WR_PORT_M) + +#define FW_FILTER_WR_PORTM_S 6 +#define FW_FILTER_WR_PORTM_M 0x7 +#define FW_FILTER_WR_PORTM_V(x) ((x) << FW_FILTER_WR_PORTM_S) +#define FW_FILTER_WR_PORTM_G(x) \ + (((x) >> FW_FILTER_WR_PORTM_S) 
& FW_FILTER_WR_PORTM_M) + +#define FW_FILTER_WR_MATCHTYPE_S 3 +#define FW_FILTER_WR_MATCHTYPE_M 0x7 +#define FW_FILTER_WR_MATCHTYPE_V(x) ((x) << FW_FILTER_WR_MATCHTYPE_S) +#define FW_FILTER_WR_MATCHTYPE_G(x) \ + (((x) >> FW_FILTER_WR_MATCHTYPE_S) & FW_FILTER_WR_MATCHTYPE_M) + +#define FW_FILTER_WR_MATCHTYPEM_S 0 +#define FW_FILTER_WR_MATCHTYPEM_M 0x7 +#define FW_FILTER_WR_MATCHTYPEM_V(x) ((x) << FW_FILTER_WR_MATCHTYPEM_S) +#define FW_FILTER_WR_MATCHTYPEM_G(x) \ + (((x) >> FW_FILTER_WR_MATCHTYPEM_S) & FW_FILTER_WR_MATCHTYPEM_M) struct fw_ulptx_wr { __be32 op_to_compl; @@ -460,65 +491,65 @@ struct fw_ofld_connection_wr { } tcb; }; -#define S_FW_OFLD_CONNECTION_WR_VERSION 31 -#define M_FW_OFLD_CONNECTION_WR_VERSION 0x1 -#define V_FW_OFLD_CONNECTION_WR_VERSION(x) \ - ((x) << S_FW_OFLD_CONNECTION_WR_VERSION) -#define G_FW_OFLD_CONNECTION_WR_VERSION(x) \ - (((x) >> S_FW_OFLD_CONNECTION_WR_VERSION) & \ - M_FW_OFLD_CONNECTION_WR_VERSION) -#define F_FW_OFLD_CONNECTION_WR_VERSION \ - V_FW_OFLD_CONNECTION_WR_VERSION(1U) - -#define S_FW_OFLD_CONNECTION_WR_CPL 30 -#define M_FW_OFLD_CONNECTION_WR_CPL 0x1 -#define V_FW_OFLD_CONNECTION_WR_CPL(x) ((x) << S_FW_OFLD_CONNECTION_WR_CPL) -#define G_FW_OFLD_CONNECTION_WR_CPL(x) \ - (((x) >> S_FW_OFLD_CONNECTION_WR_CPL) & M_FW_OFLD_CONNECTION_WR_CPL) -#define F_FW_OFLD_CONNECTION_WR_CPL V_FW_OFLD_CONNECTION_WR_CPL(1U) - -#define S_FW_OFLD_CONNECTION_WR_T_STATE 28 -#define M_FW_OFLD_CONNECTION_WR_T_STATE 0xf -#define V_FW_OFLD_CONNECTION_WR_T_STATE(x) \ - ((x) << S_FW_OFLD_CONNECTION_WR_T_STATE) -#define G_FW_OFLD_CONNECTION_WR_T_STATE(x) \ - (((x) >> S_FW_OFLD_CONNECTION_WR_T_STATE) & \ - M_FW_OFLD_CONNECTION_WR_T_STATE) - -#define S_FW_OFLD_CONNECTION_WR_RCV_SCALE 24 -#define M_FW_OFLD_CONNECTION_WR_RCV_SCALE 0xf -#define V_FW_OFLD_CONNECTION_WR_RCV_SCALE(x) \ - ((x) << S_FW_OFLD_CONNECTION_WR_RCV_SCALE) -#define G_FW_OFLD_CONNECTION_WR_RCV_SCALE(x) \ - (((x) >> S_FW_OFLD_CONNECTION_WR_RCV_SCALE) & \ - M_FW_OFLD_CONNECTION_WR_RCV_SCALE) - -#define S_FW_OFLD_CONNECTION_WR_ASTID 0 -#define M_FW_OFLD_CONNECTION_WR_ASTID 0xffffff -#define V_FW_OFLD_CONNECTION_WR_ASTID(x) \ - ((x) << S_FW_OFLD_CONNECTION_WR_ASTID) -#define G_FW_OFLD_CONNECTION_WR_ASTID(x) \ - (((x) >> S_FW_OFLD_CONNECTION_WR_ASTID) & M_FW_OFLD_CONNECTION_WR_ASTID) - -#define S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK 15 -#define M_FW_OFLD_CONNECTION_WR_CPLRXDATAACK 0x1 -#define V_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(x) \ - ((x) << S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK) -#define G_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(x) \ - (((x) >> S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK) & \ - M_FW_OFLD_CONNECTION_WR_CPLRXDATAACK) -#define F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK \ - V_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(1U) - -#define S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL 14 -#define M_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL 0x1 -#define V_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(x) \ - ((x) << S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL) -#define G_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(x) \ - (((x) >> S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL) & \ - M_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL) -#define F_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL \ - V_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(1U) +#define FW_OFLD_CONNECTION_WR_VERSION_S 31 +#define FW_OFLD_CONNECTION_WR_VERSION_M 0x1 +#define FW_OFLD_CONNECTION_WR_VERSION_V(x) \ + ((x) << FW_OFLD_CONNECTION_WR_VERSION_S) +#define FW_OFLD_CONNECTION_WR_VERSION_G(x) \ + (((x) >> FW_OFLD_CONNECTION_WR_VERSION_S) & \ + FW_OFLD_CONNECTION_WR_VERSION_M) +#define FW_OFLD_CONNECTION_WR_VERSION_F \ + 
FW_OFLD_CONNECTION_WR_VERSION_V(1U) + +#define FW_OFLD_CONNECTION_WR_CPL_S 30 +#define FW_OFLD_CONNECTION_WR_CPL_M 0x1 +#define FW_OFLD_CONNECTION_WR_CPL_V(x) ((x) << FW_OFLD_CONNECTION_WR_CPL_S) +#define FW_OFLD_CONNECTION_WR_CPL_G(x) \ + (((x) >> FW_OFLD_CONNECTION_WR_CPL_S) & FW_OFLD_CONNECTION_WR_CPL_M) +#define FW_OFLD_CONNECTION_WR_CPL_F FW_OFLD_CONNECTION_WR_CPL_V(1U) + +#define FW_OFLD_CONNECTION_WR_T_STATE_S 28 +#define FW_OFLD_CONNECTION_WR_T_STATE_M 0xf +#define FW_OFLD_CONNECTION_WR_T_STATE_V(x) \ + ((x) << FW_OFLD_CONNECTION_WR_T_STATE_S) +#define FW_OFLD_CONNECTION_WR_T_STATE_G(x) \ + (((x) >> FW_OFLD_CONNECTION_WR_T_STATE_S) & \ + FW_OFLD_CONNECTION_WR_T_STATE_M) + +#define FW_OFLD_CONNECTION_WR_RCV_SCALE_S 24 +#define FW_OFLD_CONNECTION_WR_RCV_SCALE_M 0xf +#define FW_OFLD_CONNECTION_WR_RCV_SCALE_V(x) \ + ((x) << FW_OFLD_CONNECTION_WR_RCV_SCALE_S) +#define FW_OFLD_CONNECTION_WR_RCV_SCALE_G(x) \ + (((x) >> FW_OFLD_CONNECTION_WR_RCV_SCALE_S) & \ + FW_OFLD_CONNECTION_WR_RCV_SCALE_M) + +#define FW_OFLD_CONNECTION_WR_ASTID_S 0 +#define FW_OFLD_CONNECTION_WR_ASTID_M 0xffffff +#define FW_OFLD_CONNECTION_WR_ASTID_V(x) \ + ((x) << FW_OFLD_CONNECTION_WR_ASTID_S) +#define FW_OFLD_CONNECTION_WR_ASTID_G(x) \ + (((x) >> FW_OFLD_CONNECTION_WR_ASTID_S) & FW_OFLD_CONNECTION_WR_ASTID_M) + +#define FW_OFLD_CONNECTION_WR_CPLRXDATAACK_S 15 +#define FW_OFLD_CONNECTION_WR_CPLRXDATAACK_M 0x1 +#define FW_OFLD_CONNECTION_WR_CPLRXDATAACK_V(x) \ + ((x) << FW_OFLD_CONNECTION_WR_CPLRXDATAACK_S) +#define FW_OFLD_CONNECTION_WR_CPLRXDATAACK_G(x) \ + (((x) >> FW_OFLD_CONNECTION_WR_CPLRXDATAACK_S) & \ + FW_OFLD_CONNECTION_WR_CPLRXDATAACK_M) +#define FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F \ + FW_OFLD_CONNECTION_WR_CPLRXDATAACK_V(1U) + +#define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_S 14 +#define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_M 0x1 +#define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_V(x) \ + ((x) << FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_S) +#define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_G(x) \ + (((x) >> FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_S) & \ + FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_M) +#define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_F \ + FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_V(1U) enum fw_flowc_mnem { FW_FLOWC_MNEM_PFNVFN, /* PFN [15:8] VFN [7:0] */ @@ -539,33 +570,56 @@ struct fw_flowc_mnemval { struct fw_flowc_wr { __be32 op_to_nparams; -#define FW_FLOWC_WR_NPARAMS(x) ((x) << 0) __be32 flowid_len16; struct fw_flowc_mnemval mnemval[0]; }; +#define FW_FLOWC_WR_NPARAMS_S 0 +#define FW_FLOWC_WR_NPARAMS_V(x) ((x) << FW_FLOWC_WR_NPARAMS_S) + struct fw_ofld_tx_data_wr { __be32 op_to_immdlen; __be32 flowid_len16; __be32 plen; __be32 tunnel_to_proxy; -#define FW_OFLD_TX_DATA_WR_TUNNEL(x) ((x) << 19) -#define FW_OFLD_TX_DATA_WR_SAVE(x) ((x) << 18) -#define FW_OFLD_TX_DATA_WR_FLUSH(x) ((x) << 17) -#define FW_OFLD_TX_DATA_WR_URGENT(x) ((x) << 16) -#define FW_OFLD_TX_DATA_WR_MORE(x) ((x) << 15) -#define FW_OFLD_TX_DATA_WR_SHOVE(x) ((x) << 14) -#define FW_OFLD_TX_DATA_WR_ULPMODE(x) ((x) << 10) -#define FW_OFLD_TX_DATA_WR_ULPSUBMODE(x) ((x) << 6) }; +#define FW_OFLD_TX_DATA_WR_TUNNEL_S 19 +#define FW_OFLD_TX_DATA_WR_TUNNEL_V(x) ((x) << FW_OFLD_TX_DATA_WR_TUNNEL_S) + +#define FW_OFLD_TX_DATA_WR_SAVE_S 18 +#define FW_OFLD_TX_DATA_WR_SAVE_V(x) ((x) << FW_OFLD_TX_DATA_WR_SAVE_S) + +#define FW_OFLD_TX_DATA_WR_FLUSH_S 17 +#define FW_OFLD_TX_DATA_WR_FLUSH_V(x) ((x) << FW_OFLD_TX_DATA_WR_FLUSH_S) +#define FW_OFLD_TX_DATA_WR_FLUSH_F FW_OFLD_TX_DATA_WR_FLUSH_V(1U) + +#define FW_OFLD_TX_DATA_WR_URGENT_S 16 +#define 
FW_OFLD_TX_DATA_WR_URGENT_V(x) ((x) << FW_OFLD_TX_DATA_WR_URGENT_S) + +#define FW_OFLD_TX_DATA_WR_MORE_S 15 +#define FW_OFLD_TX_DATA_WR_MORE_V(x) ((x) << FW_OFLD_TX_DATA_WR_MORE_S) + +#define FW_OFLD_TX_DATA_WR_SHOVE_S 14 +#define FW_OFLD_TX_DATA_WR_SHOVE_V(x) ((x) << FW_OFLD_TX_DATA_WR_SHOVE_S) +#define FW_OFLD_TX_DATA_WR_SHOVE_F FW_OFLD_TX_DATA_WR_SHOVE_V(1U) + +#define FW_OFLD_TX_DATA_WR_ULPMODE_S 10 +#define FW_OFLD_TX_DATA_WR_ULPMODE_V(x) ((x) << FW_OFLD_TX_DATA_WR_ULPMODE_S) + +#define FW_OFLD_TX_DATA_WR_ULPSUBMODE_S 6 +#define FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(x) \ + ((x) << FW_OFLD_TX_DATA_WR_ULPSUBMODE_S) + struct fw_cmd_wr { __be32 op_dma; -#define FW_CMD_WR_DMA (1U << 17) __be32 len16_pkd; __be64 cookie_daddr; }; +#define FW_CMD_WR_DMA_S 17 +#define FW_CMD_WR_DMA_V(x) ((x) << FW_CMD_WR_DMA_S) + struct fw_eth_tx_pkt_vm_wr { __be32 op_immdlen; __be32 equiq_to_len16; @@ -641,18 +695,39 @@ struct fw_cmd_hdr { __be32 lo; }; -#define FW_CMD_OP(x) ((x) << 24) -#define FW_CMD_OP_GET(x) (((x) >> 24) & 0xff) -#define FW_CMD_REQUEST (1U << 23) -#define FW_CMD_REQUEST_GET(x) (((x) >> 23) & 0x1) -#define FW_CMD_READ (1U << 22) -#define FW_CMD_WRITE (1U << 21) -#define FW_CMD_EXEC (1U << 20) -#define FW_CMD_RAMASK(x) ((x) << 20) -#define FW_CMD_RETVAL(x) ((x) << 8) -#define FW_CMD_RETVAL_GET(x) (((x) >> 8) & 0xff) -#define FW_CMD_LEN16(x) ((x) << 0) -#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16) +#define FW_CMD_OP_S 24 +#define FW_CMD_OP_M 0xff +#define FW_CMD_OP_V(x) ((x) << FW_CMD_OP_S) +#define FW_CMD_OP_G(x) (((x) >> FW_CMD_OP_S) & FW_CMD_OP_M) + +#define FW_CMD_REQUEST_S 23 +#define FW_CMD_REQUEST_V(x) ((x) << FW_CMD_REQUEST_S) +#define FW_CMD_REQUEST_F FW_CMD_REQUEST_V(1U) + +#define FW_CMD_READ_S 22 +#define FW_CMD_READ_V(x) ((x) << FW_CMD_READ_S) +#define FW_CMD_READ_F FW_CMD_READ_V(1U) + +#define FW_CMD_WRITE_S 21 +#define FW_CMD_WRITE_V(x) ((x) << FW_CMD_WRITE_S) +#define FW_CMD_WRITE_F FW_CMD_WRITE_V(1U) + +#define FW_CMD_EXEC_S 20 +#define FW_CMD_EXEC_V(x) ((x) << FW_CMD_EXEC_S) +#define FW_CMD_EXEC_F FW_CMD_EXEC_V(1U) + +#define FW_CMD_RAMASK_S 20 +#define FW_CMD_RAMASK_V(x) ((x) << FW_CMD_RAMASK_S) + +#define FW_CMD_RETVAL_S 8 +#define FW_CMD_RETVAL_M 0xff +#define FW_CMD_RETVAL_V(x) ((x) << FW_CMD_RETVAL_S) +#define FW_CMD_RETVAL_G(x) (((x) >> FW_CMD_RETVAL_S) & FW_CMD_RETVAL_M) + +#define FW_CMD_LEN16_S 0 +#define FW_CMD_LEN16_V(x) ((x) << FW_CMD_LEN16_S) + +#define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16) enum fw_ldst_addrspc { FW_LDST_ADDRSPC_FIRMWARE = 0x0001, @@ -685,7 +760,8 @@ enum fw_ldst_func_mod_index { struct fw_ldst_cmd { __be32 op_to_addrspace; -#define FW_LDST_CMD_ADDRSPACE(x) ((x) << 0) +#define FW_LDST_CMD_ADDRSPACE_S 0 +#define FW_LDST_CMD_ADDRSPACE_V(x) ((x) << FW_LDST_CMD_ADDRSPACE_S) __be32 cycles_to_len16; union fw_ldst { struct fw_ldst_addrval { @@ -741,15 +817,33 @@ struct fw_ldst_cmd { } u; }; -#define FW_LDST_CMD_MSG(x) ((x) << 31) -#define FW_LDST_CMD_PADDR(x) ((x) << 8) -#define FW_LDST_CMD_MMD(x) ((x) << 0) -#define FW_LDST_CMD_FID(x) ((x) << 15) -#define FW_LDST_CMD_CTL(x) ((x) << 0) -#define FW_LDST_CMD_RPLCPF(x) ((x) << 0) -#define FW_LDST_CMD_LC (1U << 4) -#define FW_LDST_CMD_NACCESS(x) ((x) << 0) -#define FW_LDST_CMD_FN(x) ((x) << 0) +#define FW_LDST_CMD_MSG_S 31 +#define FW_LDST_CMD_MSG_V(x) ((x) << FW_LDST_CMD_MSG_S) + +#define FW_LDST_CMD_PADDR_S 8 +#define FW_LDST_CMD_PADDR_V(x) ((x) << FW_LDST_CMD_PADDR_S) + +#define FW_LDST_CMD_MMD_S 0 +#define FW_LDST_CMD_MMD_V(x) ((x) << FW_LDST_CMD_MMD_S) + +#define 
FW_LDST_CMD_FID_S 15 +#define FW_LDST_CMD_FID_V(x) ((x) << FW_LDST_CMD_FID_S) + +#define FW_LDST_CMD_CTL_S 0 +#define FW_LDST_CMD_CTL_V(x) ((x) << FW_LDST_CMD_CTL_S) + +#define FW_LDST_CMD_RPLCPF_S 0 +#define FW_LDST_CMD_RPLCPF_V(x) ((x) << FW_LDST_CMD_RPLCPF_S) + +#define FW_LDST_CMD_LC_S 4 +#define FW_LDST_CMD_LC_V(x) ((x) << FW_LDST_CMD_LC_S) +#define FW_LDST_CMD_LC_F FW_LDST_CMD_LC_V(1U) + +#define FW_LDST_CMD_FN_S 0 +#define FW_LDST_CMD_FN_V(x) ((x) << FW_LDST_CMD_FN_S) + +#define FW_LDST_CMD_NACCESS_S 0 +#define FW_LDST_CMD_NACCESS_V(x) ((x) << FW_LDST_CMD_NACCESS_S) struct fw_reset_cmd { __be32 op_to_write; @@ -758,11 +852,12 @@ struct fw_reset_cmd { __be32 halt_pkd; }; -#define FW_RESET_CMD_HALT_SHIFT 31 -#define FW_RESET_CMD_HALT_MASK 0x1 -#define FW_RESET_CMD_HALT(x) ((x) << FW_RESET_CMD_HALT_SHIFT) -#define FW_RESET_CMD_HALT_GET(x) \ - (((x) >> FW_RESET_CMD_HALT_SHIFT) & FW_RESET_CMD_HALT_MASK) +#define FW_RESET_CMD_HALT_S 31 +#define FW_RESET_CMD_HALT_M 0x1 +#define FW_RESET_CMD_HALT_V(x) ((x) << FW_RESET_CMD_HALT_S) +#define FW_RESET_CMD_HALT_G(x) \ + (((x) >> FW_RESET_CMD_HALT_S) & FW_RESET_CMD_HALT_M) +#define FW_RESET_CMD_HALT_F FW_RESET_CMD_HALT_V(1U) enum fw_hellow_cmd { fw_hello_cmd_stage_os = 0x0 @@ -772,22 +867,42 @@ struct fw_hello_cmd { __be32 op_to_write; __be32 retval_len16; __be32 err_to_clearinit; -#define FW_HELLO_CMD_ERR (1U << 31) -#define FW_HELLO_CMD_INIT (1U << 30) -#define FW_HELLO_CMD_MASTERDIS(x) ((x) << 29) -#define FW_HELLO_CMD_MASTERFORCE(x) ((x) << 28) -#define FW_HELLO_CMD_MBMASTER_MASK 0xfU -#define FW_HELLO_CMD_MBMASTER_SHIFT 24 -#define FW_HELLO_CMD_MBMASTER(x) ((x) << FW_HELLO_CMD_MBMASTER_SHIFT) -#define FW_HELLO_CMD_MBMASTER_GET(x) \ - (((x) >> FW_HELLO_CMD_MBMASTER_SHIFT) & FW_HELLO_CMD_MBMASTER_MASK) -#define FW_HELLO_CMD_MBASYNCNOTINT(x) ((x) << 23) -#define FW_HELLO_CMD_MBASYNCNOT(x) ((x) << 20) -#define FW_HELLO_CMD_STAGE(x) ((x) << 17) -#define FW_HELLO_CMD_CLEARINIT (1U << 16) __be32 fwrev; }; +#define FW_HELLO_CMD_ERR_S 31 +#define FW_HELLO_CMD_ERR_V(x) ((x) << FW_HELLO_CMD_ERR_S) +#define FW_HELLO_CMD_ERR_F FW_HELLO_CMD_ERR_V(1U) + +#define FW_HELLO_CMD_INIT_S 30 +#define FW_HELLO_CMD_INIT_V(x) ((x) << FW_HELLO_CMD_INIT_S) +#define FW_HELLO_CMD_INIT_F FW_HELLO_CMD_INIT_V(1U) + +#define FW_HELLO_CMD_MASTERDIS_S 29 +#define FW_HELLO_CMD_MASTERDIS_V(x) ((x) << FW_HELLO_CMD_MASTERDIS_S) + +#define FW_HELLO_CMD_MASTERFORCE_S 28 +#define FW_HELLO_CMD_MASTERFORCE_V(x) ((x) << FW_HELLO_CMD_MASTERFORCE_S) + +#define FW_HELLO_CMD_MBMASTER_S 24 +#define FW_HELLO_CMD_MBMASTER_M 0xfU +#define FW_HELLO_CMD_MBMASTER_V(x) ((x) << FW_HELLO_CMD_MBMASTER_S) +#define FW_HELLO_CMD_MBMASTER_G(x) \ + (((x) >> FW_HELLO_CMD_MBMASTER_S) & FW_HELLO_CMD_MBMASTER_M) + +#define FW_HELLO_CMD_MBASYNCNOTINT_S 23 +#define FW_HELLO_CMD_MBASYNCNOTINT_V(x) ((x) << FW_HELLO_CMD_MBASYNCNOTINT_S) + +#define FW_HELLO_CMD_MBASYNCNOT_S 20 +#define FW_HELLO_CMD_MBASYNCNOT_V(x) ((x) << FW_HELLO_CMD_MBASYNCNOT_S) + +#define FW_HELLO_CMD_STAGE_S 17 +#define FW_HELLO_CMD_STAGE_V(x) ((x) << FW_HELLO_CMD_STAGE_S) + +#define FW_HELLO_CMD_CLEARINIT_S 16 +#define FW_HELLO_CMD_CLEARINIT_V(x) ((x) << FW_HELLO_CMD_CLEARINIT_S) +#define FW_HELLO_CMD_CLEARINIT_F FW_HELLO_CMD_CLEARINIT_V(1U) + struct fw_bye_cmd { __be32 op_to_write; __be32 retval_len16; @@ -898,9 +1013,17 @@ struct fw_caps_config_cmd { __be32 finicsum; }; -#define FW_CAPS_CONFIG_CMD_CFVALID (1U << 27) -#define FW_CAPS_CONFIG_CMD_MEMTYPE_CF(x) ((x) << 24) -#define FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(x) ((x) << 16) +#define 
FW_CAPS_CONFIG_CMD_CFVALID_S 27 +#define FW_CAPS_CONFIG_CMD_CFVALID_V(x) ((x) << FW_CAPS_CONFIG_CMD_CFVALID_S) +#define FW_CAPS_CONFIG_CMD_CFVALID_F FW_CAPS_CONFIG_CMD_CFVALID_V(1U) + +#define FW_CAPS_CONFIG_CMD_MEMTYPE_CF_S 24 +#define FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(x) \ + ((x) << FW_CAPS_CONFIG_CMD_MEMTYPE_CF_S) + +#define FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_S 16 +#define FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(x) \ + ((x) << FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_S) /* * params command mnemonics @@ -996,20 +1119,29 @@ enum fw_params_param_dmaq { FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH = 0x13, }; -#define FW_PARAMS_MNEM(x) ((x) << 24) -#define FW_PARAMS_PARAM_X(x) ((x) << 16) -#define FW_PARAMS_PARAM_Y_SHIFT 8 -#define FW_PARAMS_PARAM_Y_MASK 0xffU -#define FW_PARAMS_PARAM_Y(x) ((x) << FW_PARAMS_PARAM_Y_SHIFT) -#define FW_PARAMS_PARAM_Y_GET(x) (((x) >> FW_PARAMS_PARAM_Y_SHIFT) &\ - FW_PARAMS_PARAM_Y_MASK) -#define FW_PARAMS_PARAM_Z_SHIFT 0 -#define FW_PARAMS_PARAM_Z_MASK 0xffu -#define FW_PARAMS_PARAM_Z(x) ((x) << FW_PARAMS_PARAM_Z_SHIFT) -#define FW_PARAMS_PARAM_Z_GET(x) (((x) >> FW_PARAMS_PARAM_Z_SHIFT) &\ - FW_PARAMS_PARAM_Z_MASK) -#define FW_PARAMS_PARAM_XYZ(x) ((x) << 0) -#define FW_PARAMS_PARAM_YZ(x) ((x) << 0) +#define FW_PARAMS_MNEM_S 24 +#define FW_PARAMS_MNEM_V(x) ((x) << FW_PARAMS_MNEM_S) + +#define FW_PARAMS_PARAM_X_S 16 +#define FW_PARAMS_PARAM_X_V(x) ((x) << FW_PARAMS_PARAM_X_S) + +#define FW_PARAMS_PARAM_Y_S 8 +#define FW_PARAMS_PARAM_Y_M 0xffU +#define FW_PARAMS_PARAM_Y_V(x) ((x) << FW_PARAMS_PARAM_Y_S) +#define FW_PARAMS_PARAM_Y_G(x) (((x) >> FW_PARAMS_PARAM_Y_S) &\ + FW_PARAMS_PARAM_Y_M) + +#define FW_PARAMS_PARAM_Z_S 0 +#define FW_PARAMS_PARAM_Z_M 0xffu +#define FW_PARAMS_PARAM_Z_V(x) ((x) << FW_PARAMS_PARAM_Z_S) +#define FW_PARAMS_PARAM_Z_G(x) (((x) >> FW_PARAMS_PARAM_Z_S) &\ + FW_PARAMS_PARAM_Z_M) + +#define FW_PARAMS_PARAM_XYZ_S 0 +#define FW_PARAMS_PARAM_XYZ_V(x) ((x) << FW_PARAMS_PARAM_XYZ_S) + +#define FW_PARAMS_PARAM_YZ_S 0 +#define FW_PARAMS_PARAM_YZ_V(x) ((x) << FW_PARAMS_PARAM_YZ_S) struct fw_params_cmd { __be32 op_to_vfn; @@ -1020,8 +1152,11 @@ struct fw_params_cmd { } param[7]; }; -#define FW_PARAMS_CMD_PFN(x) ((x) << 8) -#define FW_PARAMS_CMD_VFN(x) ((x) << 0) +#define FW_PARAMS_CMD_PFN_S 8 +#define FW_PARAMS_CMD_PFN_V(x) ((x) << FW_PARAMS_CMD_PFN_S) + +#define FW_PARAMS_CMD_VFN_S 0 +#define FW_PARAMS_CMD_VFN_V(x) ((x) << FW_PARAMS_CMD_VFN_S) struct fw_pfvf_cmd { __be32 op_to_vfn; @@ -1035,46 +1170,82 @@ struct fw_pfvf_cmd { __be32 r4; }; -#define FW_PFVF_CMD_PFN(x) ((x) << 8) -#define FW_PFVF_CMD_VFN(x) ((x) << 0) - -#define FW_PFVF_CMD_NIQFLINT(x) ((x) << 20) -#define FW_PFVF_CMD_NIQFLINT_GET(x) (((x) >> 20) & 0xfff) - -#define FW_PFVF_CMD_NIQ(x) ((x) << 0) -#define FW_PFVF_CMD_NIQ_GET(x) (((x) >> 0) & 0xfffff) - -#define FW_PFVF_CMD_TYPE (1 << 31) -#define FW_PFVF_CMD_TYPE_GET(x) (((x) >> 31) & 0x1) - -#define FW_PFVF_CMD_CMASK(x) ((x) << 24) -#define FW_PFVF_CMD_CMASK_MASK 0xf -#define FW_PFVF_CMD_CMASK_GET(x) (((x) >> 24) & FW_PFVF_CMD_CMASK_MASK) - -#define FW_PFVF_CMD_PMASK(x) ((x) << 20) -#define FW_PFVF_CMD_PMASK_MASK 0xf -#define FW_PFVF_CMD_PMASK_GET(x) (((x) >> 20) & FW_PFVF_CMD_PMASK_MASK) - -#define FW_PFVF_CMD_NEQ(x) ((x) << 0) -#define FW_PFVF_CMD_NEQ_GET(x) (((x) >> 0) & 0xfffff) - -#define FW_PFVF_CMD_TC(x) ((x) << 24) -#define FW_PFVF_CMD_TC_GET(x) (((x) >> 24) & 0xff) - -#define FW_PFVF_CMD_NVI(x) ((x) << 16) -#define FW_PFVF_CMD_NVI_GET(x) (((x) >> 16) & 0xff) - -#define FW_PFVF_CMD_NEXACTF(x) ((x) << 0) -#define FW_PFVF_CMD_NEXACTF_GET(x) (((x) >> 0) & 
0xffff) - -#define FW_PFVF_CMD_R_CAPS(x) ((x) << 24) -#define FW_PFVF_CMD_R_CAPS_GET(x) (((x) >> 24) & 0xff) - -#define FW_PFVF_CMD_WX_CAPS(x) ((x) << 16) -#define FW_PFVF_CMD_WX_CAPS_GET(x) (((x) >> 16) & 0xff) - -#define FW_PFVF_CMD_NETHCTRL(x) ((x) << 0) -#define FW_PFVF_CMD_NETHCTRL_GET(x) (((x) >> 0) & 0xffff) +#define FW_PFVF_CMD_PFN_S 8 +#define FW_PFVF_CMD_PFN_V(x) ((x) << FW_PFVF_CMD_PFN_S) + +#define FW_PFVF_CMD_VFN_S 0 +#define FW_PFVF_CMD_VFN_V(x) ((x) << FW_PFVF_CMD_VFN_S) + +#define FW_PFVF_CMD_NIQFLINT_S 20 +#define FW_PFVF_CMD_NIQFLINT_M 0xfff +#define FW_PFVF_CMD_NIQFLINT_V(x) ((x) << FW_PFVF_CMD_NIQFLINT_S) +#define FW_PFVF_CMD_NIQFLINT_G(x) \ + (((x) >> FW_PFVF_CMD_NIQFLINT_S) & FW_PFVF_CMD_NIQFLINT_M) + +#define FW_PFVF_CMD_NIQ_S 0 +#define FW_PFVF_CMD_NIQ_M 0xfffff +#define FW_PFVF_CMD_NIQ_V(x) ((x) << FW_PFVF_CMD_NIQ_S) +#define FW_PFVF_CMD_NIQ_G(x) \ + (((x) >> FW_PFVF_CMD_NIQ_S) & FW_PFVF_CMD_NIQ_M) + +#define FW_PFVF_CMD_TYPE_S 31 +#define FW_PFVF_CMD_TYPE_M 0x1 +#define FW_PFVF_CMD_TYPE_V(x) ((x) << FW_PFVF_CMD_TYPE_S) +#define FW_PFVF_CMD_TYPE_G(x) \ + (((x) >> FW_PFVF_CMD_TYPE_S) & FW_PFVF_CMD_TYPE_M) +#define FW_PFVF_CMD_TYPE_F FW_PFVF_CMD_TYPE_V(1U) + +#define FW_PFVF_CMD_CMASK_S 24 +#define FW_PFVF_CMD_CMASK_M 0xf +#define FW_PFVF_CMD_CMASK_V(x) ((x) << FW_PFVF_CMD_CMASK_S) +#define FW_PFVF_CMD_CMASK_G(x) \ + (((x) >> FW_PFVF_CMD_CMASK_S) & FW_PFVF_CMD_CMASK_M) + +#define FW_PFVF_CMD_PMASK_S 20 +#define FW_PFVF_CMD_PMASK_M 0xf +#define FW_PFVF_CMD_PMASK_V(x) ((x) << FW_PFVF_CMD_PMASK_S) +#define FW_PFVF_CMD_PMASK_G(x) \ + (((x) >> FW_PFVF_CMD_PMASK_S) & FW_PFVF_CMD_PMASK_M) + +#define FW_PFVF_CMD_NEQ_S 0 +#define FW_PFVF_CMD_NEQ_M 0xfffff +#define FW_PFVF_CMD_NEQ_V(x) ((x) << FW_PFVF_CMD_NEQ_S) +#define FW_PFVF_CMD_NEQ_G(x) \ + (((x) >> FW_PFVF_CMD_NEQ_S) & FW_PFVF_CMD_NEQ_M) + +#define FW_PFVF_CMD_TC_S 24 +#define FW_PFVF_CMD_TC_M 0xff +#define FW_PFVF_CMD_TC_V(x) ((x) << FW_PFVF_CMD_TC_S) +#define FW_PFVF_CMD_TC_G(x) (((x) >> FW_PFVF_CMD_TC_S) & FW_PFVF_CMD_TC_M) + +#define FW_PFVF_CMD_NVI_S 16 +#define FW_PFVF_CMD_NVI_M 0xff +#define FW_PFVF_CMD_NVI_V(x) ((x) << FW_PFVF_CMD_NVI_S) +#define FW_PFVF_CMD_NVI_G(x) (((x) >> FW_PFVF_CMD_NVI_S) & FW_PFVF_CMD_NVI_M) + +#define FW_PFVF_CMD_NEXACTF_S 0 +#define FW_PFVF_CMD_NEXACTF_M 0xffff +#define FW_PFVF_CMD_NEXACTF_V(x) ((x) << FW_PFVF_CMD_NEXACTF_S) +#define FW_PFVF_CMD_NEXACTF_G(x) \ + (((x) >> FW_PFVF_CMD_NEXACTF_S) & FW_PFVF_CMD_NEXACTF_M) + +#define FW_PFVF_CMD_R_CAPS_S 24 +#define FW_PFVF_CMD_R_CAPS_M 0xff +#define FW_PFVF_CMD_R_CAPS_V(x) ((x) << FW_PFVF_CMD_R_CAPS_S) +#define FW_PFVF_CMD_R_CAPS_G(x) \ + (((x) >> FW_PFVF_CMD_R_CAPS_S) & FW_PFVF_CMD_R_CAPS_M) + +#define FW_PFVF_CMD_WX_CAPS_S 16 +#define FW_PFVF_CMD_WX_CAPS_M 0xff +#define FW_PFVF_CMD_WX_CAPS_V(x) ((x) << FW_PFVF_CMD_WX_CAPS_S) +#define FW_PFVF_CMD_WX_CAPS_G(x) \ + (((x) >> FW_PFVF_CMD_WX_CAPS_S) & FW_PFVF_CMD_WX_CAPS_M) + +#define FW_PFVF_CMD_NETHCTRL_S 0 +#define FW_PFVF_CMD_NETHCTRL_M 0xffff +#define FW_PFVF_CMD_NETHCTRL_V(x) ((x) << FW_PFVF_CMD_NETHCTRL_S) +#define FW_PFVF_CMD_NETHCTRL_G(x) \ + (((x) >> FW_PFVF_CMD_NETHCTRL_S) & FW_PFVF_CMD_NETHCTRL_M) enum fw_iq_type { FW_IQ_TYPE_FL_INT_CAP, @@ -1102,85 +1273,239 @@ struct fw_iq_cmd { __be64 fl1addr; }; -#define FW_IQ_CMD_PFN(x) ((x) << 8) -#define FW_IQ_CMD_VFN(x) ((x) << 0) - -#define FW_IQ_CMD_ALLOC (1U << 31) -#define FW_IQ_CMD_FREE (1U << 30) -#define FW_IQ_CMD_MODIFY (1U << 29) -#define FW_IQ_CMD_IQSTART(x) ((x) << 28) -#define FW_IQ_CMD_IQSTOP(x) ((x) << 27) - -#define 
FW_IQ_CMD_TYPE(x) ((x) << 29) -#define FW_IQ_CMD_IQASYNCH(x) ((x) << 28) -#define FW_IQ_CMD_VIID(x) ((x) << 16) -#define FW_IQ_CMD_IQANDST(x) ((x) << 15) -#define FW_IQ_CMD_IQANUS(x) ((x) << 14) -#define FW_IQ_CMD_IQANUD(x) ((x) << 12) -#define FW_IQ_CMD_IQANDSTINDEX(x) ((x) << 0) - -#define FW_IQ_CMD_IQDROPRSS (1U << 15) -#define FW_IQ_CMD_IQGTSMODE (1U << 14) -#define FW_IQ_CMD_IQPCIECH(x) ((x) << 12) -#define FW_IQ_CMD_IQDCAEN(x) ((x) << 11) -#define FW_IQ_CMD_IQDCACPU(x) ((x) << 6) -#define FW_IQ_CMD_IQINTCNTTHRESH(x) ((x) << 4) -#define FW_IQ_CMD_IQO (1U << 3) -#define FW_IQ_CMD_IQCPRIO(x) ((x) << 2) -#define FW_IQ_CMD_IQESIZE(x) ((x) << 0) - -#define FW_IQ_CMD_IQNS(x) ((x) << 31) -#define FW_IQ_CMD_IQRO(x) ((x) << 30) -#define FW_IQ_CMD_IQFLINTIQHSEN(x) ((x) << 28) -#define FW_IQ_CMD_IQFLINTCONGEN(x) ((x) << 27) -#define FW_IQ_CMD_IQFLINTISCSIC(x) ((x) << 26) -#define FW_IQ_CMD_FL0CNGCHMAP(x) ((x) << 20) -#define FW_IQ_CMD_FL0CACHELOCK(x) ((x) << 15) -#define FW_IQ_CMD_FL0DBP(x) ((x) << 14) -#define FW_IQ_CMD_FL0DATANS(x) ((x) << 13) -#define FW_IQ_CMD_FL0DATARO(x) ((x) << 12) -#define FW_IQ_CMD_FL0CONGCIF(x) ((x) << 11) -#define FW_IQ_CMD_FL0ONCHIP(x) ((x) << 10) -#define FW_IQ_CMD_FL0STATUSPGNS(x) ((x) << 9) -#define FW_IQ_CMD_FL0STATUSPGRO(x) ((x) << 8) -#define FW_IQ_CMD_FL0FETCHNS(x) ((x) << 7) -#define FW_IQ_CMD_FL0FETCHRO(x) ((x) << 6) -#define FW_IQ_CMD_FL0HOSTFCMODE(x) ((x) << 4) -#define FW_IQ_CMD_FL0CPRIO(x) ((x) << 3) -#define FW_IQ_CMD_FL0PADEN(x) ((x) << 2) -#define FW_IQ_CMD_FL0PACKEN(x) ((x) << 1) -#define FW_IQ_CMD_FL0CONGEN (1U << 0) - -#define FW_IQ_CMD_FL0DCAEN(x) ((x) << 15) -#define FW_IQ_CMD_FL0DCACPU(x) ((x) << 10) -#define FW_IQ_CMD_FL0FBMIN(x) ((x) << 7) -#define FW_IQ_CMD_FL0FBMAX(x) ((x) << 4) -#define FW_IQ_CMD_FL0CIDXFTHRESHO (1U << 3) -#define FW_IQ_CMD_FL0CIDXFTHRESH(x) ((x) << 0) - -#define FW_IQ_CMD_FL1CNGCHMAP(x) ((x) << 20) -#define FW_IQ_CMD_FL1CACHELOCK(x) ((x) << 15) -#define FW_IQ_CMD_FL1DBP(x) ((x) << 14) -#define FW_IQ_CMD_FL1DATANS(x) ((x) << 13) -#define FW_IQ_CMD_FL1DATARO(x) ((x) << 12) -#define FW_IQ_CMD_FL1CONGCIF(x) ((x) << 11) -#define FW_IQ_CMD_FL1ONCHIP(x) ((x) << 10) -#define FW_IQ_CMD_FL1STATUSPGNS(x) ((x) << 9) -#define FW_IQ_CMD_FL1STATUSPGRO(x) ((x) << 8) -#define FW_IQ_CMD_FL1FETCHNS(x) ((x) << 7) -#define FW_IQ_CMD_FL1FETCHRO(x) ((x) << 6) -#define FW_IQ_CMD_FL1HOSTFCMODE(x) ((x) << 4) -#define FW_IQ_CMD_FL1CPRIO(x) ((x) << 3) -#define FW_IQ_CMD_FL1PADEN (1U << 2) -#define FW_IQ_CMD_FL1PACKEN (1U << 1) -#define FW_IQ_CMD_FL1CONGEN (1U << 0) - -#define FW_IQ_CMD_FL1DCAEN(x) ((x) << 15) -#define FW_IQ_CMD_FL1DCACPU(x) ((x) << 10) -#define FW_IQ_CMD_FL1FBMIN(x) ((x) << 7) -#define FW_IQ_CMD_FL1FBMAX(x) ((x) << 4) -#define FW_IQ_CMD_FL1CIDXFTHRESHO (1U << 3) -#define FW_IQ_CMD_FL1CIDXFTHRESH(x) ((x) << 0) +#define FW_IQ_CMD_PFN_S 8 +#define FW_IQ_CMD_PFN_V(x) ((x) << FW_IQ_CMD_PFN_S) + +#define FW_IQ_CMD_VFN_S 0 +#define FW_IQ_CMD_VFN_V(x) ((x) << FW_IQ_CMD_VFN_S) + +#define FW_IQ_CMD_ALLOC_S 31 +#define FW_IQ_CMD_ALLOC_V(x) ((x) << FW_IQ_CMD_ALLOC_S) +#define FW_IQ_CMD_ALLOC_F FW_IQ_CMD_ALLOC_V(1U) + +#define FW_IQ_CMD_FREE_S 30 +#define FW_IQ_CMD_FREE_V(x) ((x) << FW_IQ_CMD_FREE_S) +#define FW_IQ_CMD_FREE_F FW_IQ_CMD_FREE_V(1U) + +#define FW_IQ_CMD_MODIFY_S 29 +#define FW_IQ_CMD_MODIFY_V(x) ((x) << FW_IQ_CMD_MODIFY_S) +#define FW_IQ_CMD_MODIFY_F FW_IQ_CMD_MODIFY_V(1U) + +#define FW_IQ_CMD_IQSTART_S 28 +#define FW_IQ_CMD_IQSTART_V(x) ((x) << FW_IQ_CMD_IQSTART_S) +#define FW_IQ_CMD_IQSTART_F FW_IQ_CMD_IQSTART_V(1U) + +#define 
FW_IQ_CMD_IQSTOP_S 27 +#define FW_IQ_CMD_IQSTOP_V(x) ((x) << FW_IQ_CMD_IQSTOP_S) +#define FW_IQ_CMD_IQSTOP_F FW_IQ_CMD_IQSTOP_V(1U) + +#define FW_IQ_CMD_TYPE_S 29 +#define FW_IQ_CMD_TYPE_V(x) ((x) << FW_IQ_CMD_TYPE_S) + +#define FW_IQ_CMD_IQASYNCH_S 28 +#define FW_IQ_CMD_IQASYNCH_V(x) ((x) << FW_IQ_CMD_IQASYNCH_S) + +#define FW_IQ_CMD_VIID_S 16 +#define FW_IQ_CMD_VIID_V(x) ((x) << FW_IQ_CMD_VIID_S) + +#define FW_IQ_CMD_IQANDST_S 15 +#define FW_IQ_CMD_IQANDST_V(x) ((x) << FW_IQ_CMD_IQANDST_S) + +#define FW_IQ_CMD_IQANUS_S 14 +#define FW_IQ_CMD_IQANUS_V(x) ((x) << FW_IQ_CMD_IQANUS_S) + +#define FW_IQ_CMD_IQANUD_S 12 +#define FW_IQ_CMD_IQANUD_V(x) ((x) << FW_IQ_CMD_IQANUD_S) + +#define FW_IQ_CMD_IQANDSTINDEX_S 0 +#define FW_IQ_CMD_IQANDSTINDEX_V(x) ((x) << FW_IQ_CMD_IQANDSTINDEX_S) + +#define FW_IQ_CMD_IQDROPRSS_S 15 +#define FW_IQ_CMD_IQDROPRSS_V(x) ((x) << FW_IQ_CMD_IQDROPRSS_S) +#define FW_IQ_CMD_IQDROPRSS_F FW_IQ_CMD_IQDROPRSS_V(1U) + +#define FW_IQ_CMD_IQGTSMODE_S 14 +#define FW_IQ_CMD_IQGTSMODE_V(x) ((x) << FW_IQ_CMD_IQGTSMODE_S) +#define FW_IQ_CMD_IQGTSMODE_F FW_IQ_CMD_IQGTSMODE_V(1U) + +#define FW_IQ_CMD_IQPCIECH_S 12 +#define FW_IQ_CMD_IQPCIECH_V(x) ((x) << FW_IQ_CMD_IQPCIECH_S) + +#define FW_IQ_CMD_IQDCAEN_S 11 +#define FW_IQ_CMD_IQDCAEN_V(x) ((x) << FW_IQ_CMD_IQDCAEN_S) + +#define FW_IQ_CMD_IQDCACPU_S 6 +#define FW_IQ_CMD_IQDCACPU_V(x) ((x) << FW_IQ_CMD_IQDCACPU_S) + +#define FW_IQ_CMD_IQINTCNTTHRESH_S 4 +#define FW_IQ_CMD_IQINTCNTTHRESH_V(x) ((x) << FW_IQ_CMD_IQINTCNTTHRESH_S) + +#define FW_IQ_CMD_IQO_S 3 +#define FW_IQ_CMD_IQO_V(x) ((x) << FW_IQ_CMD_IQO_S) +#define FW_IQ_CMD_IQO_F FW_IQ_CMD_IQO_V(1U) + +#define FW_IQ_CMD_IQCPRIO_S 2 +#define FW_IQ_CMD_IQCPRIO_V(x) ((x) << FW_IQ_CMD_IQCPRIO_S) + +#define FW_IQ_CMD_IQESIZE_S 0 +#define FW_IQ_CMD_IQESIZE_V(x) ((x) << FW_IQ_CMD_IQESIZE_S) + +#define FW_IQ_CMD_IQNS_S 31 +#define FW_IQ_CMD_IQNS_V(x) ((x) << FW_IQ_CMD_IQNS_S) + +#define FW_IQ_CMD_IQRO_S 30 +#define FW_IQ_CMD_IQRO_V(x) ((x) << FW_IQ_CMD_IQRO_S) + +#define FW_IQ_CMD_IQFLINTIQHSEN_S 28 +#define FW_IQ_CMD_IQFLINTIQHSEN_V(x) ((x) << FW_IQ_CMD_IQFLINTIQHSEN_S) + +#define FW_IQ_CMD_IQFLINTCONGEN_S 27 +#define FW_IQ_CMD_IQFLINTCONGEN_V(x) ((x) << FW_IQ_CMD_IQFLINTCONGEN_S) + +#define FW_IQ_CMD_IQFLINTISCSIC_S 26 +#define FW_IQ_CMD_IQFLINTISCSIC_V(x) ((x) << FW_IQ_CMD_IQFLINTISCSIC_S) + +#define FW_IQ_CMD_FL0CNGCHMAP_S 20 +#define FW_IQ_CMD_FL0CNGCHMAP_V(x) ((x) << FW_IQ_CMD_FL0CNGCHMAP_S) + +#define FW_IQ_CMD_FL0CACHELOCK_S 15 +#define FW_IQ_CMD_FL0CACHELOCK_V(x) ((x) << FW_IQ_CMD_FL0CACHELOCK_S) + +#define FW_IQ_CMD_FL0DBP_S 14 +#define FW_IQ_CMD_FL0DBP_V(x) ((x) << FW_IQ_CMD_FL0DBP_S) + +#define FW_IQ_CMD_FL0DATANS_S 13 +#define FW_IQ_CMD_FL0DATANS_V(x) ((x) << FW_IQ_CMD_FL0DATANS_S) + +#define FW_IQ_CMD_FL0DATARO_S 12 +#define FW_IQ_CMD_FL0DATARO_V(x) ((x) << FW_IQ_CMD_FL0DATARO_S) +#define FW_IQ_CMD_FL0DATARO_F FW_IQ_CMD_FL0DATARO_V(1U) + +#define FW_IQ_CMD_FL0CONGCIF_S 11 +#define FW_IQ_CMD_FL0CONGCIF_V(x) ((x) << FW_IQ_CMD_FL0CONGCIF_S) + +#define FW_IQ_CMD_FL0ONCHIP_S 10 +#define FW_IQ_CMD_FL0ONCHIP_V(x) ((x) << FW_IQ_CMD_FL0ONCHIP_S) + +#define FW_IQ_CMD_FL0STATUSPGNS_S 9 +#define FW_IQ_CMD_FL0STATUSPGNS_V(x) ((x) << FW_IQ_CMD_FL0STATUSPGNS_S) + +#define FW_IQ_CMD_FL0STATUSPGRO_S 8 +#define FW_IQ_CMD_FL0STATUSPGRO_V(x) ((x) << FW_IQ_CMD_FL0STATUSPGRO_S) + +#define FW_IQ_CMD_FL0FETCHNS_S 7 +#define FW_IQ_CMD_FL0FETCHNS_V(x) ((x) << FW_IQ_CMD_FL0FETCHNS_S) + +#define FW_IQ_CMD_FL0FETCHRO_S 6 +#define FW_IQ_CMD_FL0FETCHRO_V(x) ((x) << FW_IQ_CMD_FL0FETCHRO_S) +#define 
FW_IQ_CMD_FL0FETCHRO_F FW_IQ_CMD_FL0FETCHRO_V(1U) + +#define FW_IQ_CMD_FL0HOSTFCMODE_S 4 +#define FW_IQ_CMD_FL0HOSTFCMODE_V(x) ((x) << FW_IQ_CMD_FL0HOSTFCMODE_S) + +#define FW_IQ_CMD_FL0CPRIO_S 3 +#define FW_IQ_CMD_FL0CPRIO_V(x) ((x) << FW_IQ_CMD_FL0CPRIO_S) + +#define FW_IQ_CMD_FL0PADEN_S 2 +#define FW_IQ_CMD_FL0PADEN_V(x) ((x) << FW_IQ_CMD_FL0PADEN_S) +#define FW_IQ_CMD_FL0PADEN_F FW_IQ_CMD_FL0PADEN_V(1U) + +#define FW_IQ_CMD_FL0PACKEN_S 1 +#define FW_IQ_CMD_FL0PACKEN_V(x) ((x) << FW_IQ_CMD_FL0PACKEN_S) +#define FW_IQ_CMD_FL0PACKEN_F FW_IQ_CMD_FL0PACKEN_V(1U) + +#define FW_IQ_CMD_FL0CONGEN_S 0 +#define FW_IQ_CMD_FL0CONGEN_V(x) ((x) << FW_IQ_CMD_FL0CONGEN_S) +#define FW_IQ_CMD_FL0CONGEN_F FW_IQ_CMD_FL0CONGEN_V(1U) + +#define FW_IQ_CMD_FL0DCAEN_S 15 +#define FW_IQ_CMD_FL0DCAEN_V(x) ((x) << FW_IQ_CMD_FL0DCAEN_S) + +#define FW_IQ_CMD_FL0DCACPU_S 10 +#define FW_IQ_CMD_FL0DCACPU_V(x) ((x) << FW_IQ_CMD_FL0DCACPU_S) + +#define FW_IQ_CMD_FL0FBMIN_S 7 +#define FW_IQ_CMD_FL0FBMIN_V(x) ((x) << FW_IQ_CMD_FL0FBMIN_S) + +#define FW_IQ_CMD_FL0FBMAX_S 4 +#define FW_IQ_CMD_FL0FBMAX_V(x) ((x) << FW_IQ_CMD_FL0FBMAX_S) + +#define FW_IQ_CMD_FL0CIDXFTHRESHO_S 3 +#define FW_IQ_CMD_FL0CIDXFTHRESHO_V(x) ((x) << FW_IQ_CMD_FL0CIDXFTHRESHO_S) +#define FW_IQ_CMD_FL0CIDXFTHRESHO_F FW_IQ_CMD_FL0CIDXFTHRESHO_V(1U) + +#define FW_IQ_CMD_FL0CIDXFTHRESH_S 0 +#define FW_IQ_CMD_FL0CIDXFTHRESH_V(x) ((x) << FW_IQ_CMD_FL0CIDXFTHRESH_S) + +#define FW_IQ_CMD_FL1CNGCHMAP_S 20 +#define FW_IQ_CMD_FL1CNGCHMAP_V(x) ((x) << FW_IQ_CMD_FL1CNGCHMAP_S) + +#define FW_IQ_CMD_FL1CACHELOCK_S 15 +#define FW_IQ_CMD_FL1CACHELOCK_V(x) ((x) << FW_IQ_CMD_FL1CACHELOCK_S) + +#define FW_IQ_CMD_FL1DBP_S 14 +#define FW_IQ_CMD_FL1DBP_V(x) ((x) << FW_IQ_CMD_FL1DBP_S) + +#define FW_IQ_CMD_FL1DATANS_S 13 +#define FW_IQ_CMD_FL1DATANS_V(x) ((x) << FW_IQ_CMD_FL1DATANS_S) + +#define FW_IQ_CMD_FL1DATARO_S 12 +#define FW_IQ_CMD_FL1DATARO_V(x) ((x) << FW_IQ_CMD_FL1DATARO_S) + +#define FW_IQ_CMD_FL1CONGCIF_S 11 +#define FW_IQ_CMD_FL1CONGCIF_V(x) ((x) << FW_IQ_CMD_FL1CONGCIF_S) + +#define FW_IQ_CMD_FL1ONCHIP_S 10 +#define FW_IQ_CMD_FL1ONCHIP_V(x) ((x) << FW_IQ_CMD_FL1ONCHIP_S) + +#define FW_IQ_CMD_FL1STATUSPGNS_S 9 +#define FW_IQ_CMD_FL1STATUSPGNS_V(x) ((x) << FW_IQ_CMD_FL1STATUSPGNS_S) + +#define FW_IQ_CMD_FL1STATUSPGRO_S 8 +#define FW_IQ_CMD_FL1STATUSPGRO_V(x) ((x) << FW_IQ_CMD_FL1STATUSPGRO_S) + +#define FW_IQ_CMD_FL1FETCHNS_S 7 +#define FW_IQ_CMD_FL1FETCHNS_V(x) ((x) << FW_IQ_CMD_FL1FETCHNS_S) + +#define FW_IQ_CMD_FL1FETCHRO_S 6 +#define FW_IQ_CMD_FL1FETCHRO_V(x) ((x) << FW_IQ_CMD_FL1FETCHRO_S) + +#define FW_IQ_CMD_FL1HOSTFCMODE_S 4 +#define FW_IQ_CMD_FL1HOSTFCMODE_V(x) ((x) << FW_IQ_CMD_FL1HOSTFCMODE_S) + +#define FW_IQ_CMD_FL1CPRIO_S 3 +#define FW_IQ_CMD_FL1CPRIO_V(x) ((x) << FW_IQ_CMD_FL1CPRIO_S) + +#define FW_IQ_CMD_FL1PADEN_S 2 +#define FW_IQ_CMD_FL1PADEN_V(x) ((x) << FW_IQ_CMD_FL1PADEN_S) +#define FW_IQ_CMD_FL1PADEN_F FW_IQ_CMD_FL1PADEN_V(1U) + +#define FW_IQ_CMD_FL1PACKEN_S 1 +#define FW_IQ_CMD_FL1PACKEN_V(x) ((x) << FW_IQ_CMD_FL1PACKEN_S) +#define FW_IQ_CMD_FL1PACKEN_F FW_IQ_CMD_FL1PACKEN_V(1U) + +#define FW_IQ_CMD_FL1CONGEN_S 0 +#define FW_IQ_CMD_FL1CONGEN_V(x) ((x) << FW_IQ_CMD_FL1CONGEN_S) +#define FW_IQ_CMD_FL1CONGEN_F FW_IQ_CMD_FL1CONGEN_V(1U) + +#define FW_IQ_CMD_FL1DCAEN_S 15 +#define FW_IQ_CMD_FL1DCAEN_V(x) ((x) << FW_IQ_CMD_FL1DCAEN_S) + +#define FW_IQ_CMD_FL1DCACPU_S 10 +#define FW_IQ_CMD_FL1DCACPU_V(x) ((x) << FW_IQ_CMD_FL1DCACPU_S) + +#define FW_IQ_CMD_FL1FBMIN_S 7 +#define FW_IQ_CMD_FL1FBMIN_V(x) ((x) << FW_IQ_CMD_FL1FBMIN_S) + +#define 
FW_IQ_CMD_FL1FBMAX_S 4 +#define FW_IQ_CMD_FL1FBMAX_V(x) ((x) << FW_IQ_CMD_FL1FBMAX_S) + +#define FW_IQ_CMD_FL1CIDXFTHRESHO_S 3 +#define FW_IQ_CMD_FL1CIDXFTHRESHO_V(x) ((x) << FW_IQ_CMD_FL1CIDXFTHRESHO_S) +#define FW_IQ_CMD_FL1CIDXFTHRESHO_F FW_IQ_CMD_FL1CIDXFTHRESHO_V(1U) + +#define FW_IQ_CMD_FL1CIDXFTHRESH_S 0 +#define FW_IQ_CMD_FL1CIDXFTHRESH_V(x) ((x) << FW_IQ_CMD_FL1CIDXFTHRESH_S) struct fw_eq_eth_cmd { __be32 op_to_vfn; @@ -1195,40 +1520,102 @@ struct fw_eq_eth_cmd { __be64 r9; }; -#define FW_EQ_ETH_CMD_PFN(x) ((x) << 8) -#define FW_EQ_ETH_CMD_VFN(x) ((x) << 0) -#define FW_EQ_ETH_CMD_ALLOC (1U << 31) -#define FW_EQ_ETH_CMD_FREE (1U << 30) -#define FW_EQ_ETH_CMD_MODIFY (1U << 29) -#define FW_EQ_ETH_CMD_EQSTART (1U << 28) -#define FW_EQ_ETH_CMD_EQSTOP (1U << 27) - -#define FW_EQ_ETH_CMD_EQID(x) ((x) << 0) -#define FW_EQ_ETH_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff) -#define FW_EQ_ETH_CMD_PHYSEQID(x) ((x) << 0) -#define FW_EQ_ETH_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff) - -#define FW_EQ_ETH_CMD_FETCHSZM(x) ((x) << 26) -#define FW_EQ_ETH_CMD_STATUSPGNS(x) ((x) << 25) -#define FW_EQ_ETH_CMD_STATUSPGRO(x) ((x) << 24) -#define FW_EQ_ETH_CMD_FETCHNS(x) ((x) << 23) -#define FW_EQ_ETH_CMD_FETCHRO(x) ((x) << 22) -#define FW_EQ_ETH_CMD_HOSTFCMODE(x) ((x) << 20) -#define FW_EQ_ETH_CMD_CPRIO(x) ((x) << 19) -#define FW_EQ_ETH_CMD_ONCHIP(x) ((x) << 18) -#define FW_EQ_ETH_CMD_PCIECHN(x) ((x) << 16) -#define FW_EQ_ETH_CMD_IQID(x) ((x) << 0) - -#define FW_EQ_ETH_CMD_DCAEN(x) ((x) << 31) -#define FW_EQ_ETH_CMD_DCACPU(x) ((x) << 26) -#define FW_EQ_ETH_CMD_FBMIN(x) ((x) << 23) -#define FW_EQ_ETH_CMD_FBMAX(x) ((x) << 20) -#define FW_EQ_ETH_CMD_CIDXFTHRESHO(x) ((x) << 19) -#define FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << 16) -#define FW_EQ_ETH_CMD_EQSIZE(x) ((x) << 0) - -#define FW_EQ_ETH_CMD_AUTOEQUEQE (1U << 30) -#define FW_EQ_ETH_CMD_VIID(x) ((x) << 16) +#define FW_EQ_ETH_CMD_PFN_S 8 +#define FW_EQ_ETH_CMD_PFN_V(x) ((x) << FW_EQ_ETH_CMD_PFN_S) + +#define FW_EQ_ETH_CMD_VFN_S 0 +#define FW_EQ_ETH_CMD_VFN_V(x) ((x) << FW_EQ_ETH_CMD_VFN_S) + +#define FW_EQ_ETH_CMD_ALLOC_S 31 +#define FW_EQ_ETH_CMD_ALLOC_V(x) ((x) << FW_EQ_ETH_CMD_ALLOC_S) +#define FW_EQ_ETH_CMD_ALLOC_F FW_EQ_ETH_CMD_ALLOC_V(1U) + +#define FW_EQ_ETH_CMD_FREE_S 30 +#define FW_EQ_ETH_CMD_FREE_V(x) ((x) << FW_EQ_ETH_CMD_FREE_S) +#define FW_EQ_ETH_CMD_FREE_F FW_EQ_ETH_CMD_FREE_V(1U) + +#define FW_EQ_ETH_CMD_MODIFY_S 29 +#define FW_EQ_ETH_CMD_MODIFY_V(x) ((x) << FW_EQ_ETH_CMD_MODIFY_S) +#define FW_EQ_ETH_CMD_MODIFY_F FW_EQ_ETH_CMD_MODIFY_V(1U) + +#define FW_EQ_ETH_CMD_EQSTART_S 28 +#define FW_EQ_ETH_CMD_EQSTART_V(x) ((x) << FW_EQ_ETH_CMD_EQSTART_S) +#define FW_EQ_ETH_CMD_EQSTART_F FW_EQ_ETH_CMD_EQSTART_V(1U) + +#define FW_EQ_ETH_CMD_EQSTOP_S 27 +#define FW_EQ_ETH_CMD_EQSTOP_V(x) ((x) << FW_EQ_ETH_CMD_EQSTOP_S) +#define FW_EQ_ETH_CMD_EQSTOP_F FW_EQ_ETH_CMD_EQSTOP_V(1U) + +#define FW_EQ_ETH_CMD_EQID_S 0 +#define FW_EQ_ETH_CMD_EQID_M 0xfffff +#define FW_EQ_ETH_CMD_EQID_V(x) ((x) << FW_EQ_ETH_CMD_EQID_S) +#define FW_EQ_ETH_CMD_EQID_G(x) \ + (((x) >> FW_EQ_ETH_CMD_EQID_S) & FW_EQ_ETH_CMD_EQID_M) + +#define FW_EQ_ETH_CMD_PHYSEQID_S 0 +#define FW_EQ_ETH_CMD_PHYSEQID_M 0xfffff +#define FW_EQ_ETH_CMD_PHYSEQID_V(x) ((x) << FW_EQ_ETH_CMD_PHYSEQID_S) +#define FW_EQ_ETH_CMD_PHYSEQID_G(x) \ + (((x) >> FW_EQ_ETH_CMD_PHYSEQID_S) & FW_EQ_ETH_CMD_PHYSEQID_M) + +#define FW_EQ_ETH_CMD_FETCHSZM_S 26 +#define FW_EQ_ETH_CMD_FETCHSZM_V(x) ((x) << FW_EQ_ETH_CMD_FETCHSZM_S) +#define FW_EQ_ETH_CMD_FETCHSZM_F FW_EQ_ETH_CMD_FETCHSZM_V(1U) + +#define FW_EQ_ETH_CMD_STATUSPGNS_S 25 
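For orientation on the conversions above and below: every firmware command field in this patch moves to the same suffix scheme, where _S is the bit shift, _M the field mask, _V(x) a value setter, _G(x) a getter and _F the pre-shifted single-bit flag. A minimal, hedged sketch of how callers use the scheme; only the EQID macros mirror definitions from the patch, the two helpers are hypothetical and not part of the driver:

/*
 * Illustrative only: the _S/_M/_V/_G/_F convention in use.  The EQID
 * definitions duplicate the ones added above; pack_eqid() and
 * unpack_eqid() are made-up helpers for demonstration.
 */
#include <linux/types.h>

#define FW_EQ_ETH_CMD_EQID_S	0
#define FW_EQ_ETH_CMD_EQID_M	0xfffff
#define FW_EQ_ETH_CMD_EQID_V(x)	((x) << FW_EQ_ETH_CMD_EQID_S)
#define FW_EQ_ETH_CMD_EQID_G(x)	\
	(((x) >> FW_EQ_ETH_CMD_EQID_S) & FW_EQ_ETH_CMD_EQID_M)

static inline u32 pack_eqid(u32 word, u32 eqid)
{
	/* _V() shifts the value into its field position before OR-ing. */
	return word | FW_EQ_ETH_CMD_EQID_V(eqid);
}

static inline u32 unpack_eqid(u32 word)
{
	/* _G() shifts the field back down and masks it with _M. */
	return FW_EQ_ETH_CMD_EQID_G(word);
}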
+#define FW_EQ_ETH_CMD_STATUSPGNS_V(x) ((x) << FW_EQ_ETH_CMD_STATUSPGNS_S) + +#define FW_EQ_ETH_CMD_STATUSPGRO_S 24 +#define FW_EQ_ETH_CMD_STATUSPGRO_V(x) ((x) << FW_EQ_ETH_CMD_STATUSPGRO_S) + +#define FW_EQ_ETH_CMD_FETCHNS_S 23 +#define FW_EQ_ETH_CMD_FETCHNS_V(x) ((x) << FW_EQ_ETH_CMD_FETCHNS_S) + +#define FW_EQ_ETH_CMD_FETCHRO_S 22 +#define FW_EQ_ETH_CMD_FETCHRO_V(x) ((x) << FW_EQ_ETH_CMD_FETCHRO_S) + +#define FW_EQ_ETH_CMD_HOSTFCMODE_S 20 +#define FW_EQ_ETH_CMD_HOSTFCMODE_V(x) ((x) << FW_EQ_ETH_CMD_HOSTFCMODE_S) + +#define FW_EQ_ETH_CMD_CPRIO_S 19 +#define FW_EQ_ETH_CMD_CPRIO_V(x) ((x) << FW_EQ_ETH_CMD_CPRIO_S) + +#define FW_EQ_ETH_CMD_ONCHIP_S 18 +#define FW_EQ_ETH_CMD_ONCHIP_V(x) ((x) << FW_EQ_ETH_CMD_ONCHIP_S) + +#define FW_EQ_ETH_CMD_PCIECHN_S 16 +#define FW_EQ_ETH_CMD_PCIECHN_V(x) ((x) << FW_EQ_ETH_CMD_PCIECHN_S) + +#define FW_EQ_ETH_CMD_IQID_S 0 +#define FW_EQ_ETH_CMD_IQID_V(x) ((x) << FW_EQ_ETH_CMD_IQID_S) + +#define FW_EQ_ETH_CMD_DCAEN_S 31 +#define FW_EQ_ETH_CMD_DCAEN_V(x) ((x) << FW_EQ_ETH_CMD_DCAEN_S) + +#define FW_EQ_ETH_CMD_DCACPU_S 26 +#define FW_EQ_ETH_CMD_DCACPU_V(x) ((x) << FW_EQ_ETH_CMD_DCACPU_S) + +#define FW_EQ_ETH_CMD_FBMIN_S 23 +#define FW_EQ_ETH_CMD_FBMIN_V(x) ((x) << FW_EQ_ETH_CMD_FBMIN_S) + +#define FW_EQ_ETH_CMD_FBMAX_S 20 +#define FW_EQ_ETH_CMD_FBMAX_V(x) ((x) << FW_EQ_ETH_CMD_FBMAX_S) + +#define FW_EQ_ETH_CMD_CIDXFTHRESHO_S 19 +#define FW_EQ_ETH_CMD_CIDXFTHRESHO_V(x) ((x) << FW_EQ_ETH_CMD_CIDXFTHRESHO_S) + +#define FW_EQ_ETH_CMD_CIDXFTHRESH_S 16 +#define FW_EQ_ETH_CMD_CIDXFTHRESH_V(x) ((x) << FW_EQ_ETH_CMD_CIDXFTHRESH_S) + +#define FW_EQ_ETH_CMD_EQSIZE_S 0 +#define FW_EQ_ETH_CMD_EQSIZE_V(x) ((x) << FW_EQ_ETH_CMD_EQSIZE_S) + +#define FW_EQ_ETH_CMD_AUTOEQUEQE_S 30 +#define FW_EQ_ETH_CMD_AUTOEQUEQE_V(x) ((x) << FW_EQ_ETH_CMD_AUTOEQUEQE_S) +#define FW_EQ_ETH_CMD_AUTOEQUEQE_F FW_EQ_ETH_CMD_AUTOEQUEQE_V(1U) + +#define FW_EQ_ETH_CMD_VIID_S 16 +#define FW_EQ_ETH_CMD_VIID_V(x) ((x) << FW_EQ_ETH_CMD_VIID_S) struct fw_eq_ctrl_cmd { __be32 op_to_vfn; @@ -1240,38 +1627,102 @@ struct fw_eq_ctrl_cmd { __be64 eqaddr; }; -#define FW_EQ_CTRL_CMD_PFN(x) ((x) << 8) -#define FW_EQ_CTRL_CMD_VFN(x) ((x) << 0) - -#define FW_EQ_CTRL_CMD_ALLOC (1U << 31) -#define FW_EQ_CTRL_CMD_FREE (1U << 30) -#define FW_EQ_CTRL_CMD_MODIFY (1U << 29) -#define FW_EQ_CTRL_CMD_EQSTART (1U << 28) -#define FW_EQ_CTRL_CMD_EQSTOP (1U << 27) - -#define FW_EQ_CTRL_CMD_CMPLIQID(x) ((x) << 20) -#define FW_EQ_CTRL_CMD_EQID(x) ((x) << 0) -#define FW_EQ_CTRL_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff) -#define FW_EQ_CTRL_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff) - -#define FW_EQ_CTRL_CMD_FETCHSZM (1U << 26) -#define FW_EQ_CTRL_CMD_STATUSPGNS (1U << 25) -#define FW_EQ_CTRL_CMD_STATUSPGRO (1U << 24) -#define FW_EQ_CTRL_CMD_FETCHNS (1U << 23) -#define FW_EQ_CTRL_CMD_FETCHRO (1U << 22) -#define FW_EQ_CTRL_CMD_HOSTFCMODE(x) ((x) << 20) -#define FW_EQ_CTRL_CMD_CPRIO(x) ((x) << 19) -#define FW_EQ_CTRL_CMD_ONCHIP(x) ((x) << 18) -#define FW_EQ_CTRL_CMD_PCIECHN(x) ((x) << 16) -#define FW_EQ_CTRL_CMD_IQID(x) ((x) << 0) - -#define FW_EQ_CTRL_CMD_DCAEN(x) ((x) << 31) -#define FW_EQ_CTRL_CMD_DCACPU(x) ((x) << 26) -#define FW_EQ_CTRL_CMD_FBMIN(x) ((x) << 23) -#define FW_EQ_CTRL_CMD_FBMAX(x) ((x) << 20) -#define FW_EQ_CTRL_CMD_CIDXFTHRESHO(x) ((x) << 19) -#define FW_EQ_CTRL_CMD_CIDXFTHRESH(x) ((x) << 16) -#define FW_EQ_CTRL_CMD_EQSIZE(x) ((x) << 0) +#define FW_EQ_CTRL_CMD_PFN_S 8 +#define FW_EQ_CTRL_CMD_PFN_V(x) ((x) << FW_EQ_CTRL_CMD_PFN_S) + +#define FW_EQ_CTRL_CMD_VFN_S 0 +#define FW_EQ_CTRL_CMD_VFN_V(x) ((x) << 
FW_EQ_CTRL_CMD_VFN_S) + +#define FW_EQ_CTRL_CMD_ALLOC_S 31 +#define FW_EQ_CTRL_CMD_ALLOC_V(x) ((x) << FW_EQ_CTRL_CMD_ALLOC_S) +#define FW_EQ_CTRL_CMD_ALLOC_F FW_EQ_CTRL_CMD_ALLOC_V(1U) + +#define FW_EQ_CTRL_CMD_FREE_S 30 +#define FW_EQ_CTRL_CMD_FREE_V(x) ((x) << FW_EQ_CTRL_CMD_FREE_S) +#define FW_EQ_CTRL_CMD_FREE_F FW_EQ_CTRL_CMD_FREE_V(1U) + +#define FW_EQ_CTRL_CMD_MODIFY_S 29 +#define FW_EQ_CTRL_CMD_MODIFY_V(x) ((x) << FW_EQ_CTRL_CMD_MODIFY_S) +#define FW_EQ_CTRL_CMD_MODIFY_F FW_EQ_CTRL_CMD_MODIFY_V(1U) + +#define FW_EQ_CTRL_CMD_EQSTART_S 28 +#define FW_EQ_CTRL_CMD_EQSTART_V(x) ((x) << FW_EQ_CTRL_CMD_EQSTART_S) +#define FW_EQ_CTRL_CMD_EQSTART_F FW_EQ_CTRL_CMD_EQSTART_V(1U) + +#define FW_EQ_CTRL_CMD_EQSTOP_S 27 +#define FW_EQ_CTRL_CMD_EQSTOP_V(x) ((x) << FW_EQ_CTRL_CMD_EQSTOP_S) +#define FW_EQ_CTRL_CMD_EQSTOP_F FW_EQ_CTRL_CMD_EQSTOP_V(1U) + +#define FW_EQ_CTRL_CMD_CMPLIQID_S 20 +#define FW_EQ_CTRL_CMD_CMPLIQID_V(x) ((x) << FW_EQ_CTRL_CMD_CMPLIQID_S) + +#define FW_EQ_CTRL_CMD_EQID_S 0 +#define FW_EQ_CTRL_CMD_EQID_M 0xfffff +#define FW_EQ_CTRL_CMD_EQID_V(x) ((x) << FW_EQ_CTRL_CMD_EQID_S) +#define FW_EQ_CTRL_CMD_EQID_G(x) \ + (((x) >> FW_EQ_CTRL_CMD_EQID_S) & FW_EQ_CTRL_CMD_EQID_M) + +#define FW_EQ_CTRL_CMD_PHYSEQID_S 0 +#define FW_EQ_CTRL_CMD_PHYSEQID_M 0xfffff +#define FW_EQ_CTRL_CMD_PHYSEQID_G(x) \ + (((x) >> FW_EQ_CTRL_CMD_PHYSEQID_S) & FW_EQ_CTRL_CMD_PHYSEQID_M) + +#define FW_EQ_CTRL_CMD_FETCHSZM_S 26 +#define FW_EQ_CTRL_CMD_FETCHSZM_V(x) ((x) << FW_EQ_CTRL_CMD_FETCHSZM_S) +#define FW_EQ_CTRL_CMD_FETCHSZM_F FW_EQ_CTRL_CMD_FETCHSZM_V(1U) + +#define FW_EQ_CTRL_CMD_STATUSPGNS_S 25 +#define FW_EQ_CTRL_CMD_STATUSPGNS_V(x) ((x) << FW_EQ_CTRL_CMD_STATUSPGNS_S) +#define FW_EQ_CTRL_CMD_STATUSPGNS_F FW_EQ_CTRL_CMD_STATUSPGNS_V(1U) + +#define FW_EQ_CTRL_CMD_STATUSPGRO_S 24 +#define FW_EQ_CTRL_CMD_STATUSPGRO_V(x) ((x) << FW_EQ_CTRL_CMD_STATUSPGRO_S) +#define FW_EQ_CTRL_CMD_STATUSPGRO_F FW_EQ_CTRL_CMD_STATUSPGRO_V(1U) + +#define FW_EQ_CTRL_CMD_FETCHNS_S 23 +#define FW_EQ_CTRL_CMD_FETCHNS_V(x) ((x) << FW_EQ_CTRL_CMD_FETCHNS_S) +#define FW_EQ_CTRL_CMD_FETCHNS_F FW_EQ_CTRL_CMD_FETCHNS_V(1U) + +#define FW_EQ_CTRL_CMD_FETCHRO_S 22 +#define FW_EQ_CTRL_CMD_FETCHRO_V(x) ((x) << FW_EQ_CTRL_CMD_FETCHRO_S) +#define FW_EQ_CTRL_CMD_FETCHRO_F FW_EQ_CTRL_CMD_FETCHRO_V(1U) + +#define FW_EQ_CTRL_CMD_HOSTFCMODE_S 20 +#define FW_EQ_CTRL_CMD_HOSTFCMODE_V(x) ((x) << FW_EQ_CTRL_CMD_HOSTFCMODE_S) + +#define FW_EQ_CTRL_CMD_CPRIO_S 19 +#define FW_EQ_CTRL_CMD_CPRIO_V(x) ((x) << FW_EQ_CTRL_CMD_CPRIO_S) + +#define FW_EQ_CTRL_CMD_ONCHIP_S 18 +#define FW_EQ_CTRL_CMD_ONCHIP_V(x) ((x) << FW_EQ_CTRL_CMD_ONCHIP_S) + +#define FW_EQ_CTRL_CMD_PCIECHN_S 16 +#define FW_EQ_CTRL_CMD_PCIECHN_V(x) ((x) << FW_EQ_CTRL_CMD_PCIECHN_S) + +#define FW_EQ_CTRL_CMD_IQID_S 0 +#define FW_EQ_CTRL_CMD_IQID_V(x) ((x) << FW_EQ_CTRL_CMD_IQID_S) + +#define FW_EQ_CTRL_CMD_DCAEN_S 31 +#define FW_EQ_CTRL_CMD_DCAEN_V(x) ((x) << FW_EQ_CTRL_CMD_DCAEN_S) + +#define FW_EQ_CTRL_CMD_DCACPU_S 26 +#define FW_EQ_CTRL_CMD_DCACPU_V(x) ((x) << FW_EQ_CTRL_CMD_DCACPU_S) + +#define FW_EQ_CTRL_CMD_FBMIN_S 23 +#define FW_EQ_CTRL_CMD_FBMIN_V(x) ((x) << FW_EQ_CTRL_CMD_FBMIN_S) + +#define FW_EQ_CTRL_CMD_FBMAX_S 20 +#define FW_EQ_CTRL_CMD_FBMAX_V(x) ((x) << FW_EQ_CTRL_CMD_FBMAX_S) + +#define FW_EQ_CTRL_CMD_CIDXFTHRESHO_S 19 +#define FW_EQ_CTRL_CMD_CIDXFTHRESHO_V(x) \ + ((x) << FW_EQ_CTRL_CMD_CIDXFTHRESHO_S) + +#define FW_EQ_CTRL_CMD_CIDXFTHRESH_S 16 +#define FW_EQ_CTRL_CMD_CIDXFTHRESH_V(x) ((x) << FW_EQ_CTRL_CMD_CIDXFTHRESH_S) + +#define FW_EQ_CTRL_CMD_EQSIZE_S 0 +#define 
FW_EQ_CTRL_CMD_EQSIZE_V(x) ((x) << FW_EQ_CTRL_CMD_EQSIZE_S) struct fw_eq_ofld_cmd { __be32 op_to_vfn; @@ -1283,45 +1734,112 @@ struct fw_eq_ofld_cmd { __be64 eqaddr; }; -#define FW_EQ_OFLD_CMD_PFN(x) ((x) << 8) -#define FW_EQ_OFLD_CMD_VFN(x) ((x) << 0) - -#define FW_EQ_OFLD_CMD_ALLOC (1U << 31) -#define FW_EQ_OFLD_CMD_FREE (1U << 30) -#define FW_EQ_OFLD_CMD_MODIFY (1U << 29) -#define FW_EQ_OFLD_CMD_EQSTART (1U << 28) -#define FW_EQ_OFLD_CMD_EQSTOP (1U << 27) - -#define FW_EQ_OFLD_CMD_EQID(x) ((x) << 0) -#define FW_EQ_OFLD_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff) -#define FW_EQ_OFLD_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff) - -#define FW_EQ_OFLD_CMD_FETCHSZM(x) ((x) << 26) -#define FW_EQ_OFLD_CMD_STATUSPGNS(x) ((x) << 25) -#define FW_EQ_OFLD_CMD_STATUSPGRO(x) ((x) << 24) -#define FW_EQ_OFLD_CMD_FETCHNS(x) ((x) << 23) -#define FW_EQ_OFLD_CMD_FETCHRO(x) ((x) << 22) -#define FW_EQ_OFLD_CMD_HOSTFCMODE(x) ((x) << 20) -#define FW_EQ_OFLD_CMD_CPRIO(x) ((x) << 19) -#define FW_EQ_OFLD_CMD_ONCHIP(x) ((x) << 18) -#define FW_EQ_OFLD_CMD_PCIECHN(x) ((x) << 16) -#define FW_EQ_OFLD_CMD_IQID(x) ((x) << 0) - -#define FW_EQ_OFLD_CMD_DCAEN(x) ((x) << 31) -#define FW_EQ_OFLD_CMD_DCACPU(x) ((x) << 26) -#define FW_EQ_OFLD_CMD_FBMIN(x) ((x) << 23) -#define FW_EQ_OFLD_CMD_FBMAX(x) ((x) << 20) -#define FW_EQ_OFLD_CMD_CIDXFTHRESHO(x) ((x) << 19) -#define FW_EQ_OFLD_CMD_CIDXFTHRESH(x) ((x) << 16) -#define FW_EQ_OFLD_CMD_EQSIZE(x) ((x) << 0) +#define FW_EQ_OFLD_CMD_PFN_S 8 +#define FW_EQ_OFLD_CMD_PFN_V(x) ((x) << FW_EQ_OFLD_CMD_PFN_S) + +#define FW_EQ_OFLD_CMD_VFN_S 0 +#define FW_EQ_OFLD_CMD_VFN_V(x) ((x) << FW_EQ_OFLD_CMD_VFN_S) + +#define FW_EQ_OFLD_CMD_ALLOC_S 31 +#define FW_EQ_OFLD_CMD_ALLOC_V(x) ((x) << FW_EQ_OFLD_CMD_ALLOC_S) +#define FW_EQ_OFLD_CMD_ALLOC_F FW_EQ_OFLD_CMD_ALLOC_V(1U) + +#define FW_EQ_OFLD_CMD_FREE_S 30 +#define FW_EQ_OFLD_CMD_FREE_V(x) ((x) << FW_EQ_OFLD_CMD_FREE_S) +#define FW_EQ_OFLD_CMD_FREE_F FW_EQ_OFLD_CMD_FREE_V(1U) + +#define FW_EQ_OFLD_CMD_MODIFY_S 29 +#define FW_EQ_OFLD_CMD_MODIFY_V(x) ((x) << FW_EQ_OFLD_CMD_MODIFY_S) +#define FW_EQ_OFLD_CMD_MODIFY_F FW_EQ_OFLD_CMD_MODIFY_V(1U) + +#define FW_EQ_OFLD_CMD_EQSTART_S 28 +#define FW_EQ_OFLD_CMD_EQSTART_V(x) ((x) << FW_EQ_OFLD_CMD_EQSTART_S) +#define FW_EQ_OFLD_CMD_EQSTART_F FW_EQ_OFLD_CMD_EQSTART_V(1U) + +#define FW_EQ_OFLD_CMD_EQSTOP_S 27 +#define FW_EQ_OFLD_CMD_EQSTOP_V(x) ((x) << FW_EQ_OFLD_CMD_EQSTOP_S) +#define FW_EQ_OFLD_CMD_EQSTOP_F FW_EQ_OFLD_CMD_EQSTOP_V(1U) + +#define FW_EQ_OFLD_CMD_EQID_S 0 +#define FW_EQ_OFLD_CMD_EQID_M 0xfffff +#define FW_EQ_OFLD_CMD_EQID_V(x) ((x) << FW_EQ_OFLD_CMD_EQID_S) +#define FW_EQ_OFLD_CMD_EQID_G(x) \ + (((x) >> FW_EQ_OFLD_CMD_EQID_S) & FW_EQ_OFLD_CMD_EQID_M) + +#define FW_EQ_OFLD_CMD_PHYSEQID_S 0 +#define FW_EQ_OFLD_CMD_PHYSEQID_M 0xfffff +#define FW_EQ_OFLD_CMD_PHYSEQID_G(x) \ + (((x) >> FW_EQ_OFLD_CMD_PHYSEQID_S) & FW_EQ_OFLD_CMD_PHYSEQID_M) + +#define FW_EQ_OFLD_CMD_FETCHSZM_S 26 +#define FW_EQ_OFLD_CMD_FETCHSZM_V(x) ((x) << FW_EQ_OFLD_CMD_FETCHSZM_S) + +#define FW_EQ_OFLD_CMD_STATUSPGNS_S 25 +#define FW_EQ_OFLD_CMD_STATUSPGNS_V(x) ((x) << FW_EQ_OFLD_CMD_STATUSPGNS_S) + +#define FW_EQ_OFLD_CMD_STATUSPGRO_S 24 +#define FW_EQ_OFLD_CMD_STATUSPGRO_V(x) ((x) << FW_EQ_OFLD_CMD_STATUSPGRO_S) + +#define FW_EQ_OFLD_CMD_FETCHNS_S 23 +#define FW_EQ_OFLD_CMD_FETCHNS_V(x) ((x) << FW_EQ_OFLD_CMD_FETCHNS_S) + +#define FW_EQ_OFLD_CMD_FETCHRO_S 22 +#define FW_EQ_OFLD_CMD_FETCHRO_V(x) ((x) << FW_EQ_OFLD_CMD_FETCHRO_S) +#define FW_EQ_OFLD_CMD_FETCHRO_F FW_EQ_OFLD_CMD_FETCHRO_V(1U) + +#define FW_EQ_OFLD_CMD_HOSTFCMODE_S 
20 +#define FW_EQ_OFLD_CMD_HOSTFCMODE_V(x) ((x) << FW_EQ_OFLD_CMD_HOSTFCMODE_S) + +#define FW_EQ_OFLD_CMD_CPRIO_S 19 +#define FW_EQ_OFLD_CMD_CPRIO_V(x) ((x) << FW_EQ_OFLD_CMD_CPRIO_S) + +#define FW_EQ_OFLD_CMD_ONCHIP_S 18 +#define FW_EQ_OFLD_CMD_ONCHIP_V(x) ((x) << FW_EQ_OFLD_CMD_ONCHIP_S) + +#define FW_EQ_OFLD_CMD_PCIECHN_S 16 +#define FW_EQ_OFLD_CMD_PCIECHN_V(x) ((x) << FW_EQ_OFLD_CMD_PCIECHN_S) + +#define FW_EQ_OFLD_CMD_IQID_S 0 +#define FW_EQ_OFLD_CMD_IQID_V(x) ((x) << FW_EQ_OFLD_CMD_IQID_S) + +#define FW_EQ_OFLD_CMD_DCAEN_S 31 +#define FW_EQ_OFLD_CMD_DCAEN_V(x) ((x) << FW_EQ_OFLD_CMD_DCAEN_S) + +#define FW_EQ_OFLD_CMD_DCACPU_S 26 +#define FW_EQ_OFLD_CMD_DCACPU_V(x) ((x) << FW_EQ_OFLD_CMD_DCACPU_S) + +#define FW_EQ_OFLD_CMD_FBMIN_S 23 +#define FW_EQ_OFLD_CMD_FBMIN_V(x) ((x) << FW_EQ_OFLD_CMD_FBMIN_S) + +#define FW_EQ_OFLD_CMD_FBMAX_S 20 +#define FW_EQ_OFLD_CMD_FBMAX_V(x) ((x) << FW_EQ_OFLD_CMD_FBMAX_S) + +#define FW_EQ_OFLD_CMD_CIDXFTHRESHO_S 19 +#define FW_EQ_OFLD_CMD_CIDXFTHRESHO_V(x) \ + ((x) << FW_EQ_OFLD_CMD_CIDXFTHRESHO_S) + +#define FW_EQ_OFLD_CMD_CIDXFTHRESH_S 16 +#define FW_EQ_OFLD_CMD_CIDXFTHRESH_V(x) ((x) << FW_EQ_OFLD_CMD_CIDXFTHRESH_S) + +#define FW_EQ_OFLD_CMD_EQSIZE_S 0 +#define FW_EQ_OFLD_CMD_EQSIZE_V(x) ((x) << FW_EQ_OFLD_CMD_EQSIZE_S) /* * Macros for VIID parsing: * VIID - [10:8] PFN, [7] VI Valid, [6:0] VI number */ -#define FW_VIID_PFN_GET(x) (((x) >> 8) & 0x7) -#define FW_VIID_VIVLD_GET(x) (((x) >> 7) & 0x1) -#define FW_VIID_VIN_GET(x) (((x) >> 0) & 0x7F) + +#define FW_VIID_PFN_S 8 +#define FW_VIID_PFN_M 0x7 +#define FW_VIID_PFN_G(x) (((x) >> FW_VIID_PFN_S) & FW_VIID_PFN_M) + +#define FW_VIID_VIVLD_S 7 +#define FW_VIID_VIVLD_M 0x1 +#define FW_VIID_VIVLD_G(x) (((x) >> FW_VIID_VIVLD_S) & FW_VIID_VIVLD_M) + +#define FW_VIID_VIN_S 0 +#define FW_VIID_VIN_M 0x7F +#define FW_VIID_VIN_G(x) (((x) >> FW_VIID_VIN_S) & FW_VIID_VIN_M) struct fw_vi_cmd { __be32 op_to_vfn; @@ -1341,15 +1859,35 @@ struct fw_vi_cmd { __be64 r10; }; -#define FW_VI_CMD_PFN(x) ((x) << 8) -#define FW_VI_CMD_VFN(x) ((x) << 0) -#define FW_VI_CMD_ALLOC (1U << 31) -#define FW_VI_CMD_FREE (1U << 30) -#define FW_VI_CMD_VIID(x) ((x) << 0) -#define FW_VI_CMD_VIID_GET(x) ((x) & 0xfff) -#define FW_VI_CMD_PORTID(x) ((x) << 4) -#define FW_VI_CMD_PORTID_GET(x) (((x) >> 4) & 0xf) -#define FW_VI_CMD_RSSSIZE_GET(x) (((x) >> 0) & 0x7ff) +#define FW_VI_CMD_PFN_S 8 +#define FW_VI_CMD_PFN_V(x) ((x) << FW_VI_CMD_PFN_S) + +#define FW_VI_CMD_VFN_S 0 +#define FW_VI_CMD_VFN_V(x) ((x) << FW_VI_CMD_VFN_S) + +#define FW_VI_CMD_ALLOC_S 31 +#define FW_VI_CMD_ALLOC_V(x) ((x) << FW_VI_CMD_ALLOC_S) +#define FW_VI_CMD_ALLOC_F FW_VI_CMD_ALLOC_V(1U) + +#define FW_VI_CMD_FREE_S 30 +#define FW_VI_CMD_FREE_V(x) ((x) << FW_VI_CMD_FREE_S) +#define FW_VI_CMD_FREE_F FW_VI_CMD_FREE_V(1U) + +#define FW_VI_CMD_VIID_S 0 +#define FW_VI_CMD_VIID_M 0xfff +#define FW_VI_CMD_VIID_V(x) ((x) << FW_VI_CMD_VIID_S) +#define FW_VI_CMD_VIID_G(x) (((x) >> FW_VI_CMD_VIID_S) & FW_VI_CMD_VIID_M) + +#define FW_VI_CMD_PORTID_S 4 +#define FW_VI_CMD_PORTID_M 0xf +#define FW_VI_CMD_PORTID_V(x) ((x) << FW_VI_CMD_PORTID_S) +#define FW_VI_CMD_PORTID_G(x) \ + (((x) >> FW_VI_CMD_PORTID_S) & FW_VI_CMD_PORTID_M) + +#define FW_VI_CMD_RSSSIZE_S 0 +#define FW_VI_CMD_RSSSIZE_M 0x7ff +#define FW_VI_CMD_RSSSIZE_G(x) \ + (((x) >> FW_VI_CMD_RSSSIZE_S) & FW_VI_CMD_RSSSIZE_M) /* Special VI_MAC command index ids */ #define FW_VI_MAC_ADD_MAC 0x3FF @@ -1385,16 +1923,37 @@ struct fw_vi_mac_cmd { } u; }; -#define FW_VI_MAC_CMD_VIID(x) ((x) << 0) -#define FW_VI_MAC_CMD_FREEMACS(x) ((x) << 
31) -#define FW_VI_MAC_CMD_HASHVECEN (1U << 23) -#define FW_VI_MAC_CMD_HASHUNIEN(x) ((x) << 22) -#define FW_VI_MAC_CMD_VALID (1U << 15) -#define FW_VI_MAC_CMD_PRIO(x) ((x) << 12) -#define FW_VI_MAC_CMD_SMAC_RESULT(x) ((x) << 10) -#define FW_VI_MAC_CMD_SMAC_RESULT_GET(x) (((x) >> 10) & 0x3) -#define FW_VI_MAC_CMD_IDX(x) ((x) << 0) -#define FW_VI_MAC_CMD_IDX_GET(x) (((x) >> 0) & 0x3ff) +#define FW_VI_MAC_CMD_VIID_S 0 +#define FW_VI_MAC_CMD_VIID_V(x) ((x) << FW_VI_MAC_CMD_VIID_S) + +#define FW_VI_MAC_CMD_FREEMACS_S 31 +#define FW_VI_MAC_CMD_FREEMACS_V(x) ((x) << FW_VI_MAC_CMD_FREEMACS_S) + +#define FW_VI_MAC_CMD_HASHVECEN_S 23 +#define FW_VI_MAC_CMD_HASHVECEN_V(x) ((x) << FW_VI_MAC_CMD_HASHVECEN_S) +#define FW_VI_MAC_CMD_HASHVECEN_F FW_VI_MAC_CMD_HASHVECEN_V(1U) + +#define FW_VI_MAC_CMD_HASHUNIEN_S 22 +#define FW_VI_MAC_CMD_HASHUNIEN_V(x) ((x) << FW_VI_MAC_CMD_HASHUNIEN_S) + +#define FW_VI_MAC_CMD_VALID_S 15 +#define FW_VI_MAC_CMD_VALID_V(x) ((x) << FW_VI_MAC_CMD_VALID_S) +#define FW_VI_MAC_CMD_VALID_F FW_VI_MAC_CMD_VALID_V(1U) + +#define FW_VI_MAC_CMD_PRIO_S 12 +#define FW_VI_MAC_CMD_PRIO_V(x) ((x) << FW_VI_MAC_CMD_PRIO_S) + +#define FW_VI_MAC_CMD_SMAC_RESULT_S 10 +#define FW_VI_MAC_CMD_SMAC_RESULT_M 0x3 +#define FW_VI_MAC_CMD_SMAC_RESULT_V(x) ((x) << FW_VI_MAC_CMD_SMAC_RESULT_S) +#define FW_VI_MAC_CMD_SMAC_RESULT_G(x) \ + (((x) >> FW_VI_MAC_CMD_SMAC_RESULT_S) & FW_VI_MAC_CMD_SMAC_RESULT_M) + +#define FW_VI_MAC_CMD_IDX_S 0 +#define FW_VI_MAC_CMD_IDX_M 0x3ff +#define FW_VI_MAC_CMD_IDX_V(x) ((x) << FW_VI_MAC_CMD_IDX_S) +#define FW_VI_MAC_CMD_IDX_G(x) \ + (((x) >> FW_VI_MAC_CMD_IDX_S) & FW_VI_MAC_CMD_IDX_M) #define FW_RXMODE_MTU_NO_CHG 65535 @@ -1405,17 +1964,30 @@ struct fw_vi_rxmode_cmd { __be32 r4_lo; }; -#define FW_VI_RXMODE_CMD_VIID(x) ((x) << 0) -#define FW_VI_RXMODE_CMD_MTU_MASK 0xffff -#define FW_VI_RXMODE_CMD_MTU(x) ((x) << 16) -#define FW_VI_RXMODE_CMD_PROMISCEN_MASK 0x3 -#define FW_VI_RXMODE_CMD_PROMISCEN(x) ((x) << 14) -#define FW_VI_RXMODE_CMD_ALLMULTIEN_MASK 0x3 -#define FW_VI_RXMODE_CMD_ALLMULTIEN(x) ((x) << 12) -#define FW_VI_RXMODE_CMD_BROADCASTEN_MASK 0x3 -#define FW_VI_RXMODE_CMD_BROADCASTEN(x) ((x) << 10) -#define FW_VI_RXMODE_CMD_VLANEXEN_MASK 0x3 -#define FW_VI_RXMODE_CMD_VLANEXEN(x) ((x) << 8) +#define FW_VI_RXMODE_CMD_VIID_S 0 +#define FW_VI_RXMODE_CMD_VIID_V(x) ((x) << FW_VI_RXMODE_CMD_VIID_S) + +#define FW_VI_RXMODE_CMD_MTU_S 16 +#define FW_VI_RXMODE_CMD_MTU_M 0xffff +#define FW_VI_RXMODE_CMD_MTU_V(x) ((x) << FW_VI_RXMODE_CMD_MTU_S) + +#define FW_VI_RXMODE_CMD_PROMISCEN_S 14 +#define FW_VI_RXMODE_CMD_PROMISCEN_M 0x3 +#define FW_VI_RXMODE_CMD_PROMISCEN_V(x) ((x) << FW_VI_RXMODE_CMD_PROMISCEN_S) + +#define FW_VI_RXMODE_CMD_ALLMULTIEN_S 12 +#define FW_VI_RXMODE_CMD_ALLMULTIEN_M 0x3 +#define FW_VI_RXMODE_CMD_ALLMULTIEN_V(x) \ + ((x) << FW_VI_RXMODE_CMD_ALLMULTIEN_S) + +#define FW_VI_RXMODE_CMD_BROADCASTEN_S 10 +#define FW_VI_RXMODE_CMD_BROADCASTEN_M 0x3 +#define FW_VI_RXMODE_CMD_BROADCASTEN_V(x) \ + ((x) << FW_VI_RXMODE_CMD_BROADCASTEN_S) + +#define FW_VI_RXMODE_CMD_VLANEXEN_S 8 +#define FW_VI_RXMODE_CMD_VLANEXEN_M 0x3 +#define FW_VI_RXMODE_CMD_VLANEXEN_V(x) ((x) << FW_VI_RXMODE_CMD_VLANEXEN_S) struct fw_vi_enable_cmd { __be32 op_to_viid; @@ -1425,11 +1997,21 @@ struct fw_vi_enable_cmd { __be32 r4; }; -#define FW_VI_ENABLE_CMD_VIID(x) ((x) << 0) -#define FW_VI_ENABLE_CMD_IEN(x) ((x) << 31) -#define FW_VI_ENABLE_CMD_EEN(x) ((x) << 30) -#define FW_VI_ENABLE_CMD_DCB_INFO(x) ((x) << 28) -#define FW_VI_ENABLE_CMD_LED (1U << 29) +#define FW_VI_ENABLE_CMD_VIID_S 0 +#define 
FW_VI_ENABLE_CMD_VIID_V(x) ((x) << FW_VI_ENABLE_CMD_VIID_S) + +#define FW_VI_ENABLE_CMD_IEN_S 31 +#define FW_VI_ENABLE_CMD_IEN_V(x) ((x) << FW_VI_ENABLE_CMD_IEN_S) + +#define FW_VI_ENABLE_CMD_EEN_S 30 +#define FW_VI_ENABLE_CMD_EEN_V(x) ((x) << FW_VI_ENABLE_CMD_EEN_S) + +#define FW_VI_ENABLE_CMD_LED_S 29 +#define FW_VI_ENABLE_CMD_LED_V(x) ((x) << FW_VI_ENABLE_CMD_LED_S) +#define FW_VI_ENABLE_CMD_LED_F FW_VI_ENABLE_CMD_LED_V(1U) + +#define FW_VI_ENABLE_CMD_DCB_INFO_S 28 +#define FW_VI_ENABLE_CMD_DCB_INFO_V(x) ((x) << FW_VI_ENABLE_CMD_DCB_INFO_S) /* VI VF stats offset definitions */ #define VI_VF_NUM_STATS 16 @@ -1529,9 +2111,14 @@ struct fw_vi_stats_cmd { } u; }; -#define FW_VI_STATS_CMD_VIID(x) ((x) << 0) -#define FW_VI_STATS_CMD_NSTATS(x) ((x) << 12) -#define FW_VI_STATS_CMD_IX(x) ((x) << 0) +#define FW_VI_STATS_CMD_VIID_S 0 +#define FW_VI_STATS_CMD_VIID_V(x) ((x) << FW_VI_STATS_CMD_VIID_S) + +#define FW_VI_STATS_CMD_NSTATS_S 12 +#define FW_VI_STATS_CMD_NSTATS_V(x) ((x) << FW_VI_STATS_CMD_NSTATS_S) + +#define FW_VI_STATS_CMD_IX_S 0 +#define FW_VI_STATS_CMD_IX_V(x) ((x) << FW_VI_STATS_CMD_IX_S) struct fw_acl_mac_cmd { __be32 op_to_vfn; @@ -1548,9 +2135,14 @@ struct fw_acl_mac_cmd { u8 macaddr3[6]; }; -#define FW_ACL_MAC_CMD_PFN(x) ((x) << 8) -#define FW_ACL_MAC_CMD_VFN(x) ((x) << 0) -#define FW_ACL_MAC_CMD_EN(x) ((x) << 31) +#define FW_ACL_MAC_CMD_PFN_S 8 +#define FW_ACL_MAC_CMD_PFN_V(x) ((x) << FW_ACL_MAC_CMD_PFN_S) + +#define FW_ACL_MAC_CMD_VFN_S 0 +#define FW_ACL_MAC_CMD_VFN_V(x) ((x) << FW_ACL_MAC_CMD_VFN_S) + +#define FW_ACL_MAC_CMD_EN_S 31 +#define FW_ACL_MAC_CMD_EN_V(x) ((x) << FW_ACL_MAC_CMD_EN_S) struct fw_acl_vlan_cmd { __be32 op_to_vfn; @@ -1561,11 +2153,20 @@ struct fw_acl_vlan_cmd { __be16 vlanid[16]; }; -#define FW_ACL_VLAN_CMD_PFN(x) ((x) << 8) -#define FW_ACL_VLAN_CMD_VFN(x) ((x) << 0) -#define FW_ACL_VLAN_CMD_EN(x) ((x) << 31) -#define FW_ACL_VLAN_CMD_DROPNOVLAN(x) ((x) << 7) -#define FW_ACL_VLAN_CMD_FM(x) ((x) << 6) +#define FW_ACL_VLAN_CMD_PFN_S 8 +#define FW_ACL_VLAN_CMD_PFN_V(x) ((x) << FW_ACL_VLAN_CMD_PFN_S) + +#define FW_ACL_VLAN_CMD_VFN_S 0 +#define FW_ACL_VLAN_CMD_VFN_V(x) ((x) << FW_ACL_VLAN_CMD_VFN_S) + +#define FW_ACL_VLAN_CMD_EN_S 31 +#define FW_ACL_VLAN_CMD_EN_V(x) ((x) << FW_ACL_VLAN_CMD_EN_S) + +#define FW_ACL_VLAN_CMD_DROPNOVLAN_S 7 +#define FW_ACL_VLAN_CMD_DROPNOVLAN_V(x) ((x) << FW_ACL_VLAN_CMD_DROPNOVLAN_S) + +#define FW_ACL_VLAN_CMD_FM_S 6 +#define FW_ACL_VLAN_CMD_FM_V(x) ((x) << FW_ACL_VLAN_CMD_FM_S) enum fw_port_cap { FW_PORT_CAP_SPEED_100M = 0x0001, @@ -1587,13 +2188,14 @@ enum fw_port_cap { }; enum fw_port_mdi { - FW_PORT_MDI_UNCHANGED, - FW_PORT_MDI_AUTO, - FW_PORT_MDI_F_STRAIGHT, - FW_PORT_MDI_F_CROSSOVER + FW_PORT_CAP_MDI_UNCHANGED, + FW_PORT_CAP_MDI_AUTO, + FW_PORT_CAP_MDI_F_STRAIGHT, + FW_PORT_CAP_MDI_F_CROSSOVER }; -#define FW_PORT_MDI(x) ((x) << 9) +#define FW_PORT_CAP_MDI_S 9 +#define FW_PORT_CAP_MDI_V(x) ((x) << FW_PORT_CAP_MDI_S) enum fw_port_action { FW_PORT_ACTION_L1_CFG = 0x0001, @@ -1753,52 +2355,105 @@ struct fw_port_cmd { } u; }; -#define FW_PORT_CMD_READ (1U << 22) - -#define FW_PORT_CMD_PORTID(x) ((x) << 0) -#define FW_PORT_CMD_PORTID_GET(x) (((x) >> 0) & 0xf) - -#define FW_PORT_CMD_ACTION(x) ((x) << 16) -#define FW_PORT_CMD_ACTION_GET(x) (((x) >> 16) & 0xffff) - -#define FW_PORT_CMD_CTLBF(x) ((x) << 10) -#define FW_PORT_CMD_OVLAN3(x) ((x) << 7) -#define FW_PORT_CMD_OVLAN2(x) ((x) << 6) -#define FW_PORT_CMD_OVLAN1(x) ((x) << 5) -#define FW_PORT_CMD_OVLAN0(x) ((x) << 4) -#define FW_PORT_CMD_IVLAN0(x) ((x) << 3) - -#define 
FW_PORT_CMD_TXIPG(x) ((x) << 19) - -#define FW_PORT_CMD_LSTATUS (1U << 31) -#define FW_PORT_CMD_LSTATUS_GET(x) (((x) >> 31) & 0x1) -#define FW_PORT_CMD_LSPEED(x) ((x) << 24) -#define FW_PORT_CMD_LSPEED_GET(x) (((x) >> 24) & 0x3f) -#define FW_PORT_CMD_TXPAUSE (1U << 23) -#define FW_PORT_CMD_RXPAUSE (1U << 22) -#define FW_PORT_CMD_MDIOCAP (1U << 21) -#define FW_PORT_CMD_MDIOADDR_GET(x) (((x) >> 16) & 0x1f) -#define FW_PORT_CMD_LPTXPAUSE (1U << 15) -#define FW_PORT_CMD_LPRXPAUSE (1U << 14) -#define FW_PORT_CMD_PTYPE_MASK 0x1f -#define FW_PORT_CMD_PTYPE_GET(x) (((x) >> 8) & FW_PORT_CMD_PTYPE_MASK) -#define FW_PORT_CMD_MODTYPE_MASK 0x1f -#define FW_PORT_CMD_MODTYPE_GET(x) (((x) >> 0) & FW_PORT_CMD_MODTYPE_MASK) - -#define FW_PORT_CMD_DCBXDIS (1U << 7) -#define FW_PORT_CMD_APPLY (1U << 7) -#define FW_PORT_CMD_ALL_SYNCD (1U << 7) -#define FW_PORT_CMD_DCB_VERSION_GET(x) (((x) >> 8) & 0xf) - -#define FW_PORT_CMD_PPPEN(x) ((x) << 31) -#define FW_PORT_CMD_TPSRC(x) ((x) << 28) -#define FW_PORT_CMD_NCSISRC(x) ((x) << 24) - -#define FW_PORT_CMD_CH0(x) ((x) << 20) -#define FW_PORT_CMD_CH1(x) ((x) << 16) -#define FW_PORT_CMD_CH2(x) ((x) << 12) -#define FW_PORT_CMD_CH3(x) ((x) << 8) -#define FW_PORT_CMD_NCSICH(x) ((x) << 4) +#define FW_PORT_CMD_READ_S 22 +#define FW_PORT_CMD_READ_V(x) ((x) << FW_PORT_CMD_READ_S) +#define FW_PORT_CMD_READ_F FW_PORT_CMD_READ_V(1U) + +#define FW_PORT_CMD_PORTID_S 0 +#define FW_PORT_CMD_PORTID_M 0xf +#define FW_PORT_CMD_PORTID_V(x) ((x) << FW_PORT_CMD_PORTID_S) +#define FW_PORT_CMD_PORTID_G(x) \ + (((x) >> FW_PORT_CMD_PORTID_S) & FW_PORT_CMD_PORTID_M) + +#define FW_PORT_CMD_ACTION_S 16 +#define FW_PORT_CMD_ACTION_M 0xffff +#define FW_PORT_CMD_ACTION_V(x) ((x) << FW_PORT_CMD_ACTION_S) +#define FW_PORT_CMD_ACTION_G(x) \ + (((x) >> FW_PORT_CMD_ACTION_S) & FW_PORT_CMD_ACTION_M) + +#define FW_PORT_CMD_OVLAN3_S 7 +#define FW_PORT_CMD_OVLAN3_V(x) ((x) << FW_PORT_CMD_OVLAN3_S) + +#define FW_PORT_CMD_OVLAN2_S 6 +#define FW_PORT_CMD_OVLAN2_V(x) ((x) << FW_PORT_CMD_OVLAN2_S) + +#define FW_PORT_CMD_OVLAN1_S 5 +#define FW_PORT_CMD_OVLAN1_V(x) ((x) << FW_PORT_CMD_OVLAN1_S) + +#define FW_PORT_CMD_OVLAN0_S 4 +#define FW_PORT_CMD_OVLAN0_V(x) ((x) << FW_PORT_CMD_OVLAN0_S) + +#define FW_PORT_CMD_IVLAN0_S 3 +#define FW_PORT_CMD_IVLAN0_V(x) ((x) << FW_PORT_CMD_IVLAN0_S) + +#define FW_PORT_CMD_TXIPG_S 3 +#define FW_PORT_CMD_TXIPG_V(x) ((x) << FW_PORT_CMD_TXIPG_S) + +#define FW_PORT_CMD_LSTATUS_S 31 +#define FW_PORT_CMD_LSTATUS_M 0x1 +#define FW_PORT_CMD_LSTATUS_V(x) ((x) << FW_PORT_CMD_LSTATUS_S) +#define FW_PORT_CMD_LSTATUS_G(x) \ + (((x) >> FW_PORT_CMD_LSTATUS_S) & FW_PORT_CMD_LSTATUS_M) +#define FW_PORT_CMD_LSTATUS_F FW_PORT_CMD_LSTATUS_V(1U) + +#define FW_PORT_CMD_LSPEED_S 24 +#define FW_PORT_CMD_LSPEED_M 0x3f +#define FW_PORT_CMD_LSPEED_V(x) ((x) << FW_PORT_CMD_LSPEED_S) +#define FW_PORT_CMD_LSPEED_G(x) \ + (((x) >> FW_PORT_CMD_LSPEED_S) & FW_PORT_CMD_LSPEED_M) + +#define FW_PORT_CMD_TXPAUSE_S 23 +#define FW_PORT_CMD_TXPAUSE_V(x) ((x) << FW_PORT_CMD_TXPAUSE_S) +#define FW_PORT_CMD_TXPAUSE_F FW_PORT_CMD_TXPAUSE_V(1U) + +#define FW_PORT_CMD_RXPAUSE_S 22 +#define FW_PORT_CMD_RXPAUSE_V(x) ((x) << FW_PORT_CMD_RXPAUSE_S) +#define FW_PORT_CMD_RXPAUSE_F FW_PORT_CMD_RXPAUSE_V(1U) + +#define FW_PORT_CMD_MDIOCAP_S 21 +#define FW_PORT_CMD_MDIOCAP_V(x) ((x) << FW_PORT_CMD_MDIOCAP_S) +#define FW_PORT_CMD_MDIOCAP_F FW_PORT_CMD_MDIOCAP_V(1U) + +#define FW_PORT_CMD_MDIOADDR_S 16 +#define FW_PORT_CMD_MDIOADDR_M 0x1f +#define FW_PORT_CMD_MDIOADDR_G(x) \ + (((x) >> FW_PORT_CMD_MDIOADDR_S) & 
FW_PORT_CMD_MDIOADDR_M) + +#define FW_PORT_CMD_LPTXPAUSE_S 15 +#define FW_PORT_CMD_LPTXPAUSE_V(x) ((x) << FW_PORT_CMD_LPTXPAUSE_S) +#define FW_PORT_CMD_LPTXPAUSE_F FW_PORT_CMD_LPTXPAUSE_V(1U) + +#define FW_PORT_CMD_LPRXPAUSE_S 14 +#define FW_PORT_CMD_LPRXPAUSE_V(x) ((x) << FW_PORT_CMD_LPRXPAUSE_S) +#define FW_PORT_CMD_LPRXPAUSE_F FW_PORT_CMD_LPRXPAUSE_V(1U) + +#define FW_PORT_CMD_PTYPE_S 8 +#define FW_PORT_CMD_PTYPE_M 0x1f +#define FW_PORT_CMD_PTYPE_G(x) \ + (((x) >> FW_PORT_CMD_PTYPE_S) & FW_PORT_CMD_PTYPE_M) + +#define FW_PORT_CMD_MODTYPE_S 0 +#define FW_PORT_CMD_MODTYPE_M 0x1f +#define FW_PORT_CMD_MODTYPE_V(x) ((x) << FW_PORT_CMD_MODTYPE_S) +#define FW_PORT_CMD_MODTYPE_G(x) \ + (((x) >> FW_PORT_CMD_MODTYPE_S) & FW_PORT_CMD_MODTYPE_M) + +#define FW_PORT_CMD_DCBXDIS_S 7 +#define FW_PORT_CMD_DCBXDIS_V(x) ((x) << FW_PORT_CMD_DCBXDIS_S) +#define FW_PORT_CMD_DCBXDIS_F FW_PORT_CMD_DCBXDIS_V(1U) + +#define FW_PORT_CMD_APPLY_S 7 +#define FW_PORT_CMD_APPLY_V(x) ((x) << FW_PORT_CMD_APPLY_S) +#define FW_PORT_CMD_APPLY_F FW_PORT_CMD_APPLY_V(1U) + +#define FW_PORT_CMD_ALL_SYNCD_S 7 +#define FW_PORT_CMD_ALL_SYNCD_V(x) ((x) << FW_PORT_CMD_ALL_SYNCD_S) +#define FW_PORT_CMD_ALL_SYNCD_F FW_PORT_CMD_ALL_SYNCD_V(1U) + +#define FW_PORT_CMD_DCB_VERSION_S 12 +#define FW_PORT_CMD_DCB_VERSION_M 0x7 +#define FW_PORT_CMD_DCB_VERSION_G(x) \ + (((x) >> FW_PORT_CMD_DCB_VERSION_S) & FW_PORT_CMD_DCB_VERSION_M) enum fw_port_type { FW_PORT_TYPE_FIBER_XFI, @@ -1817,7 +2472,7 @@ enum fw_port_type { FW_PORT_TYPE_QSFP, FW_PORT_TYPE_BP40_BA, - FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK + FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_M }; enum fw_port_module_type { @@ -1828,11 +2483,11 @@ enum fw_port_module_type { FW_PORT_MOD_TYPE_TWINAX_PASSIVE, FW_PORT_MOD_TYPE_TWINAX_ACTIVE, FW_PORT_MOD_TYPE_LRM, - FW_PORT_MOD_TYPE_ERROR = FW_PORT_CMD_MODTYPE_MASK - 3, - FW_PORT_MOD_TYPE_UNKNOWN = FW_PORT_CMD_MODTYPE_MASK - 2, - FW_PORT_MOD_TYPE_NOTSUPPORTED = FW_PORT_CMD_MODTYPE_MASK - 1, + FW_PORT_MOD_TYPE_ERROR = FW_PORT_CMD_MODTYPE_M - 3, + FW_PORT_MOD_TYPE_UNKNOWN = FW_PORT_CMD_MODTYPE_M - 2, + FW_PORT_MOD_TYPE_NOTSUPPORTED = FW_PORT_CMD_MODTYPE_M - 1, - FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_MASK + FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_M }; enum fw_port_mod_sub_type { @@ -1988,11 +2643,6 @@ struct fw_port_stats_cmd { } u; }; -#define FW_PORT_STATS_CMD_NSTATS(x) ((x) << 4) -#define FW_PORT_STATS_CMD_BG_BM(x) ((x) << 0) -#define FW_PORT_STATS_CMD_TX(x) ((x) << 7) -#define FW_PORT_STATS_CMD_IX(x) ((x) << 0) - /* port loopback stats */ #define FW_NUM_LB_STATS 16 enum fw_port_lb_stats_index { @@ -2048,22 +2698,13 @@ struct fw_port_lb_stats_cmd { } u; }; -#define FW_PORT_LB_STATS_CMD_LBPORT(x) ((x) << 0) -#define FW_PORT_LB_STATS_CMD_NSTATS(x) ((x) << 4) -#define FW_PORT_LB_STATS_CMD_BG_BM(x) ((x) << 0) -#define FW_PORT_LB_STATS_CMD_IX(x) ((x) << 0) - struct fw_rss_ind_tbl_cmd { __be32 op_to_viid; -#define FW_RSS_IND_TBL_CMD_VIID(x) ((x) << 0) __be32 retval_len16; __be16 niqid; __be16 startidx; __be32 r3; __be32 iq0_to_iq2; -#define FW_RSS_IND_TBL_CMD_IQ0(x) ((x) << 20) -#define FW_RSS_IND_TBL_CMD_IQ1(x) ((x) << 10) -#define FW_RSS_IND_TBL_CMD_IQ2(x) ((x) << 0) __be32 iq3_to_iq5; __be32 iq6_to_iq8; __be32 iq9_to_iq11; @@ -2077,6 +2718,18 @@ struct fw_rss_ind_tbl_cmd { __be32 r15_lo; }; +#define FW_RSS_IND_TBL_CMD_VIID_S 0 +#define FW_RSS_IND_TBL_CMD_VIID_V(x) ((x) << FW_RSS_IND_TBL_CMD_VIID_S) + +#define FW_RSS_IND_TBL_CMD_IQ0_S 20 +#define FW_RSS_IND_TBL_CMD_IQ0_V(x) ((x) << FW_RSS_IND_TBL_CMD_IQ0_S) + +#define FW_RSS_IND_TBL_CMD_IQ1_S 
10 +#define FW_RSS_IND_TBL_CMD_IQ1_V(x) ((x) << FW_RSS_IND_TBL_CMD_IQ1_S) + +#define FW_RSS_IND_TBL_CMD_IQ2_S 0 +#define FW_RSS_IND_TBL_CMD_IQ2_V(x) ((x) << FW_RSS_IND_TBL_CMD_IQ2_S) + struct fw_rss_glb_config_cmd { __be32 op_to_write; __be32 retval_len16; @@ -2090,27 +2743,75 @@ struct fw_rss_glb_config_cmd { struct fw_rss_glb_config_basicvirtual { __be32 mode_pkd; __be32 synmapen_to_hashtoeplitz; -#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN (1U << 8) -#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 (1U << 7) -#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 (1U << 6) -#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 (1U << 5) -#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 (1U << 4) -#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN (1U << 3) -#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN (1U << 2) -#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP (1U << 1) -#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ (1U << 0) __be64 r8; __be64 r9; } basicvirtual; } u; }; -#define FW_RSS_GLB_CONFIG_CMD_MODE(x) ((x) << 28) -#define FW_RSS_GLB_CONFIG_CMD_MODE_GET(x) (((x) >> 28) & 0xf) +#define FW_RSS_GLB_CONFIG_CMD_MODE_S 28 +#define FW_RSS_GLB_CONFIG_CMD_MODE_M 0xf +#define FW_RSS_GLB_CONFIG_CMD_MODE_V(x) ((x) << FW_RSS_GLB_CONFIG_CMD_MODE_S) +#define FW_RSS_GLB_CONFIG_CMD_MODE_G(x) \ + (((x) >> FW_RSS_GLB_CONFIG_CMD_MODE_S) & FW_RSS_GLB_CONFIG_CMD_MODE_M) #define FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL 0 #define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL 1 +#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_S 8 +#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_V(x) \ + ((x) << FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_S) +#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_F \ + FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_V(1U) + +#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_S 7 +#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_V(x) \ + ((x) << FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_S) +#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_F \ + FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_V(1U) + +#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_S 6 +#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_V(x) \ + ((x) << FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_S) +#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_F \ + FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_V(1U) + +#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_S 5 +#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_V(x) \ + ((x) << FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_S) +#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_F \ + FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_V(1U) + +#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_S 4 +#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_V(x) \ + ((x) << FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_S) +#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_F \ + FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_V(1U) + +#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_S 3 +#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_V(x) \ + ((x) << FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_S) +#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_F \ + FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_V(1U) + +#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_S 2 +#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_V(x) \ + ((x) << FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_S) +#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F \ + FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_V(1U) + +#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_S 1 +#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_V(x) \ + ((x) << FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_S) +#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F \ + FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_V(1U) + +#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_S 0 +#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_V(x) \ + ((x) << FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_S) +#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F \ + FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_V(1U) + struct fw_rss_vi_config_cmd { __be32 
op_to_viid; #define FW_RSS_VI_CONFIG_CMD_VIID(x) ((x) << 0) @@ -2124,19 +2825,51 @@ struct fw_rss_vi_config_cmd { struct fw_rss_vi_config_basicvirtual { __be32 r6; __be32 defaultq_to_udpen; -#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) ((x) << 16) -#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_GET(x) (((x) >> 16) & 0x3ff) -#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN (1U << 4) -#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN (1U << 3) -#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN (1U << 2) -#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN (1U << 1) -#define FW_RSS_VI_CONFIG_CMD_UDPEN (1U << 0) __be64 r9; __be64 r10; } basicvirtual; } u; }; +#define FW_RSS_VI_CONFIG_CMD_VIID_S 0 +#define FW_RSS_VI_CONFIG_CMD_VIID_V(x) ((x) << FW_RSS_VI_CONFIG_CMD_VIID_S) + +#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_S 16 +#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_M 0x3ff +#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(x) \ + ((x) << FW_RSS_VI_CONFIG_CMD_DEFAULTQ_S) +#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_G(x) \ + (((x) >> FW_RSS_VI_CONFIG_CMD_DEFAULTQ_S) & \ + FW_RSS_VI_CONFIG_CMD_DEFAULTQ_M) + +#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_S 4 +#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_V(x) \ + ((x) << FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_S) +#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F \ + FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_V(1U) + +#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_S 3 +#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_V(x) \ + ((x) << FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_S) +#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F \ + FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_V(1U) + +#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_S 2 +#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_V(x) \ + ((x) << FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_S) +#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F \ + FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_V(1U) + +#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_S 1 +#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_V(x) \ + ((x) << FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_S) +#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F \ + FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_V(1U) + +#define FW_RSS_VI_CONFIG_CMD_UDPEN_S 0 +#define FW_RSS_VI_CONFIG_CMD_UDPEN_V(x) ((x) << FW_RSS_VI_CONFIG_CMD_UDPEN_S) +#define FW_RSS_VI_CONFIG_CMD_UDPEN_F FW_RSS_VI_CONFIG_CMD_UDPEN_V(1U) + struct fw_clip_cmd { __be32 op_to_write; __be32 alloc_to_len16; @@ -2145,19 +2878,13 @@ struct fw_clip_cmd { __be32 r4[2]; }; -#define S_FW_CLIP_CMD_ALLOC 31 -#define M_FW_CLIP_CMD_ALLOC 0x1 -#define V_FW_CLIP_CMD_ALLOC(x) ((x) << S_FW_CLIP_CMD_ALLOC) -#define G_FW_CLIP_CMD_ALLOC(x) \ - (((x) >> S_FW_CLIP_CMD_ALLOC) & M_FW_CLIP_CMD_ALLOC) -#define F_FW_CLIP_CMD_ALLOC V_FW_CLIP_CMD_ALLOC(1U) +#define FW_CLIP_CMD_ALLOC_S 31 +#define FW_CLIP_CMD_ALLOC_V(x) ((x) << FW_CLIP_CMD_ALLOC_S) +#define FW_CLIP_CMD_ALLOC_F FW_CLIP_CMD_ALLOC_V(1U) -#define S_FW_CLIP_CMD_FREE 30 -#define M_FW_CLIP_CMD_FREE 0x1 -#define V_FW_CLIP_CMD_FREE(x) ((x) << S_FW_CLIP_CMD_FREE) -#define G_FW_CLIP_CMD_FREE(x) \ - (((x) >> S_FW_CLIP_CMD_FREE) & M_FW_CLIP_CMD_FREE) -#define F_FW_CLIP_CMD_FREE V_FW_CLIP_CMD_FREE(1U) +#define FW_CLIP_CMD_FREE_S 30 +#define FW_CLIP_CMD_FREE_V(x) ((x) << FW_CLIP_CMD_FREE_S) +#define FW_CLIP_CMD_FREE_F FW_CLIP_CMD_FREE_V(1U) enum fw_error_type { FW_ERROR_TYPE_EXCEPTION = 0x0, @@ -2196,7 +2923,6 @@ struct fw_error_cmd { struct fw_debug_cmd { __be32 op_type; -#define FW_DEBUG_CMD_TYPE_GET(x) ((x) & 0xff) __be32 len16_pkd; union fw_debug { struct fw_debug_assert { @@ -2219,19 +2945,35 @@ struct fw_debug_cmd { } u; }; -#define FW_PCIE_FW_ERR (1U << 31) -#define FW_PCIE_FW_INIT (1U << 30) -#define FW_PCIE_FW_HALT (1U << 29) -#define FW_PCIE_FW_MASTER_VLD (1U << 15) 
-#define FW_PCIE_FW_MASTER_MASK 0x7 -#define FW_PCIE_FW_MASTER_SHIFT 12 -#define FW_PCIE_FW_MASTER(x) ((x) << FW_PCIE_FW_MASTER_SHIFT) -#define FW_PCIE_FW_MASTER_GET(x) (((x) >> FW_PCIE_FW_MASTER_SHIFT) & \ - FW_PCIE_FW_MASTER_MASK) -#define FW_PCIE_FW_EVAL_MASK 0x7 -#define FW_PCIE_FW_EVAL_SHIFT 24 -#define FW_PCIE_FW_EVAL_GET(x) (((x) >> FW_PCIE_FW_EVAL_SHIFT) & \ - FW_PCIE_FW_EVAL_MASK) +#define FW_DEBUG_CMD_TYPE_S 0 +#define FW_DEBUG_CMD_TYPE_M 0xff +#define FW_DEBUG_CMD_TYPE_G(x) \ + (((x) >> FW_DEBUG_CMD_TYPE_S) & FW_DEBUG_CMD_TYPE_M) + +#define PCIE_FW_ERR_S 31 +#define PCIE_FW_ERR_V(x) ((x) << PCIE_FW_ERR_S) +#define PCIE_FW_ERR_F PCIE_FW_ERR_V(1U) + +#define PCIE_FW_INIT_S 30 +#define PCIE_FW_INIT_V(x) ((x) << PCIE_FW_INIT_S) +#define PCIE_FW_INIT_F PCIE_FW_INIT_V(1U) + +#define PCIE_FW_HALT_S 29 +#define PCIE_FW_HALT_V(x) ((x) << PCIE_FW_HALT_S) +#define PCIE_FW_HALT_F PCIE_FW_HALT_V(1U) + +#define PCIE_FW_EVAL_S 24 +#define PCIE_FW_EVAL_M 0x7 +#define PCIE_FW_EVAL_G(x) (((x) >> PCIE_FW_EVAL_S) & PCIE_FW_EVAL_M) + +#define PCIE_FW_MASTER_VLD_S 15 +#define PCIE_FW_MASTER_VLD_V(x) ((x) << PCIE_FW_MASTER_VLD_S) +#define PCIE_FW_MASTER_VLD_F PCIE_FW_MASTER_VLD_V(1U) + +#define PCIE_FW_MASTER_S 12 +#define PCIE_FW_MASTER_M 0x7 +#define PCIE_FW_MASTER_V(x) ((x) << PCIE_FW_MASTER_S) +#define PCIE_FW_MASTER_G(x) (((x) >> PCIE_FW_MASTER_S) & PCIE_FW_MASTER_M) struct fw_hdr { u8 ver; @@ -2259,10 +3001,25 @@ enum fw_hdr_chip { FW_HDR_CHIP_T5 }; -#define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff) -#define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff) -#define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff) -#define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff) +#define FW_HDR_FW_VER_MAJOR_S 24 +#define FW_HDR_FW_VER_MAJOR_M 0xff +#define FW_HDR_FW_VER_MAJOR_G(x) \ + (((x) >> FW_HDR_FW_VER_MAJOR_S) & FW_HDR_FW_VER_MAJOR_M) + +#define FW_HDR_FW_VER_MINOR_S 16 +#define FW_HDR_FW_VER_MINOR_M 0xff +#define FW_HDR_FW_VER_MINOR_G(x) \ + (((x) >> FW_HDR_FW_VER_MINOR_S) & FW_HDR_FW_VER_MINOR_M) + +#define FW_HDR_FW_VER_MICRO_S 8 +#define FW_HDR_FW_VER_MICRO_M 0xff +#define FW_HDR_FW_VER_MICRO_G(x) \ + (((x) >> FW_HDR_FW_VER_MICRO_S) & FW_HDR_FW_VER_MICRO_M) + +#define FW_HDR_FW_VER_BUILD_S 0 +#define FW_HDR_FW_VER_BUILD_M 0xff +#define FW_HDR_FW_VER_BUILD_G(x) \ + (((x) >> FW_HDR_FW_VER_BUILD_S) & FW_HDR_FW_VER_BUILD_M) enum fw_hdr_intfver { FW_HDR_INTFVER_NIC = 0x00, diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h index 3d06e77d712..d00a751f058 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h @@ -138,6 +138,8 @@ struct sge_fl { struct rx_sw_desc *sdesc; /* address of SW RX descriptor ring */ __be64 *desc; /* address of HW RX descriptor ring */ dma_addr_t addr; /* PCI bus address of hardware ring */ + void __iomem *bar2_addr; /* address of BAR2 Queue registers */ + unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */ }; /* @@ -178,6 +180,8 @@ struct sge_rspq { u16 abs_id; /* SGE abs QID for the response Q */ __be64 *desc; /* address of hardware response ring */ dma_addr_t phys_addr; /* PCI bus address of ring */ + void __iomem *bar2_addr; /* address of BAR2 Queue registers */ + unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */ unsigned int iqe_len; /* entry size */ unsigned int size; /* capcity of response Q */ struct adapter *adapter; /* our adapter */ @@ -240,6 +244,8 @@ struct sge_txq { struct tx_sw_desc *sdesc; /* address of SW TX descriptor 
ring */ struct sge_qstat *stat; /* queue status entry */ dma_addr_t phys_addr; /* PCI bus address of hardware ring */ + void __iomem *bar2_addr; /* address of BAR2 Queue registers */ + unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */ }; /* @@ -345,6 +351,7 @@ struct sge { struct adapter { /* PCI resources */ void __iomem *regs; + void __iomem *bar2; struct pci_dev *pdev; struct device *pdev_dev; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 0b42bddaf28..aa74ec34a46 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -1030,10 +1030,10 @@ static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq, pktcnt_idx = closest_thres(&adapter->sge, cnt); if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) { - v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | - FW_PARAMS_PARAM_X( + v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) | + FW_PARAMS_PARAM_X_V( FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) | - FW_PARAMS_PARAM_YZ(rspq->cntxt_id); + FW_PARAMS_PARAM_YZ_V(rspq->cntxt_id); err = t4vf_set_params(adapter, 1, &v, &pktcnt_idx); if (err) return err; @@ -1230,14 +1230,14 @@ static void cxgb4vf_get_drvinfo(struct net_device *dev, sizeof(drvinfo->bus_info)); snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%u.%u.%u.%u, TP %u.%u.%u.%u", - FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.fwrev), - FW_HDR_FW_VER_MINOR_GET(adapter->params.dev.fwrev), - FW_HDR_FW_VER_MICRO_GET(adapter->params.dev.fwrev), - FW_HDR_FW_VER_BUILD_GET(adapter->params.dev.fwrev), - FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.tprev), - FW_HDR_FW_VER_MINOR_GET(adapter->params.dev.tprev), - FW_HDR_FW_VER_MICRO_GET(adapter->params.dev.tprev), - FW_HDR_FW_VER_BUILD_GET(adapter->params.dev.tprev)); + FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.fwrev), + FW_HDR_FW_VER_MINOR_G(adapter->params.dev.fwrev), + FW_HDR_FW_VER_MICRO_G(adapter->params.dev.fwrev), + FW_HDR_FW_VER_BUILD_G(adapter->params.dev.fwrev), + FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.tprev), + FW_HDR_FW_VER_MINOR_G(adapter->params.dev.tprev), + FW_HDR_FW_VER_MICRO_G(adapter->params.dev.tprev), + FW_HDR_FW_VER_BUILD_G(adapter->params.dev.tprev)); } /* @@ -2095,7 +2095,6 @@ static int adap_init0(struct adapter *adapter) unsigned int ethqsets; int err; u32 param, val = 0; - unsigned int chipid; /* * Wait for the device to become ready before proceeding ... @@ -2123,17 +2122,6 @@ static int adap_init0(struct adapter *adapter) return err; } - adapter->params.chip = 0; - switch (adapter->pdev->device >> 12) { - case CHELSIO_T4: - adapter->params.chip = CHELSIO_CHIP_CODE(CHELSIO_T4, 0); - break; - case CHELSIO_T5: - chipid = G_REV(t4_read_reg(adapter, A_PL_VF_REV)); - adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid); - break; - } - /* * Grab basic operational parameters. These will predominantly have * been set up by the Physical Function Driver or will be hard coded @@ -2184,8 +2172,8 @@ static int adap_init0(struct adapter *adapter) * firmware won't understand this and we'll just get * unencapsulated messages ... 
*/ - param = FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP); + param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP); val = 1; (void) t4vf_set_params(adapter, 1, ¶m, &val); @@ -2594,6 +2582,27 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, goto err_free_adapter; } + /* Wait for the device to become ready before proceeding ... + */ + err = t4vf_prep_adapter(adapter); + if (err) { + dev_err(adapter->pdev_dev, "device didn't become ready:" + " err=%d\n", err); + goto err_unmap_bar0; + } + + /* For T5 and later we want to use the new BAR-based User Doorbells, + * so we need to map BAR2 here ... + */ + if (!is_t4(adapter->params.chip)) { + adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2), + pci_resource_len(pdev, 2)); + if (!adapter->bar2) { + dev_err(adapter->pdev_dev, "cannot map BAR2 doorbells\n"); + err = -ENOMEM; + goto err_unmap_bar0; + } + } /* * Initialize adapter level features. */ @@ -2786,6 +2795,10 @@ err_free_dev: } err_unmap_bar: + if (!is_t4(adapter->params.chip)) + iounmap(adapter->bar2); + +err_unmap_bar0: iounmap(adapter->regs); err_free_adapter: @@ -2856,6 +2869,8 @@ static void cxgb4vf_pci_remove(struct pci_dev *pdev) free_netdev(netdev); } iounmap(adapter->regs); + if (!is_t4(adapter->params.chip)) + iounmap(adapter->bar2); kfree(adapter); } @@ -2908,67 +2923,18 @@ static void cxgb4vf_pci_shutdown(struct pci_dev *pdev) pci_set_drvdata(pdev, NULL); } -/* - * PCI Device registration data structures. - */ -#define CH_DEVICE(devid) \ - { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 } - -static const struct pci_device_id cxgb4vf_pci_tbl[] = { - CH_DEVICE(0xb000), /* PE10K FPGA */ - CH_DEVICE(0x4801), /* T420-cr */ - CH_DEVICE(0x4802), /* T422-cr */ - CH_DEVICE(0x4803), /* T440-cr */ - CH_DEVICE(0x4804), /* T420-bch */ - CH_DEVICE(0x4805), /* T440-bch */ - CH_DEVICE(0x4806), /* T460-ch */ - CH_DEVICE(0x4807), /* T420-so */ - CH_DEVICE(0x4808), /* T420-cx */ - CH_DEVICE(0x4809), /* T420-bt */ - CH_DEVICE(0x480a), /* T404-bt */ - CH_DEVICE(0x480d), /* T480-cr */ - CH_DEVICE(0x480e), /* T440-lp-cr */ - CH_DEVICE(0x4880), - CH_DEVICE(0x4881), - CH_DEVICE(0x4882), - CH_DEVICE(0x4883), - CH_DEVICE(0x4884), - CH_DEVICE(0x4885), - CH_DEVICE(0x4886), - CH_DEVICE(0x4887), - CH_DEVICE(0x4888), - CH_DEVICE(0x5801), /* T520-cr */ - CH_DEVICE(0x5802), /* T522-cr */ - CH_DEVICE(0x5803), /* T540-cr */ - CH_DEVICE(0x5804), /* T520-bch */ - CH_DEVICE(0x5805), /* T540-bch */ - CH_DEVICE(0x5806), /* T540-ch */ - CH_DEVICE(0x5807), /* T520-so */ - CH_DEVICE(0x5808), /* T520-cx */ - CH_DEVICE(0x5809), /* T520-bt */ - CH_DEVICE(0x580a), /* T504-bt */ - CH_DEVICE(0x580b), /* T520-sr */ - CH_DEVICE(0x580c), /* T504-bt */ - CH_DEVICE(0x580d), /* T580-cr */ - CH_DEVICE(0x580e), /* T540-lp-cr */ - CH_DEVICE(0x580f), /* Amsterdam */ - CH_DEVICE(0x5810), /* T580-lp-cr */ - CH_DEVICE(0x5811), /* T520-lp-cr */ - CH_DEVICE(0x5812), /* T560-cr */ - CH_DEVICE(0x5813), /* T580-cr */ - CH_DEVICE(0x5814), /* T580-so-cr */ - CH_DEVICE(0x5815), /* T502-bt */ - CH_DEVICE(0x5880), - CH_DEVICE(0x5881), - CH_DEVICE(0x5882), - CH_DEVICE(0x5883), - CH_DEVICE(0x5884), - CH_DEVICE(0x5885), - CH_DEVICE(0x5886), - CH_DEVICE(0x5887), - CH_DEVICE(0x5888), - { 0, } -}; +/* Macros needed to support the PCI Device ID Table ... 
+ */ +#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \ + static struct pci_device_id cxgb4vf_pci_tbl[] = { +#define CH_PCI_DEVICE_ID_FUNCTION 0x8 + +#define CH_PCI_ID_TABLE_ENTRY(devid) \ + { PCI_VDEVICE(CHELSIO, (devid)), 0 } + +#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } } + +#include "../cxgb4/t4_pci_id_tbl.h" MODULE_DESCRIPTION(DRV_DESC); MODULE_AUTHOR("Chelsio Communications"); diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index fdd078d7d82..f7fd1317d99 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -118,7 +118,7 @@ enum { * we can specify for immediate data in the firmware Ethernet TX * Work Request. */ - MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_MASK, + MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_M, /* * Max size of a WR sent through a control TX queue. @@ -525,19 +525,40 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl) { u32 val; - /* - * The SGE keeps track of its Producer and Consumer Indices in terms + /* The SGE keeps track of its Producer and Consumer Indices in terms * of Egress Queue Units so we can only tell it about integral numbers * of multiples of Free List Entries per Egress Queue Units ... */ if (fl->pend_cred >= FL_PER_EQ_UNIT) { - val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT); - if (!is_t4(adapter->params.chip)) - val |= DBTYPE(1); + if (is_t4(adapter->params.chip)) + val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT); + else + val = PIDX_T5(fl->pend_cred / FL_PER_EQ_UNIT) | + DBTYPE(1); + val |= DBPRIO(1); + + /* Make sure all memory writes to the Free List queue are + * committed before we tell the hardware about them. + */ wmb(); - t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, - DBPRIO(1) | - QID(fl->cntxt_id) | val); + + /* If we don't have access to the new User Doorbell (T5+), use + * the old doorbell mechanism; otherwise use the new BAR2 + * mechanism. + */ + if (unlikely(fl->bar2_addr == NULL)) { + t4_write_reg(adapter, + T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, + QID(fl->cntxt_id) | val); + } else { + writel(val | QID(fl->bar2_qid), + fl->bar2_addr + SGE_UDB_KDOORBELL); + + /* This Write memory Barrier will force the write to + * the User Doorbell area to be flushed. + */ + wmb(); + } fl->pend_cred %= FL_PER_EQ_UNIT; } } @@ -598,6 +619,8 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, */ BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT); + gfp |= __GFP_NOWARN; + /* * If we support large pages, prefer large buffers and fail over to * small pages if we can't allocate large pages to satisfy the refill. 
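For context on the PCI device ID table conversion above: assuming the shared ../cxgb4/t4_pci_id_tbl.h header emits one CH_PCI_ID_TABLE_ENTRY() per supported device ID between the BEGIN/END markers, the VF driver ends up with a table roughly like the sketch below. The expansion shown is an assumption, not the actual header contents; the two device IDs are taken from the removed hand-maintained list.

/*
 * Rough sketch of the assumed preprocessor result for the VF driver;
 * only two of the device IDs from the removed table are shown.
 */
#include <linux/pci.h>

static struct pci_device_id cxgb4vf_pci_tbl[] = {
	{ PCI_VDEVICE(CHELSIO, 0x4801), 0 },	/* T420-cr */
	{ PCI_VDEVICE(CHELSIO, 0x5801), 0 },	/* T520-cr */
	/* ... one entry per device ID listed in t4_pci_id_tbl.h ... */
	{ 0, }
};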
@@ -608,8 +631,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, goto alloc_small_pages; while (n) { - page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, - s->fl_pg_order); + page = __dev_alloc_pages(gfp, s->fl_pg_order); if (unlikely(!page)) { /* * We've failed inour attempt to allocate a "large @@ -653,7 +675,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, alloc_small_pages: while (n--) { - page = __skb_alloc_page(gfp | __GFP_NOWARN, NULL); + page = __dev_alloc_page(gfp); if (unlikely(!page)) { fl->alloc_failed++; break; @@ -902,7 +924,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq, sgl->addr0 = cpu_to_be64(addr[1]); } - sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | + sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags)); if (likely(--nfrags == 0)) return; @@ -948,14 +970,74 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq, static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq, int n) { - /* - * Warn if we write doorbells with the wrong priority and write - * descriptors before telling HW. + /* Make sure that all writes to the TX Descriptors are committed + * before we tell the hardware about them. */ - WARN_ON((QID(tq->cntxt_id) | PIDX(n)) & DBPRIO(1)); wmb(); - t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, - QID(tq->cntxt_id) | PIDX(n)); + + /* If we don't have access to the new User Doorbell (T5+), use the old + * doorbell mechanism; otherwise use the new BAR2 mechanism. + */ + if (unlikely(tq->bar2_addr == NULL)) { + u32 val = PIDX(n); + + t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, + QID(tq->cntxt_id) | val); + } else { + u32 val = PIDX_T5(n); + + /* T4 and later chips share the same PIDX field offset within + * the doorbell, but T5 and later shrank the field in order to + * gain a bit for Doorbell Priority. The field was absurdly + * large in the first place (14 bits) so we just use the T5 + * and later limits and warn if a Queue ID is too large. + */ + WARN_ON(val & DBPRIO(1)); + + /* If we're only writing a single Egress Unit and the BAR2 + * Queue ID is 0, we can use the Write Combining Doorbell + * Gather Buffer; otherwise we use the simple doorbell. + */ + if (n == 1 && tq->bar2_qid == 0) { + unsigned int index = (tq->pidx + ? (tq->pidx - 1) + : (tq->size - 1)); + __be64 *src = (__be64 *)&tq->desc[index]; + __be64 __iomem *dst = (__be64 *)(tq->bar2_addr + + SGE_UDB_WCDOORBELL); + unsigned int count = EQ_UNIT / sizeof(__be64); + + /* Copy the TX Descriptor in a tight loop in order to + * try to get it to the adapter in a single Write + * Combined transfer on the PCI-E Bus. If the Write + * Combine fails (say because of an interrupt, etc.) + * the hardware will simply take the last write as a + * simple doorbell write with a PIDX Increment of 1 + * and will fetch the TX Descriptor from memory via + * DMA. + */ + while (count) { + writeq(*src, dst); + src++; + dst++; + count--; + } + } else + writel(val | QID(tq->bar2_qid), + tq->bar2_addr + SGE_UDB_KDOORBELL); + + /* This Write Memory Barrier will force the write to the User + * Doorbell area to be flushed. This is needed to prevent + * writes on different CPUs for the same queue from hitting + * the adapter out of order. 
This is required when some Work + * Requests take the Write Combine Gather Buffer path (user + * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some + * take the traditional path where we simply increment the + * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the + * hardware DMA read the actual Work Request. + */ + wmb(); + } } /** @@ -1145,7 +1227,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) goto out_free; } - wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2)); + wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2)); if (unlikely(credits < ETHTXQ_STOP_THRES)) { /* * After we're done injecting the Work Request for this @@ -1157,7 +1239,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) * has opened up. */ txq_stop(txq); - wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ; + wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; } /* @@ -1187,9 +1269,9 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; wr->op_immdlen = - cpu_to_be32(FW_WR_OP(FW_ETH_TX_PKT_VM_WR) | - FW_WR_IMMDLEN(sizeof(*lso) + - sizeof(*cpl))); + cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) | + FW_WR_IMMDLEN_V(sizeof(*lso) + + sizeof(*cpl))); /* * Fill in the LSO CPL message. */ @@ -1224,8 +1306,8 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl); wr->op_immdlen = - cpu_to_be32(FW_WR_OP(FW_ETH_TX_PKT_VM_WR) | - FW_WR_IMMDLEN(len)); + cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) | + FW_WR_IMMDLEN_V(len)); /* * Set up TX Packet CPL pointer, control word and perform @@ -1781,6 +1863,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) unsigned int intr_params; struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi); int work_done = process_responses(rspq, budget); + u32 val; if (likely(work_done < budget)) { napi_complete(napi); @@ -1792,11 +1875,16 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) if (unlikely(work_done == 0)) rspq->unhandled_irqs++; - t4_write_reg(rspq->adapter, - T4VF_SGE_BASE_ADDR + SGE_VF_GTS, - CIDXINC(work_done) | - INGRESSQID((u32)rspq->cntxt_id) | - SEINTARM(intr_params)); + val = CIDXINC(work_done) | SEINTARM(intr_params); + if (is_t4(rspq->adapter->params.chip)) { + t4_write_reg(rspq->adapter, + T4VF_SGE_BASE_ADDR + SGE_VF_GTS, + val | INGRESSQID((u32)rspq->cntxt_id)); + } else { + writel(val | INGRESSQID(rspq->bar2_qid), + rspq->bar2_addr + SGE_UDB_GTS); + wmb(); + } return work_done; } @@ -1821,6 +1909,7 @@ static unsigned int process_intrq(struct adapter *adapter) struct sge *s = &adapter->sge; struct sge_rspq *intrq = &s->intrq; unsigned int work_done; + u32 val; spin_lock(&adapter->sge.intrq_lock); for (work_done = 0; ; work_done++) { @@ -1886,10 +1975,15 @@ static unsigned int process_intrq(struct adapter *adapter) rspq_next(intrq); } - t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, - CIDXINC(work_done) | - INGRESSQID(intrq->cntxt_id) | - SEINTARM(intrq->intr_params)); + val = CIDXINC(work_done) | SEINTARM(intrq->intr_params); + if (is_t4(adapter->params.chip)) + t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, + val | INGRESSQID(intrq->cntxt_id)); + else { + writel(val | INGRESSQID(intrq->bar2_qid), + intrq->bar2_addr + SGE_UDB_GTS); + wmb(); + } spin_unlock(&adapter->sge.intrq_lock); @@ -2035,6 +2129,35 @@ static void sge_tx_timer_cb(unsigned long data) } /** + * bar2_address - return the BAR2 address for an SGE Queue's Registers + * @adapter: the adapter + * @qid: the SGE 
Queue ID + * @qtype: the SGE Queue Type (Egress or Ingress) + * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues + * + * Returns the BAR2 address for the SGE Queue Registers associated with + * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also + * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE + * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID" + * Registers are supported (e.g. the Write Combining Doorbell Buffer). + */ +static void __iomem *bar2_address(struct adapter *adapter, + unsigned int qid, + enum t4_bar2_qtype qtype, + unsigned int *pbar2_qid) +{ + u64 bar2_qoffset; + int ret; + + ret = t4_bar2_sge_qregs(adapter, qid, qtype, + &bar2_qoffset, pbar2_qid); + if (ret) + return NULL; + + return adapter->bar2 + bar2_qoffset; +} + +/** * t4vf_sge_alloc_rxq - allocate an SGE RX Queue * @adapter: the adapter * @rspq: pointer to to the new rxq's Response Queue to be filled in @@ -2087,26 +2210,26 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, * into OS-independent common code ... */ memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_IQ_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | - FW_CMD_EXEC); - cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC | - FW_IQ_CMD_IQSTART(1) | + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_CMD_EXEC_F); + cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC_F | + FW_IQ_CMD_IQSTART_F | FW_LEN16(cmd)); cmd.type_to_iqandstindex = - cpu_to_be32(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | - FW_IQ_CMD_IQASYNCH(iqasynch) | - FW_IQ_CMD_VIID(pi->viid) | - FW_IQ_CMD_IQANDST(iqandst) | - FW_IQ_CMD_IQANUS(1) | - FW_IQ_CMD_IQANUD(SGE_UPDATEDEL_INTR) | - FW_IQ_CMD_IQANDSTINDEX(intr_dest)); + cpu_to_be32(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) | + FW_IQ_CMD_IQASYNCH_V(iqasynch) | + FW_IQ_CMD_VIID_V(pi->viid) | + FW_IQ_CMD_IQANDST_V(iqandst) | + FW_IQ_CMD_IQANUS_V(1) | + FW_IQ_CMD_IQANUD_V(SGE_UPDATEDEL_INTR) | + FW_IQ_CMD_IQANDSTINDEX_V(intr_dest)); cmd.iqdroprss_to_iqesize = - cpu_to_be16(FW_IQ_CMD_IQPCIECH(pi->port_id) | - FW_IQ_CMD_IQGTSMODE | - FW_IQ_CMD_IQINTCNTTHRESH(rspq->pktcnt_idx) | - FW_IQ_CMD_IQESIZE(ilog2(rspq->iqe_len) - 4)); + cpu_to_be16(FW_IQ_CMD_IQPCIECH_V(pi->port_id) | + FW_IQ_CMD_IQGTSMODE_F | + FW_IQ_CMD_IQINTCNTTHRESH_V(rspq->pktcnt_idx) | + FW_IQ_CMD_IQESIZE_V(ilog2(rspq->iqe_len) - 4)); cmd.iqsize = cpu_to_be16(rspq->size); cmd.iqaddr = cpu_to_be64(rspq->phys_addr); @@ -2140,13 +2263,13 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, */ cmd.iqns_to_fl0congen = cpu_to_be32( - FW_IQ_CMD_FL0HOSTFCMODE(SGE_HOSTFCMODE_NONE) | - FW_IQ_CMD_FL0PACKEN(1) | - FW_IQ_CMD_FL0PADEN(1)); + FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) | + FW_IQ_CMD_FL0PACKEN_F | + FW_IQ_CMD_FL0PADEN_F); cmd.fl0dcaen_to_fl0cidxfthresh = cpu_to_be16( - FW_IQ_CMD_FL0FBMIN(SGE_FETCHBURSTMIN_64B) | - FW_IQ_CMD_FL0FBMAX(SGE_FETCHBURSTMAX_512B)); + FW_IQ_CMD_FL0FBMIN_V(SGE_FETCHBURSTMIN_64B) | + FW_IQ_CMD_FL0FBMAX_V(SGE_FETCHBURSTMAX_512B)); cmd.fl0size = cpu_to_be16(flsz); cmd.fl0addr = cpu_to_be64(fl->addr); } @@ -2165,6 +2288,10 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, rspq->gen = 1; rspq->next_intr_params = rspq->intr_params; rspq->cntxt_id = be16_to_cpu(rpl.iqid); + rspq->bar2_addr = bar2_address(adapter, + rspq->cntxt_id, + T4_BAR2_QTYPE_INGRESS, + &rspq->bar2_qid); rspq->abs_id = be16_to_cpu(rpl.physiqid); rspq->size--; /* subtract status entry */ rspq->adapter = 
adapter; @@ -2183,6 +2310,15 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, fl->alloc_failed = 0; fl->large_alloc_failed = 0; fl->starving = 0; + + /* Note, we must initialize the BAR2 Free List User Doorbell + * information before refilling the Free List! + */ + fl->bar2_addr = bar2_address(adapter, + fl->cntxt_id, + T4_BAR2_QTYPE_EGRESS, + &fl->bar2_qid); + refill_fl(adapter, fl, fl_cap(fl), GFP_KERNEL); } @@ -2250,24 +2386,25 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, * into the common code ... */ memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_EQ_ETH_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | - FW_CMD_EXEC); - cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC | - FW_EQ_ETH_CMD_EQSTART | + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_CMD_EXEC_F); + cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC_F | + FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(cmd)); - cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE | - FW_EQ_ETH_CMD_VIID(pi->viid)); + cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE_F | + FW_EQ_ETH_CMD_VIID_V(pi->viid)); cmd.fetchszm_to_iqid = - cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE(SGE_HOSTFCMODE_STPG) | - FW_EQ_ETH_CMD_PCIECHN(pi->port_id) | - FW_EQ_ETH_CMD_IQID(iqid)); + cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE_V(SGE_HOSTFCMODE_STPG) | + FW_EQ_ETH_CMD_PCIECHN_V(pi->port_id) | + FW_EQ_ETH_CMD_IQID_V(iqid)); cmd.dcaen_to_eqsize = - cpu_to_be32(FW_EQ_ETH_CMD_FBMIN(SGE_FETCHBURSTMIN_64B) | - FW_EQ_ETH_CMD_FBMAX(SGE_FETCHBURSTMAX_512B) | - FW_EQ_ETH_CMD_CIDXFTHRESH(SGE_CIDXFLUSHTHRESH_32) | - FW_EQ_ETH_CMD_EQSIZE(nentries)); + cpu_to_be32(FW_EQ_ETH_CMD_FBMIN_V(SGE_FETCHBURSTMIN_64B) | + FW_EQ_ETH_CMD_FBMAX_V(SGE_FETCHBURSTMAX_512B) | + FW_EQ_ETH_CMD_CIDXFTHRESH_V( + SGE_CIDXFLUSHTHRESH_32) | + FW_EQ_ETH_CMD_EQSIZE_V(nentries)); cmd.eqaddr = cpu_to_be64(txq->q.phys_addr); /* @@ -2293,9 +2430,13 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, txq->q.cidx = 0; txq->q.pidx = 0; txq->q.stat = (void *)&txq->q.desc[txq->q.size]; - txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_GET(be32_to_cpu(rpl.eqid_pkd)); + txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_G(be32_to_cpu(rpl.eqid_pkd)); + txq->q.bar2_addr = bar2_address(adapter, + txq->q.cntxt_id, + T4_BAR2_QTYPE_EGRESS, + &txq->q.bar2_qid); txq->q.abs_id = - FW_EQ_ETH_CMD_PHYSEQID_GET(be32_to_cpu(rpl.physeqid_pkd)); + FW_EQ_ETH_CMD_PHYSEQID_G(be32_to_cpu(rpl.physeqid_pkd)); txq->txq = devq; txq->tso = 0; txq->tx_cso = 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index 4b6a6d14d86..8d3237f5e36 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h @@ -67,7 +67,7 @@ enum chip_type { /* * The "len16" field of a Firmware Command Structure ... */ -#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16) +#define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16) /* * Per-VF statistics. @@ -135,9 +135,12 @@ struct dev_params { struct sge_params { u32 sge_control; /* padding, boundaries, lengths, etc. 
*/ u32 sge_control2; /* T5: more of the same */ - u32 sge_host_page_size; /* RDMA page sizes */ - u32 sge_queues_per_page; /* RDMA queues/page */ - u32 sge_user_mode_limits; /* limits for BAR2 user mode accesses */ + u32 sge_host_page_size; /* PF0-7 page sizes */ + u32 sge_egress_queues_per_page; /* PF0-7 egress queues/page */ + u32 sge_ingress_queues_per_page;/* PF0-7 ingress queues/page */ + u32 sge_vf_hps; /* host page size for our vf */ + u32 sge_vf_eq_qpp; /* egress queues/page for our VF */ + u32 sge_vf_iq_qpp; /* ingress queues/page for our VF */ u32 sge_fl_buffer_size[16]; /* free list buffer sizes */ u32 sge_ingress_rx_threshold; /* RX counter interrupt threshold[4] */ u32 sge_congestion_control; /* congestion thresholds, etc. */ @@ -267,6 +270,8 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd, return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false); } +#define CHELSIO_PCI_ID_VER(dev_id) ((dev_id) >> 12) + static inline int is_t4(enum chip_type chip) { return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4; @@ -278,6 +283,13 @@ int t4vf_port_init(struct adapter *, int); int t4vf_fw_reset(struct adapter *); int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *); +enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS }; +int t4_bar2_sge_qregs(struct adapter *adapter, + unsigned int qid, + enum t4_bar2_qtype qtype, + u64 *pbar2_qoffset, + unsigned int *pbar2_qid); + int t4vf_get_sge_params(struct adapter *); int t4vf_get_vpd_params(struct adapter *); int t4vf_get_dev_params(struct adapter *); @@ -309,5 +321,6 @@ int t4vf_iq_free(struct adapter *, unsigned int, unsigned int, unsigned int, int t4vf_eth_eq_free(struct adapter *, unsigned int); int t4vf_handle_fw_rpl(struct adapter *, const __be64 *); +int t4vf_prep_adapter(struct adapter *); #endif /* __T4VF_COMMON_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 1e896b92323..02e8833b779 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -204,20 +204,20 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, /* return value in low-order little-endian word */ v = t4_read_reg(adapter, mbox_data); - if (FW_CMD_RETVAL_GET(v)) + if (FW_CMD_RETVAL_G(v)) dump_mbox(adapter, "FW Error", mbox_data); if (rpl) { /* request bit in high-order BE word */ WARN_ON((be32_to_cpu(*(const u32 *)cmd) - & FW_CMD_REQUEST) == 0); + & FW_CMD_REQUEST_F) == 0); get_mbox_rpl(adapter, rpl, size, mbox_data); WARN_ON((be32_to_cpu(*(u32 *)rpl) - & FW_CMD_REQUEST) != 0); + & FW_CMD_REQUEST_F) != 0); } t4_write_reg(adapter, mbox_ctl, MBOWNER(MBOX_OWNER_NONE)); - return -FW_CMD_RETVAL_GET(v); + return -FW_CMD_RETVAL_G(v); } } @@ -287,17 +287,17 @@ int t4vf_port_init(struct adapter *adapter, int pidx) * like MAC address, etc. 
*/ memset(&vi_cmd, 0, sizeof(vi_cmd)); - vi_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) | - FW_CMD_REQUEST | - FW_CMD_READ); + vi_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F); vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd)); - vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID(pi->viid)); + vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(pi->viid)); v = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl); if (v) return v; - BUG_ON(pi->port_id != FW_VI_CMD_PORTID_GET(vi_rpl.portid_pkd)); - pi->rss_size = FW_VI_CMD_RSSSIZE_GET(be16_to_cpu(vi_rpl.rsssize_pkd)); + BUG_ON(pi->port_id != FW_VI_CMD_PORTID_G(vi_rpl.portid_pkd)); + pi->rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(vi_rpl.rsssize_pkd)); t4_os_set_hw_addr(adapter, pidx, vi_rpl.mac); /* @@ -308,12 +308,12 @@ int t4vf_port_init(struct adapter *adapter, int pidx) return 0; memset(&port_cmd, 0, sizeof(port_cmd)); - port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP(FW_PORT_CMD) | - FW_CMD_REQUEST | - FW_CMD_READ | - FW_PORT_CMD_PORTID(pi->port_id)); + port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F | + FW_PORT_CMD_PORTID_V(pi->port_id)); port_cmd.action_to_len16 = - cpu_to_be32(FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) | + cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) | FW_LEN16(port_cmd)); v = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd), &port_rpl); if (v) @@ -349,8 +349,8 @@ int t4vf_fw_reset(struct adapter *adapter) struct fw_reset_cmd cmd; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RESET_CMD) | - FW_CMD_WRITE); + cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RESET_CMD) | + FW_CMD_WRITE_F); cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); } @@ -377,12 +377,12 @@ static int t4vf_query_params(struct adapter *adapter, unsigned int nparams, return -EINVAL; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) | - FW_CMD_REQUEST | - FW_CMD_READ); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F); len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd, param[nparams].mnem), 16); - cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16)); + cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16)); for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) p->mnem = htonl(*params++); @@ -415,12 +415,12 @@ int t4vf_set_params(struct adapter *adapter, unsigned int nparams, return -EINVAL; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F); len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd, param[nparams]), 16); - cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16)); + cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16)); for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) { p->mnem = cpu_to_be32(*params++); p->val = cpu_to_be32(*vals++); @@ -430,6 +430,95 @@ int t4vf_set_params(struct adapter *adapter, unsigned int nparams, } /** + * t4_bar2_sge_qregs - return BAR2 SGE Queue register information + * @adapter: the adapter + * @qid: the Queue ID + * @qtype: the Ingress or Egress type for @qid + * @pbar2_qoffset: BAR2 Queue Offset + * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues + * + * Returns the BAR2 SGE Queue Registers information associated with the + * indicated Absolute Queue ID. 
These are passed back in return value + * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue + * and T4_BAR2_QTYPE_INGRESS for Ingress Queues. + * + * This may return an error which indicates that BAR2 SGE Queue + * registers aren't available. If an error is not returned, then the + * following values are returned: + * + * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers + * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid + * + * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which + * require the "Inferred Queue ID" ability may be used. E.g. the + * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0, + * then these "Inferred Queue ID" register may not be used. + */ +int t4_bar2_sge_qregs(struct adapter *adapter, + unsigned int qid, + enum t4_bar2_qtype qtype, + u64 *pbar2_qoffset, + unsigned int *pbar2_qid) +{ + unsigned int page_shift, page_size, qpp_shift, qpp_mask; + u64 bar2_page_offset, bar2_qoffset; + unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred; + + /* T4 doesn't support BAR2 SGE Queue registers. + */ + if (is_t4(adapter->params.chip)) + return -EINVAL; + + /* Get our SGE Page Size parameters. + */ + page_shift = adapter->params.sge.sge_vf_hps + 10; + page_size = 1 << page_shift; + + /* Get the right Queues per Page parameters for our Queue. + */ + qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS + ? adapter->params.sge.sge_vf_eq_qpp + : adapter->params.sge.sge_vf_iq_qpp); + qpp_mask = (1 << qpp_shift) - 1; + + /* Calculate the basics of the BAR2 SGE Queue register area: + * o The BAR2 page the Queue registers will be in. + * o The BAR2 Queue ID. + * o The BAR2 Queue ID Offset into the BAR2 page. + */ + bar2_page_offset = ((qid >> qpp_shift) << page_shift); + bar2_qid = qid & qpp_mask; + bar2_qid_offset = bar2_qid * SGE_UDB_SIZE; + + /* If the BAR2 Queue ID Offset is less than the Page Size, then the + * hardware will infer the Absolute Queue ID simply from the writes to + * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a + * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply + * write to the first BAR2 SGE Queue Area within the BAR2 Page with + * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID + * from the BAR2 Page and BAR2 Queue ID. + * + * One important censequence of this is that some BAR2 SGE registers + * have a "Queue ID" field and we can write the BAR2 SGE Queue ID + * there. But other registers synthesize the SGE Queue ID purely + * from the writes to the registers -- the Write Combined Doorbell + * Buffer is a good example. These BAR2 SGE Registers are only + * available for those BAR2 SGE Register areas where the SGE Absolute + * Queue ID can be inferred from simple writes. 
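To make the arithmetic concrete, here is one worked case with purely illustrative numbers: a 4KB host page (sge_vf_hps == 2, so page_shift == 12), eight egress queues per BAR2 page (qpp_shift == 3), and SGE_UDB_SIZE taken as its usual 128 bytes.

    qid              = 23 (egress)
    bar2_page_offset = (23 >> 3) << 12 = 0x2000
    bar2_qid         = 23 & 0x7        = 7
    bar2_qid_offset  = 7 * 128         = 896

    896 < 4096, so the Queue ID is inferred from the write address:
    *pbar2_qoffset   = 0x2000 + 896    = 0x2380
    *pbar2_qid       = 0

A returned bar2_qid of 0 is what lets ring_tx_db() above take the Write Combining Doorbell Gather Buffer path, which is only usable when the hardware can infer the queue from the address alone.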
+ */ + bar2_qoffset = bar2_page_offset; + bar2_qinferred = (bar2_qid_offset < page_size); + if (bar2_qinferred) { + bar2_qoffset += bar2_qid_offset; + bar2_qid = 0; + } + + *pbar2_qoffset = bar2_qoffset; + *pbar2_qid = bar2_qid; + return 0; +} + +/** * t4vf_get_sge_params - retrieve adapter Scatter gather Engine parameters * @adapter: the adapter * @@ -443,20 +532,20 @@ int t4vf_get_sge_params(struct adapter *adapter) u32 params[7], vals[7]; int v; - params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | - FW_PARAMS_PARAM_XYZ(SGE_CONTROL)); - params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | - FW_PARAMS_PARAM_XYZ(SGE_HOST_PAGE_SIZE)); - params[2] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | - FW_PARAMS_PARAM_XYZ(SGE_FL_BUFFER_SIZE0)); - params[3] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | - FW_PARAMS_PARAM_XYZ(SGE_FL_BUFFER_SIZE1)); - params[4] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | - FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_0_AND_1)); - params[5] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | - FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_2_AND_3)); - params[6] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | - FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_4_AND_5)); + params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL)); + params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE)); + params[2] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0)); + params[3] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1)); + params[4] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1)); + params[5] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3)); + params[6] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5)); v = t4vf_query_params(adapter, 7, params, vals); if (v) return v; @@ -479,8 +568,8 @@ int t4vf_get_sge_params(struct adapter *adapter) * right value. */ if (!is_t4(adapter->params.chip)) { - params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | - FW_PARAMS_PARAM_XYZ(SGE_CONTROL2_A)); + params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL2_A)); v = t4vf_query_params(adapter, 1, params, vals); if (v != FW_SUCCESS) { dev_err(adapter->pdev_dev, @@ -491,16 +580,65 @@ int t4vf_get_sge_params(struct adapter *adapter) sge_params->sge_control2 = vals[0]; } - params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | - FW_PARAMS_PARAM_XYZ(SGE_INGRESS_RX_THRESHOLD)); - params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | - FW_PARAMS_PARAM_XYZ(SGE_CONM_CTRL)); + params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD)); + params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL)); v = t4vf_query_params(adapter, 2, params, vals); if (v) return v; sge_params->sge_ingress_rx_threshold = vals[0]; sge_params->sge_congestion_control = vals[1]; + /* For T5 and later we want to use the new BAR2 Doorbells. + * Unfortunately, older firmware didn't allow the this register to be + * read. 
+ */ + if (!is_t4(adapter->params.chip)) { + u32 whoami; + unsigned int pf, s_hps, s_qpp; + + params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V( + SGE_EGRESS_QUEUES_PER_PAGE_VF_A)); + params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V( + SGE_INGRESS_QUEUES_PER_PAGE_VF_A)); + v = t4vf_query_params(adapter, 2, params, vals); + if (v != FW_SUCCESS) { + dev_warn(adapter->pdev_dev, + "Unable to get VF SGE Queues/Page; " + "probably old firmware.\n"); + return v; + } + sge_params->sge_egress_queues_per_page = vals[0]; + sge_params->sge_ingress_queues_per_page = vals[1]; + + /* We need the Queues/Page for our VF. This is based on the + * PF from which we're instantiated and is indexed in the + * register we just read. Do it once here so other code in + * the driver can just use it. + */ + whoami = t4_read_reg(adapter, + T4VF_PL_BASE_ADDR + A_PL_VF_WHOAMI); + pf = SOURCEPF_GET(whoami); + + s_hps = (HOSTPAGESIZEPF0_S + + (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf); + sge_params->sge_vf_hps = + ((sge_params->sge_host_page_size >> s_hps) + & HOSTPAGESIZEPF0_M); + + s_qpp = (QUEUESPERPAGEPF0_S + + (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * pf); + sge_params->sge_vf_eq_qpp = + ((sge_params->sge_egress_queues_per_page >> s_qpp) + & QUEUESPERPAGEPF0_MASK); + sge_params->sge_vf_iq_qpp = + ((sge_params->sge_ingress_queues_per_page >> s_qpp) + & QUEUESPERPAGEPF0_MASK); + } + return 0; } @@ -517,8 +655,8 @@ int t4vf_get_vpd_params(struct adapter *adapter) u32 params[7], vals[7]; int v; - params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK)); + params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK)); v = t4vf_query_params(adapter, 1, params, vals); if (v) return v; @@ -540,10 +678,10 @@ int t4vf_get_dev_params(struct adapter *adapter) u32 params[7], vals[7]; int v; - params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWREV)); - params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_TPREV)); + params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWREV)); + params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_TPREV)); v = t4vf_query_params(adapter, 2, params, vals); if (v) return v; @@ -571,9 +709,9 @@ int t4vf_get_rss_glb_config(struct adapter *adapter) * our RSS configuration. */ memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) | - FW_CMD_REQUEST | - FW_CMD_READ); + cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F); cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); if (v) @@ -585,7 +723,7 @@ int t4vf_get_rss_glb_config(struct adapter *adapter) * filtering at this point to weed out modes which don't support * VF Drivers ... 
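The per-PF registers read just above pack one small field per Physical Function, so the VF indexes them by the PF it was instantiated from. The decode below is purely illustrative; it assumes the 4-bit-per-PF layout implied by the _S constants (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S == 4) and a VF attached to PF4.

    pf            = SOURCEPF_GET(whoami)                   = 4
    s_hps         = 0 + 4 * 4                              = 16
    sge_vf_hps    = (sge_host_page_size >> 16) & HOSTPAGESIZEPF0_M
    s_qpp         = 0 + 4 * 4                              = 16
    sge_vf_eq_qpp = (sge_egress_queues_per_page >> 16) & QUEUESPERPAGEPF0_MASK

A decoded sge_vf_hps of 2 then yields page_shift = 2 + 10 = 12 in t4_bar2_sge_qregs(), i.e. the 4KB BAR2 page assumed in the earlier worked example.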
*/ - rss->mode = FW_RSS_GLB_CONFIG_CMD_MODE_GET( + rss->mode = FW_RSS_GLB_CONFIG_CMD_MODE_G( be32_to_cpu(rpl.u.manual.mode_pkd)); switch (rss->mode) { case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: { @@ -593,26 +731,26 @@ int t4vf_get_rss_glb_config(struct adapter *adapter) rpl.u.basicvirtual.synmapen_to_hashtoeplitz); rss->u.basicvirtual.synmapen = - ((word & FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) != 0); + ((word & FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_F) != 0); rss->u.basicvirtual.syn4tupenipv6 = - ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) != 0); + ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_F) != 0); rss->u.basicvirtual.syn2tupenipv6 = - ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) != 0); + ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_F) != 0); rss->u.basicvirtual.syn4tupenipv4 = - ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) != 0); + ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_F) != 0); rss->u.basicvirtual.syn2tupenipv4 = - ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) != 0); + ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_F) != 0); rss->u.basicvirtual.ofdmapen = - ((word & FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) != 0); + ((word & FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_F) != 0); rss->u.basicvirtual.tnlmapen = - ((word & FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) != 0); + ((word & FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F) != 0); rss->u.basicvirtual.tnlalllookup = - ((word & FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) != 0); + ((word & FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F) != 0); rss->u.basicvirtual.hashtoeplitz = - ((word & FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) != 0); + ((word & FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F) != 0); /* we need at least Tunnel Map Enable to be set */ if (!rss->u.basicvirtual.tnlmapen) @@ -647,9 +785,9 @@ int t4vf_get_vfres(struct adapter *adapter) * with error on command failure. */ memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PFVF_CMD) | - FW_CMD_REQUEST | - FW_CMD_READ); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F); cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); if (v) @@ -659,22 +797,22 @@ int t4vf_get_vfres(struct adapter *adapter) * Extract VF resource limits and return success. 
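Most of the churn in these hunks is the mechanical rename from the old FOO()/FOO_GET()/FOO_MASK spellings to the namespaced _V/_G/_M/_F forms. For reference, such field macros conventionally derive from a shift and a mask; the definitions below only illustrate the pattern with made-up numbers and are not taken from the real firmware headers.

    #define FOO_S    8                        /* field start bit (illustrative) */
    #define FOO_M    0x3fU                    /* right-justified field mask */
    #define FOO_V(x) ((x) << FOO_S)           /* place a value into a word */
    #define FOO_G(x) (((x) >> FOO_S) & FOO_M) /* extract a value from a word */
    #define FOO_F    FOO_V(1U)                /* one-bit flag shorthand */

Multi-bit fields are built with _V and parsed with _G against _M, while single-bit fields keep only the _F form; that is why FW_CMD_RETVAL_GET(v) in the mailbox path earlier becomes FW_CMD_RETVAL_G(v) and FW_CMD_REQUEST becomes FW_CMD_REQUEST_F.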
*/ word = be32_to_cpu(rpl.niqflint_niq); - vfres->niqflint = FW_PFVF_CMD_NIQFLINT_GET(word); - vfres->niq = FW_PFVF_CMD_NIQ_GET(word); + vfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word); + vfres->niq = FW_PFVF_CMD_NIQ_G(word); word = be32_to_cpu(rpl.type_to_neq); - vfres->neq = FW_PFVF_CMD_NEQ_GET(word); - vfres->pmask = FW_PFVF_CMD_PMASK_GET(word); + vfres->neq = FW_PFVF_CMD_NEQ_G(word); + vfres->pmask = FW_PFVF_CMD_PMASK_G(word); word = be32_to_cpu(rpl.tc_to_nexactf); - vfres->tc = FW_PFVF_CMD_TC_GET(word); - vfres->nvi = FW_PFVF_CMD_NVI_GET(word); - vfres->nexactf = FW_PFVF_CMD_NEXACTF_GET(word); + vfres->tc = FW_PFVF_CMD_TC_G(word); + vfres->nvi = FW_PFVF_CMD_NVI_G(word); + vfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word); word = be32_to_cpu(rpl.r_caps_to_nethctrl); - vfres->r_caps = FW_PFVF_CMD_R_CAPS_GET(word); - vfres->wx_caps = FW_PFVF_CMD_WX_CAPS_GET(word); - vfres->nethctrl = FW_PFVF_CMD_NETHCTRL_GET(word); + vfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word); + vfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word); + vfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word); return 0; } @@ -695,9 +833,9 @@ int t4vf_read_rss_vi_config(struct adapter *adapter, unsigned int viid, int v; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) | - FW_CMD_REQUEST | - FW_CMD_READ | + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F | FW_RSS_VI_CONFIG_CMD_VIID(viid)); cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); @@ -709,17 +847,17 @@ int t4vf_read_rss_vi_config(struct adapter *adapter, unsigned int viid, u32 word = be32_to_cpu(rpl.u.basicvirtual.defaultq_to_udpen); config->basicvirtual.ip6fourtupen = - ((word & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) != 0); + ((word & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) != 0); config->basicvirtual.ip6twotupen = - ((word & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) != 0); + ((word & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) != 0); config->basicvirtual.ip4fourtupen = - ((word & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) != 0); + ((word & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) != 0); config->basicvirtual.ip4twotupen = - ((word & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) != 0); + ((word & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) != 0); config->basicvirtual.udpen = - ((word & FW_RSS_VI_CONFIG_CMD_UDPEN) != 0); + ((word & FW_RSS_VI_CONFIG_CMD_UDPEN_F) != 0); config->basicvirtual.defaultq = - FW_RSS_VI_CONFIG_CMD_DEFAULTQ_GET(word); + FW_RSS_VI_CONFIG_CMD_DEFAULTQ_G(word); break; } @@ -745,9 +883,9 @@ int t4vf_write_rss_vi_config(struct adapter *adapter, unsigned int viid, struct fw_rss_vi_config_cmd cmd, rpl; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_RSS_VI_CONFIG_CMD_VIID(viid)); cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); switch (adapter->params.rss.mode) { @@ -755,16 +893,16 @@ int t4vf_write_rss_vi_config(struct adapter *adapter, unsigned int viid, u32 word = 0; if (config->basicvirtual.ip6fourtupen) - word |= FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; + word |= FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F; if (config->basicvirtual.ip6twotupen) - word |= FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN; + word |= FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F; if (config->basicvirtual.ip4fourtupen) - word |= FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; + word |= FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F; if (config->basicvirtual.ip4twotupen) - word 
|= FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN; + word |= FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F; if (config->basicvirtual.udpen) - word |= FW_RSS_VI_CONFIG_CMD_UDPEN; - word |= FW_RSS_VI_CONFIG_CMD_DEFAULTQ( + word |= FW_RSS_VI_CONFIG_CMD_UDPEN_F; + word |= FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V( config->basicvirtual.defaultq); cmd.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(word); break; @@ -803,10 +941,10 @@ int t4vf_config_rss_range(struct adapter *adapter, unsigned int viid, * Initialize firmware command template to write the RSS table. */ memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_IND_TBL_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | - FW_RSS_IND_TBL_CMD_VIID(viid)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_RSS_IND_TBL_CMD_VIID_V(viid)); cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); /* @@ -857,9 +995,9 @@ int t4vf_config_rss_range(struct adapter *adapter, unsigned int viid, if (rsp >= rsp_end) rsp = rspq; } - *qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) | - FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) | - FW_RSS_IND_TBL_CMD_IQ2(qbuf[2])); + *qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0_V(qbuf[0]) | + FW_RSS_IND_TBL_CMD_IQ1_V(qbuf[1]) | + FW_RSS_IND_TBL_CMD_IQ2_V(qbuf[2])); } /* @@ -892,18 +1030,18 @@ int t4vf_alloc_vi(struct adapter *adapter, int port_id) * VIID. */ memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | - FW_CMD_EXEC); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_CMD_EXEC_F); cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) | - FW_VI_CMD_ALLOC); - cmd.portid_pkd = FW_VI_CMD_PORTID(port_id); + FW_VI_CMD_ALLOC_F); + cmd.portid_pkd = FW_VI_CMD_PORTID_V(port_id); v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); if (v) return v; - return FW_VI_CMD_VIID_GET(be16_to_cpu(rpl.type_viid)); + return FW_VI_CMD_VIID_G(be16_to_cpu(rpl.type_viid)); } /** @@ -922,12 +1060,12 @@ int t4vf_free_vi(struct adapter *adapter, int viid) * Execute a VI command to free the Virtual Interface. 
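The indirection-table update in t4vf_config_rss_range() above cycles through the caller's list of Response Queue IDs and packs three entries per 32-bit word. A small illustration with hypothetical queue IDs, assuming nothing about the field widths beyond what the IQ0/IQ1/IQ2 macros imply:

    /* rspq list = {17, 18}, six table slots to program:
     * cycling the list gives 17 18 17 18 17 18, packed as two words.
     */
    *qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0_V(17) |
                        FW_RSS_IND_TBL_CMD_IQ1_V(18) |
                        FW_RSS_IND_TBL_CMD_IQ2_V(17));
    *qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0_V(18) |
                        FW_RSS_IND_TBL_CMD_IQ1_V(17) |
                        FW_RSS_IND_TBL_CMD_IQ2_V(18));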
*/ memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) | - FW_CMD_REQUEST | - FW_CMD_EXEC); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_EXEC_F); cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) | - FW_VI_CMD_FREE); - cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID(viid)); + FW_VI_CMD_FREE_F); + cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid)); return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); } @@ -946,12 +1084,12 @@ int t4vf_enable_vi(struct adapter *adapter, unsigned int viid, struct fw_vi_enable_cmd cmd; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_ENABLE_CMD) | - FW_CMD_REQUEST | - FW_CMD_EXEC | - FW_VI_ENABLE_CMD_VIID(viid)); - cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN(rx_en) | - FW_VI_ENABLE_CMD_EEN(tx_en) | + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | + FW_VI_ENABLE_CMD_VIID_V(viid)); + cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) | + FW_VI_ENABLE_CMD_EEN_V(tx_en) | FW_LEN16(cmd)); return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); } @@ -970,11 +1108,11 @@ int t4vf_identify_port(struct adapter *adapter, unsigned int viid, struct fw_vi_enable_cmd cmd; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_ENABLE_CMD) | - FW_CMD_REQUEST | - FW_CMD_EXEC | - FW_VI_ENABLE_CMD_VIID(viid)); - cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED | + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | + FW_VI_ENABLE_CMD_VIID_V(viid)); + cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(cmd)); cmd.blinkdur = cpu_to_be16(nblinks); return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); @@ -1001,28 +1139,28 @@ int t4vf_set_rxmode(struct adapter *adapter, unsigned int viid, /* convert to FW values */ if (mtu < 0) - mtu = FW_VI_RXMODE_CMD_MTU_MASK; + mtu = FW_VI_RXMODE_CMD_MTU_M; if (promisc < 0) - promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK; + promisc = FW_VI_RXMODE_CMD_PROMISCEN_M; if (all_multi < 0) - all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK; + all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M; if (bcast < 0) - bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK; + bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M; if (vlanex < 0) - vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK; + vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_RXMODE_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | - FW_VI_RXMODE_CMD_VIID(viid)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_VI_RXMODE_CMD_VIID_V(viid)); cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); cmd.mtu_to_vlanexen = - cpu_to_be32(FW_VI_RXMODE_CMD_MTU(mtu) | - FW_VI_RXMODE_CMD_PROMISCEN(promisc) | - FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) | - FW_VI_RXMODE_CMD_BROADCASTEN(bcast) | - FW_VI_RXMODE_CMD_VLANEXEN(vlanex)); + cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) | + FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) | + FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) | + FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) | + FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex)); return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok); } @@ -1072,19 +1210,19 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free, int i; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | - (free ? 
FW_CMD_EXEC : 0) | - FW_VI_MAC_CMD_VIID(viid)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + (free ? FW_CMD_EXEC_F : 0) | + FW_VI_MAC_CMD_VIID_V(viid)); cmd.freemacs_to_len16 = - cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) | - FW_CMD_LEN16(len16)); + cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) | + FW_CMD_LEN16_V(len16)); for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) { p->valid_to_idx = cpu_to_be16( - FW_VI_MAC_CMD_VALID | - FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC)); + FW_VI_MAC_CMD_VALID_F | + FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC)); memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr)); } @@ -1095,7 +1233,7 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free, break; for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) { - u16 index = FW_VI_MAC_CMD_IDX_GET( + u16 index = FW_VI_MAC_CMD_IDX_G( be16_to_cpu(p->valid_to_idx)); if (idx) @@ -1161,19 +1299,19 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid, idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | - FW_VI_MAC_CMD_VIID(viid)); - cmd.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16(len16)); - p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID | - FW_VI_MAC_CMD_IDX(idx)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_VI_MAC_CMD_VIID_V(viid)); + cmd.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16)); + p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F | + FW_VI_MAC_CMD_IDX_V(idx)); memcpy(p->macaddr, addr, sizeof(p->macaddr)); ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); if (ret == 0) { p = &rpl.u.exact[0]; - ret = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx)); + ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx)); if (ret >= max_naddr) ret = -ENOMEM; } @@ -1198,13 +1336,13 @@ int t4vf_set_addr_hash(struct adapter *adapter, unsigned int viid, u.exact[0]), 16); memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | - FW_VI_ENABLE_CMD_VIID(viid)); - cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN | - FW_VI_MAC_CMD_HASHUNIEN(ucast) | - FW_CMD_LEN16(len16)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_VI_ENABLE_CMD_VIID_V(viid)); + cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F | + FW_VI_MAC_CMD_HASHUNIEN_V(ucast) | + FW_CMD_LEN16_V(len16)); cmd.u.hash.hashvec = cpu_to_be64(vec); return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok); } @@ -1240,14 +1378,14 @@ int t4vf_get_port_stats(struct adapter *adapter, int pidx, int ret; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_STATS_CMD) | - FW_VI_STATS_CMD_VIID(pi->viid) | - FW_CMD_REQUEST | - FW_CMD_READ); - cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_STATS_CMD) | + FW_VI_STATS_CMD_VIID_V(pi->viid) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F); + cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16)); cmd.u.ctl.nstats_ix = - cpu_to_be16(FW_VI_STATS_CMD_IX(ix) | - FW_VI_STATS_CMD_NSTATS(nstats)); + cpu_to_be16(FW_VI_STATS_CMD_IX_V(ix) | + FW_VI_STATS_CMD_NSTATS_V(nstats)); ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl); if (ret) return ret; @@ -1299,13 +1437,13 @@ int t4vf_iq_free(struct adapter *adapter, unsigned int iqtype, struct 
fw_iq_cmd cmd; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_IQ_CMD) | - FW_CMD_REQUEST | - FW_CMD_EXEC); - cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE | + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_EXEC_F); + cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F | FW_LEN16(cmd)); cmd.type_to_iqandstindex = - cpu_to_be32(FW_IQ_CMD_TYPE(iqtype)); + cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype)); cmd.iqid = cpu_to_be16(iqid); cmd.fl0id = cpu_to_be16(fl0id); @@ -1325,12 +1463,12 @@ int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid) struct fw_eq_eth_cmd cmd; memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_EQ_ETH_CMD) | - FW_CMD_REQUEST | - FW_CMD_EXEC); - cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE | + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_EXEC_F); + cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(cmd)); - cmd.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID(eqid)); + cmd.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid)); return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); } @@ -1344,7 +1482,7 @@ int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid) int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) { const struct fw_cmd_hdr *cmd_hdr = (const struct fw_cmd_hdr *)rpl; - u8 opcode = FW_CMD_OP_GET(be32_to_cpu(cmd_hdr->hi)); + u8 opcode = FW_CMD_OP_G(be32_to_cpu(cmd_hdr->hi)); switch (opcode) { case FW_PORT_CMD: { @@ -1359,7 +1497,7 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) /* * Extract various fields from port status change message. */ - action = FW_PORT_CMD_ACTION_GET( + action = FW_PORT_CMD_ACTION_G( be32_to_cpu(port_cmd->action_to_len16)); if (action != FW_PORT_ACTION_GET_PORT_INFO) { dev_err(adapter->pdev_dev, @@ -1368,24 +1506,24 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) break; } - port_id = FW_PORT_CMD_PORTID_GET( + port_id = FW_PORT_CMD_PORTID_G( be32_to_cpu(port_cmd->op_to_portid)); word = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype); - link_ok = (word & FW_PORT_CMD_LSTATUS) != 0; + link_ok = (word & FW_PORT_CMD_LSTATUS_F) != 0; speed = 0; fc = 0; - if (word & FW_PORT_CMD_RXPAUSE) + if (word & FW_PORT_CMD_RXPAUSE_F) fc |= PAUSE_RX; - if (word & FW_PORT_CMD_TXPAUSE) + if (word & FW_PORT_CMD_TXPAUSE_F) fc |= PAUSE_TX; - if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M)) + if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M)) speed = 100; - else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G)) + else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G)) speed = 1000; - else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G)) + else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) speed = 10000; - else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G)) + else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) speed = 40000; /* @@ -1420,3 +1558,38 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) } return 0; } + +/** + */ +int t4vf_prep_adapter(struct adapter *adapter) +{ + int err; + unsigned int chipid; + + /* Wait for the device to become ready before proceeding ... + */ + err = t4vf_wait_dev_ready(adapter); + if (err) + return err; + + /* Default port and clock for debugging in case we can't reach + * firmware. 
+ */ + adapter->params.nports = 1; + adapter->params.vfres.pmask = 1; + adapter->params.vpd.cclk = 50000; + + adapter->params.chip = 0; + switch (CHELSIO_PCI_ID_VER(adapter->pdev->device)) { + case CHELSIO_T4: + adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, 0); + break; + + case CHELSIO_T5: + chipid = G_REV(t4_read_reg(adapter, A_PL_VF_REV)); + adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid); + break; + } + + return 0; +} |