Diffstat (limited to 'drivers/net/ethernet/ibm')
-rw-r--r--   drivers/net/ethernet/ibm/ehea/ehea.h           |  20
-rw-r--r--   drivers/net/ethernet/ibm/ehea/ehea_ethtool.c   |  33
-rw-r--r--   drivers/net/ethernet/ibm/ehea/ehea_hw.h        |  25
-rw-r--r--   drivers/net/ethernet/ibm/ehea/ehea_main.c      | 478
-rw-r--r--   drivers/net/ethernet/ibm/ehea/ehea_qmr.h       |   2
-rw-r--r--   drivers/net/ethernet/ibm/emac/core.c           |   2
-rw-r--r--   drivers/net/ethernet/ibm/ibmveth.c             |   6
7 files changed, 147 insertions, 419 deletions
diff --git a/drivers/net/ethernet/ibm/ehea/ehea.h b/drivers/net/ethernet/ibm/ehea/ehea.h
index 0b8e6a97a98..410d6a1984e 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea.h
@@ -33,7 +33,6 @@
 #include <linux/ethtool.h>
 #include <linux/vmalloc.h>
 #include <linux/if_vlan.h>
-#include <linux/inet_lro.h>
 #include <asm/ibmebus.h>
 #include <asm/abs_addr.h>
@@ -58,8 +57,6 @@
 #define EHEA_MIN_ENTRIES_QP 127
 #define EHEA_SMALL_QUEUES
-#define EHEA_NUM_TX_QP 1
-#define EHEA_LRO_MAX_AGGR 64
 #ifdef EHEA_SMALL_QUEUES
 #define EHEA_MAX_CQE_COUNT 1023
@@ -83,18 +80,16 @@
 #define EHEA_SG_RQ3 0
 #define EHEA_MAX_PACKET_SIZE 9022 /* for jumbo frames */
-#define EHEA_RQ2_PKT_SIZE 1522
+#define EHEA_RQ2_PKT_SIZE 2048
 #define EHEA_L_PKT_SIZE 256 /* low latency */
-#define MAX_LRO_DESCRIPTORS 8
-
 /* Send completion signaling */
 /* Protection Domain Identifier */
 #define EHEA_PD_ID 0xaabcdeff
 #define EHEA_RQ2_THRESHOLD 1
-#define EHEA_RQ3_THRESHOLD 9 /* use RQ3 threshold of 1522 bytes */
+#define EHEA_RQ3_THRESHOLD 4 /* use RQ3 threshold of 2048 bytes */
 #define EHEA_SPEED_10G 10000
 #define EHEA_SPEED_1G 1000
@@ -363,7 +358,6 @@ struct ehea_port_res {
 struct port_stats p_stats;
 struct ehea_mr send_mr; /* send memory region */
 struct ehea_mr recv_mr; /* receive memory region */
- spinlock_t xmit_lock;
 struct ehea_port *port;
 char int_recv_name[EHEA_IRQ_NAME_SIZE];
 char int_send_name[EHEA_IRQ_NAME_SIZE];
@@ -376,8 +370,6 @@ struct ehea_port_res {
 struct ehea_q_skb_arr rq3_skba;
 struct ehea_q_skb_arr sq_skba;
 int sq_skba_size;
- spinlock_t netif_queue;
- int queue_stopped;
 int swqe_refill_th;
 atomic_t swqe_avail;
 int swqe_ll_count;
@@ -386,9 +378,6 @@ struct ehea_port_res {
 u64 tx_bytes;
 u64 rx_packets;
 u64 rx_bytes;
- u32 poll_counter;
- struct net_lro_mgr lro_mgr;
- struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
 int sq_restart_flag;
 };
@@ -453,7 +442,7 @@ struct ehea_bcmc_reg_array {
 struct ehea_port {
 struct ehea_adapter *adapter; /* adapter that owns this port */
 struct net_device *netdev;
- struct net_device_stats stats;
+ struct rtnl_link_stats64 stats;
 struct ehea_port_res port_res[EHEA_MAX_PORT_RES];
 struct platform_device ofdev; /* Open Firmware Device */
 struct ehea_mc_list *mc_list; /* Multicast MAC addresses */
@@ -464,8 +453,6 @@ struct ehea_port {
 char int_aff_name[EHEA_IRQ_NAME_SIZE];
 int allmulti; /* Indicates IFF_ALLMULTI state */
 int promisc; /* Indicates IFF_PROMISC state */
- int num_tx_qps;
- int num_add_tx_qps;
 int num_mcs;
 int resets;
 unsigned long flags;
@@ -475,7 +462,6 @@ struct ehea_port {
 u32 msg_enable;
 u32 sig_comp_iv;
 u32 state;
- u32 lro_max_aggr;
 u8 phy_link;
 u8 full_duplex;
 u8 autoneg;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
index 7f642aef5e8..05b7359bde8 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
@@ -180,7 +180,7 @@ static void ehea_set_msglevel(struct net_device *dev, u32 value)
 port->msg_enable = value;
 }
-static char ehea_ethtool_stats_keys[][ETH_GSTRING_LEN] = {
+static const char ehea_ethtool_stats_keys[][ETH_GSTRING_LEN] = {
 {"sig_comp_iv"},
 {"swqe_refill_th"},
 {"port resets"},
@@ -189,7 +189,6 @@ static char ehea_ethtool_stats_keys[][ETH_GSTRING_LEN] = {
 {"IP cksum errors"},
 {"Frame cksum errors"},
 {"num SQ stopped"},
- {"SQ stopped"},
 {"PR0 free_swqes"},
 {"PR1 free_swqes"},
 {"PR2 free_swqes"},
@@ -198,9 +197,14 @@ static char ehea_ethtool_stats_keys[][ETH_GSTRING_LEN] = {
 {"PR5 free_swqes"},
 {"PR6 free_swqes"},
 {"PR7 free_swqes"},
- {"LRO aggregated"},
- {"LRO flushed"},
- {"LRO no_desc"},
+ {"PR8 free_swqes"},
+ {"PR9 free_swqes"},
+ {"PR10 free_swqes"},
+ {"PR11 free_swqes"},
+ {"PR12 free_swqes"},
+ {"PR13 free_swqes"},
+ {"PR14 free_swqes"},
+ {"PR15 free_swqes"},
 };
 static void ehea_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -255,25 +259,8 @@ static void ehea_get_ethtool_stats(struct net_device *dev,
 tmp += port->port_res[k].p_stats.queue_stopped;
 data[i++] = tmp;
- for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
- tmp |= port->port_res[k].queue_stopped;
- data[i++] = tmp;
-
- for (k = 0; k < 8; k++)
+ for (k = 0; k < 16; k++)
 data[i++] = atomic_read(&port->port_res[k].swqe_avail);
-
- for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
- tmp |= port->port_res[k].lro_mgr.stats.aggregated;
- data[i++] = tmp;
-
- for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
- tmp |= port->port_res[k].lro_mgr.stats.flushed;
- data[i++] = tmp;
-
- for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
- tmp |= port->port_res[k].lro_mgr.stats.no_desc;
- data[i++] = tmp;
-
 }
 const struct ethtool_ops ehea_ethtool_ops = {
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_hw.h b/drivers/net/ethernet/ibm/ehea/ehea_hw.h
index 567981b4b2c..1a2fe4dc3eb 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_hw.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea_hw.h
@@ -210,36 +210,11 @@ static inline void epa_store_acc(struct h_epa epa, u32 offset, u64 value)
 __raw_writeq(value, (void __iomem *)(epa.addr + offset));
 }
-#define epa_store_eq(epa, offset, value)\
- epa_store(epa, EQTEMM_OFFSET(offset), value)
-#define epa_load_eq(epa, offset)\
- epa_load(epa, EQTEMM_OFFSET(offset))
-
 #define epa_store_cq(epa, offset, value)\
 epa_store(epa, CQTEMM_OFFSET(offset), value)
 #define epa_load_cq(epa, offset)\
 epa_load(epa, CQTEMM_OFFSET(offset))
-#define epa_store_qp(epa, offset, value)\
- epa_store(epa, QPTEMM_OFFSET(offset), value)
-#define epa_load_qp(epa, offset)\
- epa_load(epa, QPTEMM_OFFSET(offset))
-
-#define epa_store_qped(epa, offset, value)\
- epa_store(epa, QPEDMM_OFFSET(offset), value)
-#define epa_load_qped(epa, offset)\
- epa_load(epa, QPEDMM_OFFSET(offset))
-
-#define epa_store_mrmw(epa, offset, value)\
- epa_store(epa, MRMWMM_OFFSET(offset), value)
-#define epa_load_mrmw(epa, offset)\
- epa_load(epa, MRMWMM_OFFSET(offset))
-
-#define epa_store_base(epa, offset, value)\
- epa_store(epa, HCAGR_OFFSET(offset), value)
-#define epa_load_base(epa, offset)\
- epa_load(epa, HCAGR_OFFSET(offset))
-
 static inline void ehea_update_sqa(struct ehea_qp *qp, u16 nr_wqes)
 {
 struct h_epa epa = qp->epas.kernel;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index dfefe809c48..37b70f7052b 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -61,10 +61,7 @@
 static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
 static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
 static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
 static int sq_entries = EHEA_DEF_ENTRIES_SQ;
-static int use_mcs;
-static int use_lro;
-static int lro_max_aggr = EHEA_LRO_MAX_AGGR;
-static int num_tx_qps = EHEA_NUM_TX_QP;
+static int use_mcs = 1;
 static int prop_carrier_state;
 module_param(msg_level, int, 0);
@@ -74,11 +71,7 @@
 module_param(rq3_entries, int, 0);
 module_param(sq_entries, int, 0);
 module_param(prop_carrier_state, int, 0);
 module_param(use_mcs, int, 0);
-module_param(use_lro, int, 0);
-module_param(lro_max_aggr, int, 0);
-module_param(num_tx_qps, int, 0);
-MODULE_PARM_DESC(num_tx_qps, "Number of TX-QPS");
 MODULE_PARM_DESC(msg_level, "msg_level");
 MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
 "port to stack. 1:yes, 0:no. Default = 0 ");
@@ -94,12 +87,8 @@ MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
 MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue "
 "[2^x - 1], x = [6..14]. Default = " __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")");
-MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 0 ");
-
-MODULE_PARM_DESC(lro_max_aggr, " LRO: Max packets to be aggregated. Default = "
- __MODULE_STRING(EHEA_LRO_MAX_AGGR));
-MODULE_PARM_DESC(use_lro, " Large Receive Offload, 1: enable, 0: disable, "
- "Default = 0");
+MODULE_PARM_DESC(use_mcs, " Multiple receive queues, 1: enable, 0: disable, "
+ "Default = 1");
 static int port_name_cnt;
 static LIST_HEAD(adapter_list);
@@ -173,7 +162,7 @@ static void ehea_update_firmware_handles(void)
 continue;
 num_ports++;
- num_portres += port->num_def_qps + port->num_add_tx_qps;
+ num_portres += port->num_def_qps;
 }
 }
@@ -199,9 +188,7 @@ static void ehea_update_firmware_handles(void)
 (num_ports == 0))
 continue;
- for (l = 0;
- l < port->num_def_qps + port->num_add_tx_qps;
- l++) {
+ for (l = 0; l < port->num_def_qps; l++) {
 struct ehea_port_res *pr = &port->port_res[l];
 arr[i].adh = adapter->handle;
@@ -327,10 +314,10 @@ out:
 spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
 }
-static struct net_device_stats *ehea_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *ehea_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
 {
 struct ehea_port *port = netdev_priv(dev);
- struct net_device_stats *stats = &port->stats;
 u64 rx_packets = 0, tx_packets = 0, rx_bytes = 0, tx_bytes = 0;
 int i;
@@ -339,7 +326,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
 rx_bytes += port->port_res[i].rx_bytes;
 }
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ for (i = 0; i < port->num_def_qps; i++) {
 tx_packets += port->port_res[i].tx_packets;
 tx_bytes += port->port_res[i].tx_bytes;
 }
@@ -357,7 +344,7 @@ static void ehea_update_stats(struct work_struct *work)
 struct ehea_port *port = container_of(work, struct ehea_port,
 stats_work.work);
 struct net_device *dev = port->netdev;
- struct net_device_stats *stats = &port->stats;
+ struct rtnl_link_stats64 *stats = &port->stats;
 struct hcp_ehea_port_cb2 *cb2;
 u64 hret;
@@ -551,7 +538,8 @@ static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
 }
 static inline void ehea_fill_skb(struct net_device *dev,
- struct sk_buff *skb, struct ehea_cqe *cqe)
+ struct sk_buff *skb, struct ehea_cqe *cqe,
+ struct ehea_port_res *pr)
 {
 int length = cqe->num_bytes_transfered - 4; /*remove CRC */
@@ -565,6 +553,8 @@ static inline void ehea_fill_skb(struct net_device *dev,
 skb->csum = csum_unfold(~cqe->inet_checksum_value);
 } else
 skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ skb_record_rx_queue(skb, pr - &pr->port->port_res[0]);
 }
 static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
@@ -657,49 +647,6 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
 return 0;
 }
-static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
- void **tcph, u64 *hdr_flags, void *priv)
-{
- struct ehea_cqe *cqe = priv;
- unsigned int ip_len;
- struct iphdr *iph;
-
- /* non tcp/udp packets */
- if (!cqe->header_length)
- return -1;
-
- /* non tcp packet */
- skb_reset_network_header(skb);
- iph = ip_hdr(skb);
- if (iph->protocol != IPPROTO_TCP)
- return -1;
-
- ip_len = ip_hdrlen(skb);
- skb_set_transport_header(skb, ip_len);
- *tcph = tcp_hdr(skb);
-
- /* check if ip header and tcp header are complete */
- if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
- return -1;
-
- *hdr_flags = LRO_IPV4 | LRO_TCP;
- *iphdr = iph;
-
- return 0;
-}
-
-static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
- struct sk_buff *skb)
-{
- if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
- __vlan_hwaccel_put_tag(skb, cqe->vlan_tag);
-
- if (skb->dev->features & NETIF_F_LRO)
- lro_receive_skb(&pr->lro_mgr, skb, cqe);
- else
- netif_receive_skb(skb);
-}
-
 static int ehea_proc_rwqes(struct net_device *dev,
 struct ehea_port_res *pr,
 int budget)
@@ -750,7 +697,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
 }
 skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
 cqe->num_bytes_transfered - 4);
- ehea_fill_skb(dev, skb, cqe);
+ ehea_fill_skb(dev, skb, cqe, pr);
 } else if (rq == 2) {
 /* RQ2 */
 skb = get_skb_by_index(skb_arr_rq2,
@@ -760,7 +707,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
 "rq2: skb=NULL\n");
 break;
 }
- ehea_fill_skb(dev, skb, cqe);
+ ehea_fill_skb(dev, skb, cqe, pr);
 processed_rq2++;
 } else {
 /* RQ3 */
 skb = get_skb_by_index(skb_arr_rq3,
@@ -771,12 +718,16 @@ static int ehea_proc_rwqes(struct net_device *dev,
 "rq3: skb=NULL\n");
 break;
 }
- ehea_fill_skb(dev, skb, cqe);
+ ehea_fill_skb(dev, skb, cqe, pr);
 processed_rq3++;
 }
 processed_bytes += skb->len;
- ehea_proc_skb(pr, cqe, skb);
+
+ if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
+ __vlan_hwaccel_put_tag(skb, cqe->vlan_tag);
+
+ napi_gro_receive(&pr->napi, skb);
 } else {
 pr->p_stats.poll_receive_errors++;
 port_reset = ehea_treat_poll_error(pr, rq, cqe,
@@ -787,8 +738,6 @@ static int ehea_proc_rwqes(struct net_device *dev,
 }
 cqe = ehea_poll_rq1(qp, &wqe_index);
 }
- if (dev->features & NETIF_F_LRO)
- lro_flush_all(&pr->lro_mgr);
 pr->rx_packets += processed;
 pr->rx_bytes += processed_bytes;
@@ -806,7 +755,7 @@ static void reset_sq_restart_flag(struct ehea_port *port)
 {
 int i;
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ for (i = 0; i < port->num_def_qps; i++) {
 struct ehea_port_res *pr = &port->port_res[i];
 pr->sq_restart_flag = 0;
 }
@@ -819,7 +768,7 @@ static void check_sqs(struct ehea_port *port)
 int swqe_index;
 int i, k;
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ for (i = 0; i < port->num_def_qps; i++) {
 struct ehea_port_res *pr = &port->port_res[i];
 int ret;
 k = 0;
@@ -857,7 +806,8 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
 int cqe_counter = 0;
 int swqe_av = 0;
 int index;
- unsigned long flags;
+ struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev,
+ pr - &pr->port->port_res[0]);
 cqe = ehea_poll_cq(send_cq);
 while (cqe && (quota > 0)) {
@@ -907,20 +857,20 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
 ehea_update_feca(send_cq, cqe_counter);
 atomic_add(swqe_av, &pr->swqe_avail);
- spin_lock_irqsave(&pr->netif_queue, flags);
-
- if (pr->queue_stopped && (atomic_read(&pr->swqe_avail)
- >= pr->swqe_refill_th)) {
- netif_wake_queue(pr->port->netdev);
- pr->queue_stopped = 0;
+ if (unlikely(netif_tx_queue_stopped(txq) &&
+ (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))) {
+ __netif_tx_lock(txq, smp_processor_id());
+ if (netif_tx_queue_stopped(txq) &&
+ (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))
+ netif_tx_wake_queue(txq);
+ __netif_tx_unlock(txq);
 }
- spin_unlock_irqrestore(&pr->netif_queue, flags);
+
 wake_up(&pr->port->swqe_avail_wq);
 return cqe;
 }
-#define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16
 #define EHEA_POLL_MAX_CQES 65535
 static int ehea_poll(struct napi_struct *napi, int budget)
@@ -930,18 +880,13 @@ static int ehea_poll(struct napi_struct *napi, int budget)
 struct net_device *dev = pr->port->netdev;
 struct ehea_cqe *cqe;
 struct ehea_cqe *cqe_skb = NULL;
- int force_irq, wqe_index;
+ int wqe_index;
 int rx = 0;
- force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ);
 cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
+ rx += ehea_proc_rwqes(dev, pr, budget - rx);
- if (!force_irq)
- rx += ehea_proc_rwqes(dev, pr, budget - rx);
-
- while ((rx != budget) || force_irq) {
- pr->poll_counter = 0;
- force_irq = 0;
+ while (rx != budget) {
 napi_complete(napi);
 ehea_reset_cq_ep(pr->recv_cq);
 ehea_reset_cq_ep(pr->send_cq);
@@ -961,7 +906,6 @@ static int ehea_poll(struct napi_struct *napi, int budget)
 rx += ehea_proc_rwqes(dev, pr, budget - rx);
 }
- pr->poll_counter++;
 return rx;
 }
@@ -1113,13 +1057,6 @@ int ehea_sense_port_attr(struct ehea_port *port)
 goto out_free;
 }
- port->num_tx_qps = num_tx_qps;
-
- if (port->num_def_qps >= port->num_tx_qps)
- port->num_add_tx_qps = 0;
- else
- port->num_add_tx_qps = port->num_tx_qps - port->num_def_qps;
-
 ret = 0;
 out_free:
 if (ret || netif_msg_probe(port))
@@ -1251,7 +1188,7 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
 netif_info(port, link, dev,
 "Logical port down\n");
 netif_carrier_off(dev);
- netif_stop_queue(dev);
+ netif_tx_disable(dev);
 }
 if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
@@ -1282,7 +1219,7 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
 case EHEA_EC_PORT_MALFUNC:
 netdev_info(dev, "Port malfunction\n");
 netif_carrier_off(dev);
- netif_stop_queue(dev);
+ netif_tx_disable(dev);
 break;
 default:
 netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe);
@@ -1360,7 +1297,7 @@ static int ehea_reg_interrupts(struct net_device *dev)
 port->qp_eq->attr.ist1);
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ for (i = 0; i < port->num_def_qps; i++) {
 pr = &port->port_res[i];
 snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
 "%s-queue%d", dev->name, i);
@@ -1403,7 +1340,7 @@ static void ehea_free_interrupts(struct net_device *dev)
 /* send */
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ for (i = 0; i < port->num_def_qps; i++) {
 pr = &port->port_res[i];
 ibmebus_free_irq(pr->eq->attr.ist1, pr);
 netif_info(port, intr, dev,
@@ -1534,8 +1471,6 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
 pr->rx_packets = rx_packets;
 pr->port = port;
- spin_lock_init(&pr->xmit_lock);
- spin_lock_init(&pr->netif_queue);
 pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
 if (!pr->eq) {
@@ -1626,15 +1561,6 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
 netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);
- pr->lro_mgr.max_aggr = pr->port->lro_max_aggr;
- pr->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
- pr->lro_mgr.lro_arr = pr->lro_desc;
- pr->lro_mgr.get_skb_header = get_skb_hdr;
- pr->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
- pr->lro_mgr.dev = port->netdev;
- pr->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
- pr->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
-
 ret = 0;
 goto out;
@@ -1691,96 +1617,35 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
 return ret;
 }
-/*
- * The write_* functions store information in swqe which is used by
- * the hardware to calculate the ip/tcp/udp checksum
- */
-
-static inline void write_ip_start_end(struct ehea_swqe *swqe,
- const struct sk_buff *skb)
-{
- swqe->ip_start = skb_network_offset(skb);
- swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1);
-}
-
-static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
- const struct sk_buff *skb)
-{
- swqe->tcp_offset =
- (u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check));
-
- swqe->tcp_end = (u16)skb->len - 1;
-}
-
-static inline void write_udp_offset_end(struct ehea_swqe *swqe,
- const struct sk_buff *skb)
-{
- swqe->tcp_offset =
- (u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check));
-
- swqe->tcp_end = (u16)skb->len - 1;
-}
-
-
-static void write_swqe2_TSO(struct sk_buff *skb,
- struct ehea_swqe *swqe, u32 lkey)
-{
- struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
- u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
- int skb_data_size = skb_headlen(skb);
- int headersize;
-
- /* Packet is TCP with TSO enabled */
- swqe->tx_control |= EHEA_SWQE_TSO;
- swqe->mss = skb_shinfo(skb)->gso_size;
- /* copy only eth/ip/tcp headers to immediate data and
- * the rest of skb->data to sg1entry
- */
- headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
-
- skb_data_size = skb_headlen(skb);
-
- if (skb_data_size >= headersize) {
- /* copy immediate data */
- skb_copy_from_linear_data(skb, imm_data, headersize);
- swqe->immediate_data_length = headersize;
-
- if (skb_data_size > headersize) {
- /* set sg1entry data */
- sg1entry->l_key = lkey;
- sg1entry->len = skb_data_size - headersize;
- sg1entry->vaddr =
- ehea_map_vaddr(skb->data + headersize);
- swqe->descriptors++;
- }
- } else
- pr_err("cannot handle fragmented headers\n");
-}
-
-static void write_swqe2_nonTSO(struct sk_buff *skb,
- struct ehea_swqe *swqe, u32 lkey)
+static void write_swqe2_immediate(struct sk_buff *skb, struct ehea_swqe *swqe,
+ u32 lkey)
 {
 int skb_data_size = skb_headlen(skb);
 u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
 struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
+ unsigned int immediate_len = SWQE2_MAX_IMM;
+
+ swqe->descriptors = 0;
- /* Packet is any nonTSO type
- *
- * Copy as much as possible skb->data to immediate data and
- * the rest to sg1entry
- */
- if (skb_data_size >= SWQE2_MAX_IMM) {
- /* copy immediate data */
- skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM);
+ if (skb_is_gso(skb)) {
+ swqe->tx_control |= EHEA_SWQE_TSO;
+ swqe->mss = skb_shinfo(skb)->gso_size;
+ /*
+ * For TSO packets we only copy the headers into the
+ * immediate area.
+ */
+ immediate_len = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
+ }
- swqe->immediate_data_length = SWQE2_MAX_IMM;
+ if (skb_is_gso(skb) || skb_data_size >= SWQE2_MAX_IMM) {
+ skb_copy_from_linear_data(skb, imm_data, immediate_len);
+ swqe->immediate_data_length = immediate_len;
- if (skb_data_size > SWQE2_MAX_IMM) {
- /* copy sg1entry data */
+ if (skb_data_size > immediate_len) {
 sg1entry->l_key = lkey;
- sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
+ sg1entry->len = skb_data_size - immediate_len;
 sg1entry->vaddr =
- ehea_map_vaddr(skb->data + SWQE2_MAX_IMM);
+ ehea_map_vaddr(skb->data + immediate_len);
 swqe->descriptors++;
 }
 } else {
@@ -1799,13 +1664,9 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
 nfrags = skb_shinfo(skb)->nr_frags;
 sg1entry = &swqe->u.immdata_desc.sg_entry;
 sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
- swqe->descriptors = 0;
 sg1entry_contains_frag_data = 0;
- if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size)
- write_swqe2_TSO(skb, swqe, lkey);
- else
- write_swqe2_nonTSO(skb, swqe, lkey);
+ write_swqe2_immediate(skb, swqe, lkey);
 /* write descriptors */
 if (nfrags > 0) {
@@ -1815,7 +1676,7 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
 /* copy sg1entry data */
 sg1entry->l_key = lkey;
- sg1entry->len = frag->size;
+ sg1entry->len = skb_frag_size(frag);
 sg1entry->vaddr =
 ehea_map_vaddr(skb_frag_address(frag));
 swqe->descriptors++;
@@ -1828,7 +1689,7 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
 sgentry = &sg_list[i - sg1entry_contains_frag_data];
 sgentry->l_key = lkey;
- sgentry->len = frag->size;
+ sgentry->len = skb_frag_size(frag);
 sgentry->vaddr = ehea_map_vaddr(skb_frag_address(frag));
 swqe->descriptors++;
 }
@@ -2120,41 +1981,44 @@ static int ehea_change_mtu(struct net_device *dev, int new_mtu)
 return 0;
 }
-static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
- struct ehea_swqe *swqe, u32 lkey)
+static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
 {
- if (skb->protocol == htons(ETH_P_IP)) {
- const struct iphdr *iph = ip_hdr(skb);
+ swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;
- /* IPv4 */
- swqe->tx_control |= EHEA_SWQE_CRC
- | EHEA_SWQE_IP_CHECKSUM
- | EHEA_SWQE_TCP_CHECKSUM
- | EHEA_SWQE_IMM_DATA_PRESENT
- | EHEA_SWQE_DESCRIPTORS_PRESENT;
+ if (skb->protocol != htons(ETH_P_IP))
+ return;
- write_ip_start_end(swqe, skb);
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ swqe->tx_control |= EHEA_SWQE_IP_CHECKSUM;
- if (iph->protocol == IPPROTO_UDP) {
- if ((iph->frag_off & IP_MF) ||
- (iph->frag_off & IP_OFFSET))
- /* IP fragment, so don't change cs */
- swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
- else
- write_udp_offset_end(swqe, skb);
- } else if (iph->protocol == IPPROTO_TCP) {
- write_tcp_offset_end(swqe, skb);
- }
+ swqe->ip_start = skb_network_offset(skb);
+ swqe->ip_end = swqe->ip_start + ip_hdrlen(skb) - 1;
- /* icmp (big data) and ip segmentation packets (all other ip
- packets) do not require any special handling */
+ switch (ip_hdr(skb)->protocol) {
+ case IPPROTO_UDP:
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;
- } else {
- /* Other Ethernet Protocol */
- swqe->tx_control |= EHEA_SWQE_CRC
- | EHEA_SWQE_IMM_DATA_PRESENT
- | EHEA_SWQE_DESCRIPTORS_PRESENT;
+ swqe->tcp_offset = swqe->ip_end + 1 +
+ offsetof(struct udphdr, check);
+ break;
+
+ case IPPROTO_TCP:
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;
+
+ swqe->tcp_offset = swqe->ip_end + 1 +
+ offsetof(struct tcphdr, check);
+ break;
 }
+}
+
+static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
+ struct ehea_swqe *swqe, u32 lkey)
+{
+ swqe->tx_control |= EHEA_SWQE_DESCRIPTORS_PRESENT;
+
+ xmit_common(skb, swqe);
 write_swqe2_data(skb, dev, swqe, lkey);
 }
@@ -2162,105 +2026,30 @@ static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
 static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
 struct ehea_swqe *swqe)
 {
- int nfrags = skb_shinfo(skb)->nr_frags;
 u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
- skb_frag_t *frag;
- int i;
-
- if (skb->protocol == htons(ETH_P_IP)) {
- const struct iphdr *iph = ip_hdr(skb);
-
- /* IPv4 */
- write_ip_start_end(swqe, skb);
-
- if (iph->protocol == IPPROTO_TCP) {
- swqe->tx_control |= EHEA_SWQE_CRC
- | EHEA_SWQE_IP_CHECKSUM
- | EHEA_SWQE_TCP_CHECKSUM
- | EHEA_SWQE_IMM_DATA_PRESENT;
- write_tcp_offset_end(swqe, skb);
+ xmit_common(skb, swqe);
- } else if (iph->protocol == IPPROTO_UDP) {
- if ((iph->frag_off & IP_MF) ||
- (iph->frag_off & IP_OFFSET))
- /* IP fragment, so don't change cs */
- swqe->tx_control |= EHEA_SWQE_CRC
- | EHEA_SWQE_IMM_DATA_PRESENT;
- else {
- swqe->tx_control |= EHEA_SWQE_CRC
- | EHEA_SWQE_IP_CHECKSUM
- | EHEA_SWQE_TCP_CHECKSUM
- | EHEA_SWQE_IMM_DATA_PRESENT;
-
- write_udp_offset_end(swqe, skb);
- }
- } else {
- /* icmp (big data) and
- ip segmentation packets (all other ip packets) */
- swqe->tx_control |= EHEA_SWQE_CRC
- | EHEA_SWQE_IP_CHECKSUM
- | EHEA_SWQE_IMM_DATA_PRESENT;
- }
- } else {
- /* Other Ethernet Protocol */
- swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT;
- }
- /* copy (immediate) data */
- if (nfrags == 0) {
- /* data is in a single piece */
+ if (!skb->data_len)
 skb_copy_from_linear_data(skb, imm_data, skb->len);
- } else {
- /* first copy data from the skb->data buffer ... */
- skb_copy_from_linear_data(skb, imm_data,
- skb_headlen(skb));
- imm_data += skb_headlen(skb);
+ else
+ skb_copy_bits(skb, 0, imm_data, skb->len);
- /* ... then copy data from the fragments */
- for (i = 0; i < nfrags; i++) {
- frag = &skb_shinfo(skb)->frags[i];
- memcpy(imm_data, skb_frag_address(frag), frag->size);
- imm_data += frag->size;
- }
- }
 swqe->immediate_data_length = skb->len;
 dev_kfree_skb(skb);
 }
-static inline int ehea_hash_skb(struct sk_buff *skb, int num_qps)
-{
- struct tcphdr *tcp;
- u32 tmp;
-
- if ((skb->protocol == htons(ETH_P_IP)) &&
- (ip_hdr(skb)->protocol == IPPROTO_TCP)) {
- tcp = (struct tcphdr *)(skb_network_header(skb) +
- (ip_hdr(skb)->ihl * 4));
- tmp = (tcp->source + (tcp->dest << 16)) % 31;
- tmp += ip_hdr(skb)->daddr % 31;
- return tmp % num_qps;
- } else
- return 0;
-}
-
 static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 struct ehea_port *port = netdev_priv(dev);
 struct ehea_swqe *swqe;
- unsigned long flags;
 u32 lkey;
 int swqe_index;
 struct ehea_port_res *pr;
+ struct netdev_queue *txq;
- pr = &port->port_res[ehea_hash_skb(skb, port->num_tx_qps)];
-
- if (!spin_trylock(&pr->xmit_lock))
- return NETDEV_TX_BUSY;
-
- if (pr->queue_stopped) {
- spin_unlock(&pr->xmit_lock);
- return NETDEV_TX_BUSY;
- }
+ pr = &port->port_res[skb_get_queue_mapping(skb)];
+ txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 swqe = ehea_get_swqe(pr->qp, &swqe_index);
 memset(swqe, 0, SWQE_HEADER_SIZE);
@@ -2310,23 +2099,16 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
 ehea_dump(swqe, 512, "swqe");
 if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
- netif_stop_queue(dev);
+ netif_tx_stop_queue(txq);
 swqe->tx_control |= EHEA_SWQE_PURGE;
 }
 ehea_post_swqe(pr->qp, swqe);
 if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
- spin_lock_irqsave(&pr->netif_queue, flags);
- if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
- pr->p_stats.queue_stopped++;
- netif_stop_queue(dev);
- pr->queue_stopped = 1;
- }
- spin_unlock_irqrestore(&pr->netif_queue, flags);
+ pr->p_stats.queue_stopped++;
+ netif_tx_stop_queue(txq);
 }
- dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
- spin_unlock(&pr->xmit_lock);
 return NETDEV_TX_OK;
 }
@@ -2471,8 +2253,7 @@ out:
 return ret;
 }
-static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
- int add_tx_qps)
+static int ehea_port_res_setup(struct ehea_port *port, int def_qps)
 {
 int ret, i;
 struct port_res_cfg pr_cfg, pr_cfg_small_rx;
@@ -2505,7 +2286,7 @@ static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
 if (ret)
 goto out_clean_pr;
 }
- for (i = def_qps; i < def_qps + add_tx_qps; i++) {
+ for (i = def_qps; i < def_qps; i++) {
 ret = ehea_init_port_res(port, &port->port_res[i],
 &pr_cfg_small_rx, i);
 if (ret)
@@ -2528,7 +2309,7 @@ static int ehea_clean_all_portres(struct ehea_port *port)
 int ret = 0;
 int i;
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+ for (i = 0; i < port->num_def_qps; i++)
 ret |= ehea_clean_portres(port, &port->port_res[i]);
 ret |= ehea_destroy_eq(port->qp_eq);
@@ -2560,8 +2341,7 @@ static int ehea_up(struct net_device *dev)
 if (port->state == EHEA_PORT_UP)
 return 0;
- ret = ehea_port_res_setup(port, port->num_def_qps,
- port->num_add_tx_qps);
+ ret = ehea_port_res_setup(port, port->num_def_qps);
 if (ret) {
 netdev_err(dev, "port_res_failed\n");
 goto out;
@@ -2580,7 +2360,7 @@ static int ehea_up(struct net_device *dev)
 goto out_clean_pr;
 }
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ for (i = 0; i < port->num_def_qps; i++) {
 ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
 if (ret) {
 netdev_err(dev, "activate_qp failed\n");
@@ -2626,7 +2406,7 @@ static void port_napi_disable(struct ehea_port *port)
 {
 int i;
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+ for (i = 0; i < port->num_def_qps; i++)
 napi_disable(&port->port_res[i].napi);
 }
@@ -2634,7 +2414,7 @@ static void port_napi_enable(struct ehea_port *port)
 {
 int i;
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+ for (i = 0; i < port->num_def_qps; i++)
 napi_enable(&port->port_res[i].napi);
 }
@@ -2650,7 +2430,7 @@ static int ehea_open(struct net_device *dev)
 ret = ehea_up(dev);
 if (!ret) {
 port_napi_enable(port);
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
 }
 mutex_unlock(&port->port_lock);
@@ -2696,7 +2476,7 @@ static int ehea_stop(struct net_device *dev)
 cancel_work_sync(&port->reset_task);
 cancel_delayed_work_sync(&port->stats_work);
 mutex_lock(&port->port_lock);
- netif_stop_queue(dev);
+ netif_tx_stop_all_queues(dev);
 port_napi_disable(port);
 ret = ehea_down(dev);
 mutex_unlock(&port->port_lock);
@@ -2722,7 +2502,7 @@ static void ehea_flush_sq(struct ehea_port *port)
 {
 int i;
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ for (i = 0; i < port->num_def_qps; i++) {
 struct ehea_port_res *pr = &port->port_res[i];
 int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
 int ret;
@@ -2756,7 +2536,7 @@ int ehea_stop_qps(struct net_device *dev)
 goto out;
 }
- for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
+ for (i = 0; i < (port->num_def_qps); i++) {
 struct ehea_port_res *pr = &port->port_res[i];
 struct ehea_qp *qp = pr->qp;
@@ -2858,7 +2638,7 @@ int ehea_restart_qps(struct net_device *dev)
 goto out;
 }
- for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
+ for (i = 0; i < (port->num_def_qps); i++) {
 struct ehea_port_res *pr = &port->port_res[i];
 struct ehea_qp *qp = pr->qp;
@@ -2920,7 +2700,7 @@ static void ehea_reset_port(struct work_struct *work)
 mutex_lock(&dlpar_mem_lock);
 port->resets++;
 mutex_lock(&port->port_lock);
- netif_stop_queue(dev);
+ netif_tx_disable(dev);
 port_napi_disable(port);
@@ -2936,7 +2716,7 @@ static void ehea_reset_port(struct work_struct *work)
 port_napi_enable(port);
- netif_wake_queue(dev);
+ netif_tx_wake_all_queues(dev);
 out:
 mutex_unlock(&port->port_lock);
 mutex_unlock(&dlpar_mem_lock);
@@ -2963,7 +2743,7 @@ static void ehea_rereg_mrs(void)
 if (dev->flags & IFF_UP) {
 mutex_lock(&port->port_lock);
- netif_stop_queue(dev);
+ netif_tx_disable(dev);
 ehea_flush_sq(port);
 ret = ehea_stop_qps(dev);
 if (ret) {
@@ -3008,7 +2788,7 @@ static void ehea_rereg_mrs(void)
 if (!ret) {
 check_sqs(port);
 port_napi_enable(port);
- netif_wake_queue(dev);
+ netif_tx_wake_all_queues(dev);
 } else {
 netdev_err(dev, "Unable to restart QPS\n");
 }
@@ -3163,7 +2943,7 @@ static const struct net_device_ops ehea_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 .ndo_poll_controller = ehea_netpoll,
 #endif
- .ndo_get_stats = ehea_get_stats,
+ .ndo_get_stats64 = ehea_get_stats64,
 .ndo_set_mac_address = ehea_set_mac_addr,
 .ndo_validate_addr = eth_validate_addr,
 .ndo_set_rx_mode = ehea_set_multicast_list,
@@ -3184,7 +2964,7 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
 int jumbo;
 /* allocate memory for the port structures */
- dev = alloc_etherdev(sizeof(struct ehea_port));
+ dev = alloc_etherdev_mq(sizeof(struct ehea_port), EHEA_MAX_PORT_RES);
 if (!dev) {
 pr_err("no mem for net_device\n");
@@ -3216,6 +2996,9 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
 if (ret)
 goto out_free_mc_list;
+ netif_set_real_num_rx_queues(dev, port->num_def_qps);
+ netif_set_real_num_tx_queues(dev, port->num_def_qps);
+
 port_dev = ehea_register_port(port, dn);
 if (!port_dev)
 goto out_free_mc_list;
@@ -3228,17 +3011,16 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
 dev->netdev_ops = &ehea_netdev_ops;
 ehea_set_ethtool_ops(dev);
- dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
+ dev->hw_features = NETIF_F_SG | NETIF_F_TSO
 | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | NETIF_F_LRO;
 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
 | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
- | NETIF_F_LLTX | NETIF_F_RXCSUM;
+ | NETIF_F_RXCSUM;
+ dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA |
+ NETIF_F_IP_CSUM;
 dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
- if (use_lro)
- dev->features |= NETIF_F_LRO;
-
 INIT_WORK(&port->reset_task, ehea_reset_port);
 INIT_DELAYED_WORK(&port->stats_work, ehea_update_stats);
@@ -3252,8 +3034,6 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
 goto out_unreg_port;
 }
- port->lro_max_aggr = lro_max_aggr;
-
 ret = ehea_get_jumboframe_status(port, &jumbo);
 if (ret)
 netdev_err(dev, "failed determining jumbo frame status\n");
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.h b/drivers/net/ethernet/ibm/ehea/ehea_qmr.h
index fddff8ec8cf..337a47ecf4a 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.h
@@ -107,7 +107,7 @@ struct ehea_swqe {
 u8 immediate_data_length;
 u8 tcp_offset;
 u8 reserved2;
- u16 tcp_end;
+ u16 reserved2b;
 u8 wrap_tag;
 u8 descriptors; /* number of valid descriptors in WQE */
 u16 reserved3;
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 6b3a033d9de..ed79b2d3ad3 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -1453,7 +1453,7 @@ static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
 /* skb fragments */
 for (i = 0; i < nr_frags; ++i) {
 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
- len = frag->size;
+ len = skb_frag_size(frag);
 if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
 goto undo_frame;
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 4da972eaabb..b1cd41b9c61 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1014,15 +1014,15 @@ retry_bounce:
 /* Map the frags */
 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
- frag->size, DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
 if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
 goto map_failed_frags;
- descs[i+1].fields.flags_len = desc_flags | frag->size;
+ descs[i+1].fields.flags_len = desc_flags | skb_frag_size(frag);
 descs[i+1].fields.address = dma_addr;
 }
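The ehea_main.c changes above drop the driver's private xmit_lock/queue_stopped bookkeeping and instead stop and wake the individual TX queue that ran out of send WQEs, re-checking the condition under the queue's own tx lock in the completion path. The sketch below (not part of the patch) illustrates that generic multiqueue stop/wake pattern in isolation; the names my_ring, my_xmit, my_tx_complete and MY_LOW_WATER are made up for the example, and only standard netdevice helpers are used.

```c
#include <linux/netdevice.h>

#define MY_LOW_WATER 1	/* hypothetical "queue is full" threshold */

struct my_ring {
	atomic_t descs_avail;	/* free send descriptors on this TX queue */
};

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u16 qidx = skb_get_queue_mapping(skb);
	struct netdev_queue *txq = netdev_get_tx_queue(dev, qidx);
	struct my_ring *rings = netdev_priv(dev);	/* assume priv holds one ring per queue */
	struct my_ring *ring = &rings[qidx];

	/* ... post skb to the hardware queue, consuming one descriptor ... */

	/* Stop only this queue when it runs out of descriptors. */
	if (unlikely(atomic_dec_return(&ring->descs_avail) <= MY_LOW_WATER))
		netif_tx_stop_queue(txq);

	return NETDEV_TX_OK;
}

static void my_tx_complete(struct net_device *dev, u16 qidx, int completed)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, qidx);
	struct my_ring *rings = netdev_priv(dev);
	struct my_ring *ring = &rings[qidx];

	atomic_add(completed, &ring->descs_avail);

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     atomic_read(&ring->descs_avail) > MY_LOW_WATER)) {
		__netif_tx_lock(txq, smp_processor_id());
		/* Re-check under the queue lock to avoid racing with my_xmit(). */
		if (netif_tx_queue_stopped(txq) &&
		    atomic_read(&ring->descs_avail) > MY_LOW_WATER)
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
```

Because the stack already serializes transmissions per queue, this arrangement needs no driver-private spinlock and no NETIF_F_LLTX, which is why the patch can delete both.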