authorJan-Bernd Themann <ossthema@de.ibm.com>2006-09-13 17:44:31 +0200
committerJeff Garzik <jeff@garzik.org>2006-09-13 13:23:52 -0400
commit7a291083225af6e22ffaa46b3d91cfc1a1ccaab4 (patch)
treec87a93ee7d5c1c63ce98dc90a62cd0b4dfc4318f /drivers
parent7de745e56244156233e5cdd62b462e52e638d408 (diff)
[PATCH] ehea: IBM eHEA Ethernet Device Driver
Hi Jeff,

I fixed the __iomem issue and tested the driver with sparse. Looks
good so far.

Thanks for your effort.

Jan-Bernd Themann

Signed-off-by: Jan-Bernd Themann <themann@de.ibm.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/Kconfig                 9
-rw-r--r--  drivers/net/Makefile                1
-rw-r--r--  drivers/net/ehea/Makefile           6
-rw-r--r--  drivers/net/ehea/ehea.h           447
-rw-r--r--  drivers/net/ehea/ehea_ethtool.c   294
-rw-r--r--  drivers/net/ehea/ehea_hcall.h      51
-rw-r--r--  drivers/net/ehea/ehea_hw.h        287
-rw-r--r--  drivers/net/ehea/ehea_main.c     2654
-rw-r--r--  drivers/net/ehea/ehea_phyp.c      705
-rw-r--r--  drivers/net/ehea/ehea_phyp.h      455
-rw-r--r--  drivers/net/ehea/ehea_qmr.c       582
-rw-r--r--  drivers/net/ehea/ehea_qmr.h       358
12 files changed, 5849 insertions, 0 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index a36fc60bd88..d30ab6b492d 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2360,6 +2360,15 @@ config CHELSIO_T1
To compile this driver as a module, choose M here: the module
will be called cxgb.
+config EHEA
+ tristate "eHEA Ethernet support"
+ depends on IBMEBUS
+ ---help---
+ This driver supports the IBM pSeries eHEA ethernet adapter.
+
+ To compile the driver as a module, choose M here. The module
+ will be called ehea.
+
config IXGB
tristate "Intel(R) PRO/10GbE support"
depends on PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 6ff17649c0f..0f329e56345 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_E1000) += e1000/
obj-$(CONFIG_IBM_EMAC) += ibm_emac/
obj-$(CONFIG_IXGB) += ixgb/
obj-$(CONFIG_CHELSIO_T1) += chelsio/
+obj-$(CONFIG_EHEA) += ehea/
obj-$(CONFIG_BONDING) += bonding/
obj-$(CONFIG_GIANFAR) += gianfar_driver.o
diff --git a/drivers/net/ehea/Makefile b/drivers/net/ehea/Makefile
new file mode 100644
index 00000000000..775d9969b5c
--- /dev/null
+++ b/drivers/net/ehea/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the eHEA ethernet device driver for IBM eServer System p
+#
+ehea-y = ehea_main.o ehea_phyp.o ehea_qmr.o ehea_ethtool.o
+obj-$(CONFIG_EHEA) += ehea.o
+
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
new file mode 100644
index 00000000000..87c510f22ac
--- /dev/null
+++ b/drivers/net/ehea/ehea.h
@@ -0,0 +1,447 @@
+/*
+ * linux/drivers/net/ehea/ehea.h
+ *
+ * eHEA ethernet device driver for IBM eServer System p
+ *
+ * (C) Copyright IBM Corp. 2006
+ *
+ * Authors:
+ * Christoph Raisch <raisch@de.ibm.com>
+ * Jan-Bernd Themann <themann@de.ibm.com>
+ * Thomas Klein <tklein@de.ibm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __EHEA_H__
+#define __EHEA_H__
+
+#include <linux/module.h>
+#include <linux/ethtool.h>
+#include <linux/vmalloc.h>
+#include <linux/if_vlan.h>
+
+#include <asm/ibmebus.h>
+#include <asm/abs_addr.h>
+#include <asm/io.h>
+
+#define DRV_NAME "ehea"
+#define DRV_VERSION "EHEA_0027"
+
+#define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
+ | NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
+
+#define EHEA_MAX_ENTRIES_RQ1 32767
+#define EHEA_MAX_ENTRIES_RQ2 16383
+#define EHEA_MAX_ENTRIES_RQ3 16383
+#define EHEA_MAX_ENTRIES_SQ 32767
+#define EHEA_MIN_ENTRIES_QP 127
+
+#define EHEA_NUM_TX_QP 1
+
+#ifdef EHEA_SMALL_QUEUES
+#define EHEA_MAX_CQE_COUNT 1023
+#define EHEA_DEF_ENTRIES_SQ 1023
+#define EHEA_DEF_ENTRIES_RQ1 4095
+#define EHEA_DEF_ENTRIES_RQ2 1023
+#define EHEA_DEF_ENTRIES_RQ3 1023
+#else
+#define EHEA_MAX_CQE_COUNT 32000
+#define EHEA_DEF_ENTRIES_SQ 16000
+#define EHEA_DEF_ENTRIES_RQ1 32080
+#define EHEA_DEF_ENTRIES_RQ2 4020
+#define EHEA_DEF_ENTRIES_RQ3 4020
+#endif
+
+#define EHEA_MAX_ENTRIES_EQ 20
+
+#define EHEA_SG_SQ 2
+#define EHEA_SG_RQ1 1
+#define EHEA_SG_RQ2 0
+#define EHEA_SG_RQ3 0
+
+#define EHEA_MAX_PACKET_SIZE 9022 /* for jumbo frames */
+#define EHEA_RQ2_PKT_SIZE 1522
+#define EHEA_L_PKT_SIZE 256 /* low latency */
+
+#define EHEA_POLL_MAX_RWQE 1000
+
+/* Send completion signaling */
+#define EHEA_SIG_IV_LONG 1
+
+/* Protection Domain Identifier */
+#define EHEA_PD_ID 0xaabcdeff
+
+#define EHEA_RQ2_THRESHOLD 1
+#define EHEA_RQ3_THRESHOLD 9 /* use RQ3 threshold of 1522 bytes */
+
+#define EHEA_SPEED_10G 10000
+#define EHEA_SPEED_1G 1000
+#define EHEA_SPEED_100M 100
+#define EHEA_SPEED_10M 10
+#define EHEA_SPEED_AUTONEG 0
+
+/* Broadcast/Multicast registration types */
+#define EHEA_BCMC_SCOPE_ALL 0x08
+#define EHEA_BCMC_SCOPE_SINGLE 0x00
+#define EHEA_BCMC_MULTICAST 0x04
+#define EHEA_BCMC_BROADCAST 0x00
+#define EHEA_BCMC_UNTAGGED 0x02
+#define EHEA_BCMC_TAGGED 0x00
+#define EHEA_BCMC_VLANID_ALL 0x01
+#define EHEA_BCMC_VLANID_SINGLE 0x00
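+
+/*
+ * Illustrative example: these registration types are OR'ed together for
+ * the H_REG_BCMC hcall; an untagged broadcast entry for a single VLAN,
+ * say, would be described by EHEA_BCMC_SCOPE_SINGLE | EHEA_BCMC_BROADCAST
+ * | EHEA_BCMC_UNTAGGED | EHEA_BCMC_VLANID_SINGLE.
+ */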
+
+/* Use this define when kmalloc'ing pHYP control blocks */
+#define H_CB_ALIGNMENT 4096
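+
+/*
+ * For example, the control-block queries in this driver allocate their
+ * buffers as
+ *	cb = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ * presumably relying on the slab allocator returning a naturally aligned
+ * power-of-two block (see ehea_sense_port_attr() in ehea_main.c).
+ */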
+
+#define EHEA_CACHE_LINE 128
+
+/* Memory Regions */
+#define EHEA_MR_MAX_TX_PAGES 20
+#define EHEA_MR_TX_DATA_PN 3
+#define EHEA_MR_ACC_CTRL 0x00800000
+#define EHEA_RWQES_PER_MR_RQ2 10
+#define EHEA_RWQES_PER_MR_RQ3 10
+
+#define EHEA_WATCH_DOG_TIMEOUT (10 * HZ)
+
+/* utility functions */
+
+#define ehea_info(fmt, args...) \
+ printk(KERN_INFO DRV_NAME ": " fmt "\n", ## args)
+
+#define ehea_error(fmt, args...) \
+ printk(KERN_ERR DRV_NAME ": Error in %s: " fmt "\n", __func__, ## args)
+
+#ifdef DEBUG
+#define ehea_debug(fmt, args...) \
+ printk(KERN_DEBUG DRV_NAME ": " fmt, ## args)
+#else
+#define ehea_debug(fmt, args...) do {} while (0)
+#endif
+
+void ehea_dump(void *adr, int len, char *msg);
+
+#define EHEA_BMASK(pos, length) (((pos) << 16) + (length))
+
+#define EHEA_BMASK_IBM(from, to) (((63 - (to)) << 16) + ((to) - (from) + 1))
+
+#define EHEA_BMASK_SHIFTPOS(mask) (((mask) >> 16) & 0xffff)
+
+#define EHEA_BMASK_MASK(mask) \
+ (0xffffffffffffffffULL >> ((64 - (mask)) & 0xffff))
+
+#define EHEA_BMASK_SET(mask, value) \
+ ((EHEA_BMASK_MASK(mask) & ((u64)(value))) << EHEA_BMASK_SHIFTPOS(mask))
+
+#define EHEA_BMASK_GET(mask, value) \
+ (EHEA_BMASK_MASK(mask) & (((u64)(value)) >> EHEA_BMASK_SHIFTPOS(mask)))
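+
+/*
+ * Worked example: IBM bit numbering counts bit 0 as the most significant
+ * bit of the 64-bit word, so EHEA_BMASK_IBM(48, 63) describes the 16
+ * least significant bits (shiftpos = 63 - 63 = 0, length = 16).  Hence
+ *	EHEA_BMASK_SET(EHEA_BMASK_IBM(48, 63), 0x1234)        == 0x1234
+ *	EHEA_BMASK_GET(EHEA_BMASK_IBM(48, 63), 0xabcd1234ULL) == 0x1234
+ */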
+
+/*
+ * Generic ehea page
+ */
+struct ehea_page {
+ u8 entries[PAGE_SIZE];
+};
+
+/*
+ * Generic queue in linux kernel virtual memory
+ */
+struct hw_queue {
+ u64 current_q_offset; /* current queue entry */
+ struct ehea_page **queue_pages; /* array of pages belonging to queue */
+ u32 qe_size; /* queue entry size */
+ u32 queue_length; /* queue length allocated in bytes */
+ u32 pagesize;
+ u32 toggle_state; /* toggle flag - per page */
+ u32 reserved; /* 64 bit alignment */
+};
+
+/*
+ * For pSeries this is a 64bit memory address where
+ * I/O memory is mapped into CPU address space
+ */
+struct h_epa {
+ void __iomem *addr;
+};
+
+struct h_epa_user {
+ u64 addr;
+};
+
+struct h_epas {
+ struct h_epa kernel; /* kernel space accessible resource,
+ set to 0 if unused */
+ struct h_epa_user user; /* user space accessible resource
+ set to 0 if unused */
+};
+
+struct ehea_qp;
+struct ehea_cq;
+struct ehea_eq;
+struct ehea_port;
+struct ehea_av;
+
+/*
+ * Queue attributes passed to ehea_create_qp()
+ */
+struct ehea_qp_init_attr {
+ /* input parameter */
+ u32 qp_token; /* queue token */
+ u8 low_lat_rq1;
+ u8 signalingtype; /* cqe generation flag */
+ u8 rq_count; /* num of receive queues */
+ u8 eqe_gen; /* eqe generation flag */
+ u16 max_nr_send_wqes; /* max number of send wqes */
+ u16 max_nr_rwqes_rq1; /* max number of receive wqes */
+ u16 max_nr_rwqes_rq2;
+ u16 max_nr_rwqes_rq3;
+ u8 wqe_size_enc_sq;
+ u8 wqe_size_enc_rq1;
+ u8 wqe_size_enc_rq2;
+ u8 wqe_size_enc_rq3;
+ u8 swqe_imm_data_len; /* immediate data length for swqes */
+ u16 port_nr;
+ u16 rq2_threshold;
+ u16 rq3_threshold;
+ u64 send_cq_handle;
+ u64 recv_cq_handle;
+ u64 aff_eq_handle;
+
+ /* output parameter */
+ u32 qp_nr;
+ u16 act_nr_send_wqes;
+ u16 act_nr_rwqes_rq1;
+ u16 act_nr_rwqes_rq2;
+ u16 act_nr_rwqes_rq3;
+ u8 act_wqe_size_enc_sq;
+ u8 act_wqe_size_enc_rq1;
+ u8 act_wqe_size_enc_rq2;
+ u8 act_wqe_size_enc_rq3;
+ u32 nr_sq_pages;
+ u32 nr_rq1_pages;
+ u32 nr_rq2_pages;
+ u32 nr_rq3_pages;
+ u32 liobn_sq;
+ u32 liobn_rq1;
+ u32 liobn_rq2;
+ u32 liobn_rq3;
+};
+
+/*
+ * Event Queue attributes, passed as parameter
+ */
+struct ehea_eq_attr {
+ u32 type;
+ u32 max_nr_of_eqes;
+ u8 eqe_gen; /* generate eqe flag */
+ u64 eq_handle;
+ u32 act_nr_of_eqes;
+ u32 nr_pages;
+ u32 ist1; /* Interrupt service token */
+ u32 ist2;
+ u32 ist3;
+ u32 ist4;
+};
+
+
+/*
+ * Event Queue
+ */
+struct ehea_eq {
+ struct ehea_adapter *adapter;
+ struct hw_queue hw_queue;
+ u64 fw_handle;
+ struct h_epas epas;
+ spinlock_t spinlock;
+ struct ehea_eq_attr attr;
+};
+
+/*
+ * HEA Queues
+ */
+struct ehea_qp {
+ struct ehea_adapter *adapter;
+ u64 fw_handle; /* QP handle for firmware calls */
+ struct hw_queue hw_squeue;
+ struct hw_queue hw_rqueue1;
+ struct hw_queue hw_rqueue2;
+ struct hw_queue hw_rqueue3;
+ struct h_epas epas;
+ struct ehea_qp_init_attr init_attr;
+};
+
+/*
+ * Completion Queue attributes
+ */
+struct ehea_cq_attr {
+ /* input parameter */
+ u32 max_nr_of_cqes;
+ u32 cq_token;
+ u64 eq_handle;
+
+ /* output parameter */
+ u32 act_nr_of_cqes;
+ u32 nr_pages;
+};
+
+/*
+ * Completion Queue
+ */
+struct ehea_cq {
+ struct ehea_adapter *adapter;
+ u64 fw_handle;
+ struct hw_queue hw_queue;
+ struct h_epas epas;
+ struct ehea_cq_attr attr;
+};
+
+/*
+ * Memory Region
+ */
+struct ehea_mr {
+ u64 handle;
+ u64 vaddr;
+ u32 lkey;
+};
+
+/*
+ * Port state information
+ */
+struct port_state {
+ int poll_max_processed;
+ int poll_receive_errors;
+ int ehea_poll;
+ int queue_stopped;
+ int min_swqe_avail;
+ u64 sqc_stop_sum;
+ int pkt_send;
+ int pkt_xmit;
+ int send_tasklet;
+ int nwqe;
+};
+
+#define EHEA_IRQ_NAME_SIZE 20
+
+/*
+ * Queue SKB Array
+ */
+struct ehea_q_skb_arr {
+ struct sk_buff **arr; /* skb array for queue */
+ int len; /* array length */
+ int index; /* array index */
+ int os_skbs; /* rq2/rq3 only: outstanding skbs */
+};
+
+/*
+ * Port resources
+ */
+struct ehea_port_res {
+ struct ehea_mr send_mr; /* send memory region */
+ struct ehea_mr recv_mr; /* receive memory region */
+ spinlock_t xmit_lock;
+ struct ehea_port *port;
+ char int_recv_name[EHEA_IRQ_NAME_SIZE];
+ char int_send_name[EHEA_IRQ_NAME_SIZE];
+ struct ehea_qp *qp;
+ struct ehea_cq *send_cq;
+ struct ehea_cq *recv_cq;
+ struct ehea_eq *send_eq;
+ struct ehea_eq *recv_eq;
+ spinlock_t send_lock;
+ struct ehea_q_skb_arr rq1_skba;
+ struct ehea_q_skb_arr rq2_skba;
+ struct ehea_q_skb_arr rq3_skba;
+ struct ehea_q_skb_arr sq_skba;
+ spinlock_t netif_queue;
+ int queue_stopped;
+ int swqe_refill_th;
+ atomic_t swqe_avail;
+ int swqe_ll_count;
+ int swqe_count;
+ u32 swqe_id_counter;
+ u64 tx_packets;
+ struct tasklet_struct send_comp_task;
+ spinlock_t recv_lock;
+ struct port_state p_state;
+ u64 rx_packets;
+ u32 poll_counter;
+};
+
+
+struct ehea_adapter {
+ u64 handle;
+ u8 num_ports;
+ struct ehea_port *port[16];
+ struct ehea_eq *neq; /* notification event queue */
+ struct workqueue_struct *ehea_wq;
+ struct tasklet_struct neq_tasklet;
+ struct ehea_mr mr;
+ u32 pd; /* protection domain */
+ u64 max_mc_mac; /* max number of multicast mac addresses */
+};
+
+
+struct ehea_mc_list {
+ struct list_head list;
+ u64 macaddr;
+};
+
+#define EHEA_PORT_UP 1
+#define EHEA_PORT_DOWN 0
+#define EHEA_MAX_PORT_RES 16
+struct ehea_port {
+ struct ehea_adapter *adapter; /* adapter that owns this port */
+ struct net_device *netdev;
+ struct net_device_stats stats;
+ struct ehea_port_res port_res[EHEA_MAX_PORT_RES];
+ struct device_node *of_dev_node; /* Open Firmware Device Node */
+ struct ehea_mc_list *mc_list; /* Multicast MAC addresses */
+ struct vlan_group *vgrp;
+ struct ehea_eq *qp_eq;
+ struct work_struct reset_task;
+ struct semaphore port_lock;
+ char int_aff_name[EHEA_IRQ_NAME_SIZE];
+ int allmulti; /* Indicates IFF_ALLMULTI state */
+ int promisc; /* Indicates IFF_PROMISC state */
+ int num_add_tx_qps;
+ int resets;
+ u64 mac_addr;
+ u32 logical_port_id;
+ u32 port_speed;
+ u32 msg_enable;
+ u32 sig_comp_iv;
+ u32 state;
+ u8 full_duplex;
+ u8 autoneg;
+ u8 num_def_qps;
+};
+
+struct port_res_cfg {
+ int max_entries_rcq;
+ int max_entries_scq;
+ int max_entries_sq;
+ int max_entries_rq1;
+ int max_entries_rq2;
+ int max_entries_rq3;
+};
+
+
+void ehea_set_ethtool_ops(struct net_device *netdev);
+int ehea_sense_port_attr(struct ehea_port *port);
+int ehea_set_portspeed(struct ehea_port *port, u32 port_speed);
+
+#endif /* __EHEA_H__ */
diff --git a/drivers/net/ehea/ehea_ethtool.c b/drivers/net/ehea/ehea_ethtool.c
new file mode 100644
index 00000000000..6906af6277c
--- /dev/null
+++ b/drivers/net/ehea/ehea_ethtool.c
@@ -0,0 +1,294 @@
+/*
+ * linux/drivers/net/ehea/ehea_ethtool.c
+ *
+ * eHEA ethernet device driver for IBM eServer System p
+ *
+ * (C) Copyright IBM Corp. 2006
+ *
+ * Authors:
+ * Christoph Raisch <raisch@de.ibm.com>
+ * Jan-Bernd Themann <themann@de.ibm.com>
+ * Thomas Klein <tklein@de.ibm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "ehea.h"
+#include "ehea_phyp.h"
+
+static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ int ret;
+
+ ret = ehea_sense_port_attr(port);
+
+ if (ret)
+ return ret;
+
+ if (netif_carrier_ok(dev)) {
+ switch (port->port_speed) {
+ case EHEA_SPEED_10M: cmd->speed = SPEED_10; break;
+ case EHEA_SPEED_100M: cmd->speed = SPEED_100; break;
+ case EHEA_SPEED_1G: cmd->speed = SPEED_1000; break;
+ case EHEA_SPEED_10G: cmd->speed = SPEED_10000; break;
+ }
+ cmd->duplex = port->full_duplex == 1 ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ } else {
+ cmd->speed = -1;
+ cmd->duplex = -1;
+ }
+
+ cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full
+ | SUPPORTED_100baseT_Full | SUPPORTED_100baseT_Half
+ | SUPPORTED_10baseT_Full | SUPPORTED_10baseT_Half
+ | SUPPORTED_Autoneg | SUPPORTED_FIBRE);
+
+ cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_Autoneg
+ | ADVERTISED_FIBRE);
+
+ cmd->port = PORT_FIBRE;
+ cmd->autoneg = port->autoneg == 1 ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+ return 0;
+}
+
+static int ehea_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ int ret = 0;
+ u32 sp;
+
+ if (cmd->autoneg == AUTONEG_ENABLE) {
+ sp = EHEA_SPEED_AUTONEG;
+ goto doit;
+ }
+
+ switch (cmd->speed) {
+ case SPEED_10:
+ if (cmd->duplex == DUPLEX_FULL)
+ sp = H_SPEED_10M_F;
+ else
+ sp = H_SPEED_10M_H;
+ break;
+
+ case SPEED_100:
+ if (cmd->duplex == DUPLEX_FULL)
+ sp = H_SPEED_100M_F;
+ else
+ sp = H_SPEED_100M_H;
+ break;
+
+ case SPEED_1000:
+ if (cmd->duplex == DUPLEX_FULL)
+ sp = H_SPEED_1G_F;
+ else
+ ret = -EINVAL;
+ break;
+
+ case SPEED_10000:
+ if (cmd->duplex == DUPLEX_FULL)
+ sp = H_SPEED_10G_F;
+ else
+ ret = -EINVAL;
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ goto out;
+doit:
+ ret = ehea_set_portspeed(port, sp);
+
+ if (!ret)
+ ehea_info("%s: Port speed succesfully set: %dMbps "
+ "%s Duplex",
+ port->netdev->name, port->port_speed,
+ port->full_duplex == 1 ? "Full" : "Half");
+out:
+ return ret;
+}
+
+static int ehea_nway_reset(struct net_device *dev)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ int ret;
+
+ ret = ehea_set_portspeed(port, EHEA_SPEED_AUTONEG);
+
+ if (!ret)
+ ehea_info("%s: Port speed succesfully set: %dMbps "
+ "%s Duplex",
+ port->netdev->name, port->port_speed,
+ port->full_duplex == 1 ? "Full" : "Half");
+ return ret;
+}
+
+static void ehea_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+}
+
+static u32 ehea_get_msglevel(struct net_device *dev)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ return port->msg_enable;
+}
+
+static void ehea_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ port->msg_enable = value;
+}
+
+static u32 ehea_get_rx_csum(struct net_device *dev)
+{
+ return 1;
+}
+
+static char ehea_ethtool_stats_keys[][ETH_GSTRING_LEN] = {
+ {"poll_max_processed"},
+ {"queue_stopped"},
+ {"min_swqe_avail"},
+ {"poll_receive_err"},
+ {"pkt_send"},
+ {"pkt_xmit"},
+ {"send_tasklet"},
+ {"ehea_poll"},
+ {"nwqe"},
+ {"swqe_available_0"},
+ {"sig_comp_iv"},
+ {"swqe_refill_th"},
+ {"port resets"},
+ {"rxo"},
+ {"rx64"},
+ {"rx65"},
+ {"rx128"},
+ {"rx256"},
+ {"rx512"},
+ {"rx1024"},
+ {"txo"},
+ {"tx64"},
+ {"tx65"},
+ {"tx128"},
+ {"tx256"},
+ {"tx512"},
+ {"tx1024"},
+};
+
+static void ehea_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ if (stringset == ETH_SS_STATS) {
+ memcpy(data, &ehea_ethtool_stats_keys,
+ sizeof(ehea_ethtool_stats_keys));
+ }
+}
+
+static int ehea_get_stats_count(struct net_device *dev)
+{
+ return ARRAY_SIZE(ehea_ethtool_stats_keys);
+}
+
+static void ehea_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ u64 hret;
+ int i;
+ struct ehea_port *port = netdev_priv(dev);
+ struct ehea_adapter *adapter = port->adapter;
+ struct ehea_port_res *pr = &port->port_res[0];
+ struct port_state *p_state = &pr->p_state;
+ struct hcp_ehea_port_cb6 *cb6;
+
+ for (i = 0; i < ehea_get_stats_count(dev); i++)
+ data[i] = 0;
+
+ i = 0;
+
+ data[i++] = p_state->poll_max_processed;
+ data[i++] = p_state->queue_stopped;
+ data[i++] = p_state->min_swqe_avail;
+ data[i++] = p_state->poll_receive_errors;
+ data[i++] = p_state->pkt_send;
+ data[i++] = p_state->pkt_xmit;
+ data[i++] = p_state->send_tasklet;
+ data[i++] = p_state->ehea_poll;
+ data[i++] = p_state->nwqe;
+ data[i++] = atomic_read(&port->port_res[0].swqe_avail);
+ data[i++] = port->sig_comp_iv;
+ data[i++] = port->port_res[0].swqe_refill_th;
+ data[i++] = port->resets;
+
+ cb6 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ if (!cb6) {
+ ehea_error("no mem for cb6");
+ return;
+ }
+
+ hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
+ H_PORT_CB6, H_PORT_CB6_ALL, cb6);
+ if (netif_msg_hw(port))
+ ehea_dump(cb6, sizeof(*cb6), "ehea_get_ethtool_stats");
+
+ if (hret == H_SUCCESS) {
+ data[i++] = cb6->rxo;
+ data[i++] = cb6->rx64;
+ data[i++] = cb6->rx65;
+ data[i++] = cb6->rx128;
+ data[i++] = cb6->rx256;
+ data[i++] = cb6->rx512;
+ data[i++] = cb6->rx1024;
+ data[i++] = cb6->txo;
+ data[i++] = cb6->tx64;
+ data[i++] = cb6->tx65;
+ data[i++] = cb6->tx128;
+ data[i++] = cb6->tx256;
+ data[i++] = cb6->tx512;
+ data[i++] = cb6->tx1024;
+ } else
+ ehea_error("query_ehea_port failed");
+
+ kfree(cb6);
+}
+
+static struct ethtool_ops ehea_ethtool_ops = {
+ .get_settings = ehea_get_settings,
+ .get_drvinfo = ehea_get_drvinfo,
+ .get_msglevel = ehea_get_msglevel,
+ .set_msglevel = ehea_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = ethtool_op_set_tso,
+ .get_strings = ehea_get_strings,
+ .get_stats_count = ehea_get_stats_count,
+ .get_ethtool_stats = ehea_get_ethtool_stats,
+ .get_rx_csum = ehea_get_rx_csum,
+ .set_settings = ehea_set_settings,
+ .nway_reset = ehea_nway_reset, /* Restart autonegotiation */
+};
+
+void ehea_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &ehea_ethtool_ops);
+}
diff --git a/drivers/net/ehea/ehea_hcall.h b/drivers/net/ehea/ehea_hcall.h
new file mode 100644
index 00000000000..8e7d1c3edc6
--- /dev/null
+++ b/drivers/net/ehea/ehea_hcall.h
@@ -0,0 +1,51 @@
+/*
+ * linux/drivers/net/ehea/ehea_hcall.h
+ *
+ * eHEA ethernet device driver for IBM eServer System p
+ *
+ * (C) Copyright IBM Corp. 2006
+ *
+ * Authors:
+ * Christoph Raisch <raisch@de.ibm.com>
+ * Jan-Bernd Themann <themann@de.ibm.com>
+ * Thomas Klein <tklein@de.ibm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __EHEA_HCALL_H__
+#define __EHEA_HCALL_H__
+
+/*
+ * This file contains the hcall numbers used by the eHEA driver; they are
+ * to be moved into the appropriate common kernel hcall definitions later
+ */
+
+#define H_ALLOC_HEA_RESOURCE 0x278
+#define H_MODIFY_HEA_QP 0x250
+#define H_QUERY_HEA_QP 0x254
+#define H_QUERY_HEA 0x258
+#define H_QUERY_HEA_PORT 0x25C
+#define H_MODIFY_HEA_PORT 0x260
+#define H_REG_BCMC 0x264
+#define H_DEREG_BCMC 0x268
+#define H_REGISTER_HEA_RPAGES 0x26C
+#define H_DISABLE_AND_GET_HEA 0x270
+#define H_GET_HEA_INFO 0x274
+#define H_ADD_CONN 0x284
+#define H_DEL_CONN 0x288
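+
+/*
+ * These are the hcall opcodes passed to the POWER hypervisor by the
+ * ehea_h_* wrappers in ehea_phyp.c; for example, ehea_h_query_ehea_port()
+ * issues H_QUERY_HEA_PORT to read a port control block (see its callers
+ * in ehea_ethtool.c and ehea_main.c).
+ */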
+
+#endif /* __EHEA_HCALL_H__ */
diff --git a/drivers/net/ehea/ehea_hw.h b/drivers/net/ehea/ehea_hw.h
new file mode 100644
index 00000000000..e3a7d07f88c
--- /dev/null
+++ b/drivers/net/ehea/ehea_hw.h
@@ -0,0 +1,287 @@
+/*
+ * linux/drivers/net/ehea/ehea_hw.h
+ *
+ * eHEA ethernet device driver for IBM eServer System p
+ *
+ * (C) Copyright IBM Corp. 2006
+ *
+ * Authors:
+ * Christoph Raisch <raisch@de.ibm.com>
+ * Jan-Bernd Themann <themann@de.ibm.com>
+ * Thomas Klein <tklein@de.ibm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __EHEA_HW_H__
+#define __EHEA_HW_H__
+
+#define QPX_SQA_VALUE EHEA_BMASK_IBM(48, 63)
+#define QPX_RQ1A_VALUE EHEA_BMASK_IBM(48, 63)
+#define QPX_RQ2A_VALUE EHEA_BMASK_IBM(48, 63)
+#define QPX_RQ3A_VALUE EHEA_BMASK_IBM(48, 63)
+
+#define QPTEMM_OFFSET(x) offsetof(struct ehea_qptemm, x)
+
+struct ehea_qptemm {
+ u64 qpx_hcr;
+ u64 qpx_c;
+ u64 qpx_herr;
+ u64 qpx_aer;
+ u64 qpx_sqa;
+ u64 qpx_sqc;
+ u64 qpx_rq1a;
+ u64 qpx_rq1c;
+ u64 qpx_st;
+ u64 qpx_aerr;
+ u64 qpx_tenure;
+ u64 qpx_reserved1[(0x098 - 0x058) / 8];
+ u64 qpx_portp;
+ u64 qpx_reserved2[(0x100 - 0x0A0) / 8];
+ u64 qpx_t;
+ u64 qpx_sqhp;
+ u64 qpx_sqptp;
+ u64 qpx_reserved3[(0x140 - 0x118) / 8];
+ u64 qpx_sqwsize;
+ u64 qpx_reserved4[(0x170 - 0x148) / 8];
+ u64 qpx_sqsize;
+ u64 qpx_reserved5[(0x1B0 - 0x178) / 8];
+ u64 qpx_sigt;
+ u64 qpx_wqecnt;
+ u64 qpx_rq1hp;
+ u64 qpx_rq1ptp;
+ u64 qpx_rq1size;
+ u64 qpx_reserved6[(0x220 - 0x1D8) / 8];
+ u64 qpx_rq1wsize;
+ u64 qpx_reserved7[(0x240 - 0x228) / 8];
+ u64 qpx_pd;
+ u64 qpx_scqn;
+ u64 qpx_rcqn;
+ u64 qpx_aeqn;
+ u64 reserved49;
+ u64 qpx_ram;
+ u64 qpx_reserved8[(0x300 - 0x270) / 8];
+ u64 qpx_rq2a;
+ u64 qpx_rq2c;
+ u64 qpx_rq2hp;
+ u64 qpx_rq2ptp;
+ u64 qpx_rq2size;
+ u64 qpx_rq2wsize;
+ u64 qpx_rq2th;
+ u64 qpx_rq3a;
+ u64 qpx_rq3c;
+ u64 qpx_rq3hp;
+ u64 qpx_rq3ptp;
+ u64 qpx_rq3size;
+ u64 qpx_rq3wsize;
+ u64 qpx_rq3th;
+ u64 qpx_lpn;
+ u64 qpx_reserved9[(0x400 - 0x378) / 8];
+ u64 reserved_ext[(0x500 - 0x400) / 8];
+ u64 reserved2[(0x1000 - 0x500) / 8];
+};
+
+#define MRx_HCR_LPARID_VALID EHEA_BMASK_IBM(0, 0)
+
+#define MRMWMM_OFFSET(x) offsetof(struct ehea_mrmwmm, x)
+
+struct ehea_mrmwmm {
+ u64 mrx_hcr;
+ u64 mrx_c;
+ u64 mrx_herr;
+ u64 mrx_aer;
+ u64 mrx_pp;
+ u64 reserved1;
+ u64 reserved2;
+ u64 reserved3;
+ u64 reserved4[(0x200 - 0x40) / 8];
+ u64 mrx_ctl[64];
+};
+
+#define QPEDMM_OFFSET(x) offsetof(struct ehea_qpedmm, x)
+
+struct ehea_qpedmm {
+
+ u64 reserved0[(0x400) / 8];
+ u64 qpedx_phh;
+ u64 qpedx_ppsgp;
+ u64 qpedx_ppsgu;
+ u64 qpedx_ppdgp;
+ u64 qpedx_ppdgu;
+ u64 qpedx_aph;
+ u64 qpedx_apsgp;
+ u64 qpedx_apsgu;
+ u64 qpedx_apdgp;
+ u64 qpedx_apdgu;
+ u64 qpedx_apav;
+ u64 qpedx_apsav;
+ u64 qpedx_hcr;
+ u64 reserved1[4];
+ u64 qpedx_rrl0;
+ u64 qpedx_rrrkey0;
+ u64 qpedx_rrva0;
+ u64 reserved2;
+ u64 qpedx_rrl1;
+ u64 qpedx_rrrkey1;
+ u64 qpedx_rrva1;
+ u64 reserved3;
+ u64 qpedx_rrl2;
+ u64 qpedx_rrrkey2;
+ u64 qpedx_rrva2;
+ u64 reserved4;
+ u64 qpedx_rrl3;
+ u64 qpedx_rrrkey3;
+ u64 qpedx_rrva3;
+};
+
+#define CQX_FECADDER EHEA_BMASK_IBM(32, 63)
+#define CQX_FEC_CQE_CNT EHEA_BMASK_IBM(32, 63)
+#define CQX_N1_GENERATE_COMP_EVENT EHEA_BMASK_IBM(0, 0)
+#define CQX_EP_EVENT_PENDING EHEA_BMASK_IBM(0, 0)
+
+#define CQTEMM_OFFSET(x) offsetof(struct ehea_cqtemm, x)
+
+struct ehea_cqtemm {
+ u64 cqx_hcr;
+ u64 cqx_c;
+ u64 cqx_herr;
+ u64 cqx_aer;
+ u64 cqx_ptp;
+ u64 cqx_tp;
+ u64 cqx_fec;
+ u64 cqx_feca;
+ u64 cqx_ep;
+ u64 cqx_eq;
+ u64 reserved1;
+ u64 cqx_n0;
+ u64 cqx_n1;
+ u64 reserved2[(0x1000 - 0x60) / 8];
+};
+
+#define EQTEMM_OFFSET(x) offsetof(struct ehea_eqtemm, x)
+
+struct ehea_eqtemm {
+ u64 eqx_hcr;
+ u64 eqx_c;
+ u64 eqx_herr;
+ u64 eqx_aer;
+ u64 eqx_ptp;
+ u64 eqx_tp;
+ u64 eqx_ssba;
+ u64 eqx_psba;
+ u64 eqx_cec;
+ u64 eqx_meql;
+ u64 eqx_xisbi;
+ u64 eqx_xisc;
+ u64 eqx_it;
+};
+
+static inline u64 epa_load(struct h_epa epa, u32 offset)
+{
+ return readq((void __iomem *)(epa.addr + offset));
+}
+
+static inline void epa_store(struct h_epa epa, u32 offset, u64 value)
+{
+ writeq(value, (void __iomem *)(epa.addr + offset));
+ epa_load(epa, offset); /* synchronize explicitly to eHEA */
+}
+
+static inline void epa_store_acc(struct h_epa epa, u32 offset, u64 value)
+{
+ writeq(value, (void __iomem *)(epa.addr + offset));
+}
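+
+/*
+ * Note: epa_store() reads the register back so the write is known to
+ * have reached the adapter before returning, while epa_store_acc()
+ * leaves the write posted; that is why the doorbell helpers below
+ * (ehea_update_*) use the posted variant on the fast path.
+ */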
+
+#define epa_store_eq(epa, offset, value)\
+ epa_store(epa, EQTEMM_OFFSET(offset), value)
+#define epa_load_eq(epa, offset)\
+ epa_load(epa, EQTEMM_OFFSET(offset))
+
+#define epa_store_cq(epa, offset, value)\
+ epa_store(epa, CQTEMM_OFFSET(offset), value)
+#define epa_load_cq(epa, offset)\
+ epa_load(epa, CQTEMM_OFFSET(offset))
+
+#define epa_store_qp(epa, offset, value)\
+ epa_store(epa, QPTEMM_OFFSET(offset), value)
+#define epa_load_qp(epa, offset)\
+ epa_load(epa, QPTEMM_OFFSET(offset))
+
+#define epa_store_qped(epa, offset, value)\
+ epa_store(epa, QPEDMM_OFFSET(offset), value)
+#define epa_load_qped(epa, offset)\
+ epa_load(epa, QPEDMM_OFFSET(offset))
+
+#define epa_store_mrmw(epa, offset, value)\
+ epa_store(epa, MRMWMM_OFFSET(offset), value)
+#define epa_load_mrmw(epa, offset)\
+ epa_load(epa, MRMWMM_OFFSET(offset))
+
+#define epa_store_base(epa, offset, value)\
+ epa_store(epa, HCAGR_OFFSET(offset), value)
+#define epa_load_base(epa, offset)\
+ epa_load(epa, HCAGR_OFFSET(offset))
+
+static inline void ehea_update_sqa(struct ehea_qp *qp, u16 nr_wqes)
+{
+ struct h_epa epa = qp->epas.kernel;
+ epa_store_acc(epa, QPTEMM_OFFSET(qpx_sqa),
+ EHEA_BMASK_SET(QPX_SQA_VALUE, nr_wqes));
+}
+
+static inline void ehea_update_rq3a(struct ehea_qp *qp, u16 nr_wqes)
+{
+ struct h_epa epa = qp->epas.kernel;
+ epa_store_acc(epa, QPTEMM_OFFSET(qpx_rq3a),
+ EHEA_BMASK_SET(QPX_RQ3A_VALUE, nr_wqes));
+}
+
+static inline void ehea_update_rq2a(struct ehea_qp *qp, u16 nr_wqes)
+{
+ struct h_epa epa = qp->epas.kernel;
+ epa_store_acc(epa, QPTEMM_OFFSET(qpx_rq2a),
+ EHEA_BMASK_SET(QPX_RQ2A_VALUE, nr_wqes));
+}
+
+static inline void ehea_update_rq1a(struct ehea_qp *qp, u16 nr_wqes)
+{
+ struct h_epa epa = qp->epas.kernel;
+ epa_store_acc(epa, QPTEMM_OFFSET(qpx_rq1a),
+ EHEA_BMASK_SET(QPX_RQ1A_VALUE, nr_wqes));
+}
+
+static inline void ehea_update_feca(struct ehea_cq *cq, u32 nr_cqes)
+{
+ struct h_epa epa = cq->epas.kernel;
+ epa_store_acc(epa, CQTEMM_OFFSET(cqx_feca),
+ EHEA_BMASK_SET(CQX_FECADDER, nr_cqes));
+}
+
+static inline void ehea_reset_cq_n1(struct ehea_cq *cq)
+{
+ struct h_epa epa = cq->epas.kernel;
+ epa_store_cq(epa, cqx_n1,
+ EHEA_BMASK_SET(CQX_N1_GENERATE_COMP_EVENT, 1));
+}
+
+static inline void ehea_reset_cq_ep(struct ehea_cq *my_cq)
+{
+ struct h_epa epa = my_cq->epas.kernel;
+ epa_store_acc(epa, CQTEMM_OFFSET(cqx_ep),
+ EHEA_BMASK_SET(CQX_EP_EVENT_PENDING, 0));
+}
+
+#endif /* __EHEA_HW_H__ */
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
new file mode 100644
index 00000000000..82a58c1cfe5
--- /dev/null
+++ b/drivers/net/ehea/ehea_main.c
@@ -0,0 +1,2654 @@
+/*
+ * linux/drivers/net/ehea/ehea_main.c
+ *
+ * eHEA ethernet device driver for IBM eServer System p
+ *
+ * (C) Copyright IBM Corp. 2006
+ *
+ * Authors:
+ * Christoph Raisch <raisch@de.ibm.com>
+ * Jan-Bernd Themann <themann@de.ibm.com>
+ * Thomas Klein <tklein@de.ibm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/if.h>
+#include <linux/list.h>
+#include <linux/if_ether.h>
+#include <net/ip.h>
+
+#include "ehea.h"
+#include "ehea_qmr.h"
+#include "ehea_phyp.h"
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
+MODULE_DESCRIPTION("IBM eServer HEA Driver");
+MODULE_VERSION(DRV_VERSION);
+
+
+static int msg_level = -1;
+static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
+static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
+static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
+static int sq_entries = EHEA_DEF_ENTRIES_SQ;
+
+module_param(msg_level, int, 0);
+module_param(rq1_entries, int, 0);
+module_param(rq2_entries, int, 0);
+module_param(rq3_entries, int, 0);
+module_param(sq_entries, int, 0);
+
+MODULE_PARM_DESC(msg_level, "Netif message level (-1 = driver default)");
+MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
+ "[2^x - 1], x = [6..14]. Default = "
+ __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3));
+MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
+ "[2^x - 1], x = [6..14]. Default = "
+ __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2));
+MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
+ "[2^x - 1], x = [6..14]. Default = "
+ __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1));
+MODULE_PARM_DESC(sq_entries, "Number of entries for the Send Queue "
+ "[2^x - 1], x = [6..14]. Default = "
+ __MODULE_STRING(EHEA_DEF_ENTRIES_SQ));
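+
+/*
+ * Illustrative example: queue sizes must be of the form 2^x - 1, so a
+ * valid invocation would be e.g.
+ *	modprobe ehea rq1_entries=4095 sq_entries=1023
+ */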
+
+void ehea_dump(void *adr, int len, char *msg)
+{
+ int x;
+ unsigned char *deb = adr;
+ for (x = 0; x < len; x += 16) {
+ printk(DRV_NAME " %s adr=%p ofs=%04x %016lx %016lx\n", msg,
+ deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
+ deb += 16;
+ }
+}
+
+static struct net_device_stats *ehea_get_stats(struct net_device *dev)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ struct net_device_stats *stats = &port->stats;
+ struct hcp_ehea_port_cb2 *cb2;
+ u64 hret, rx_packets;
+ int i;
+
+ memset(stats, 0, sizeof(*stats));
+
+ cb2 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ if (!cb2) {
+ ehea_error("no mem for cb2");
+ goto out;
+ }
+
+ hret = ehea_h_query_ehea_port(port->adapter->handle,
+ port->logical_port_id,
+ H_PORT_CB2, H_PORT_CB2_ALL, cb2);
+ if (hret != H_SUCCESS) {
+ ehea_error("query_ehea_port failed");
+ goto out_herr;
+ }
+
+ if (netif_msg_hw(port))
+ ehea_dump(cb2, sizeof(*cb2), "net_device_stats");
+
+ rx_packets = 0;
+ for (i = 0; i < port->num_def_qps; i++)
+ rx_packets += port->port_res[i].rx_packets;
+
+ stats->tx_packets = cb2->txucp + cb2->txmcp + cb2->txbcp;
+ stats->multicast = cb2->rxmcp;
+ stats->rx_errors = cb2->rxuerr;
+ stats->rx_bytes = cb2->rxo;
+ stats->tx_bytes = cb2->txo;
+ stats->rx_packets = rx_packets;
+
+out_herr:
+ kfree(cb2);
+out:
+ return stats;
+}
+
+static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
+{
+ struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
+ struct net_device *dev = pr->port->netdev;
+ int max_index_mask = pr->rq1_skba.len - 1;
+ int i;
+
+ if (!nr_of_wqes)
+ return;
+
+ for (i = 0; i < nr_of_wqes; i++) {
+ if (!skb_arr_rq1[index]) {
+ skb_arr_rq1[index] = netdev_alloc_skb(dev,
+ EHEA_L_PKT_SIZE);
+ if (!skb_arr_rq1[index]) {
+ ehea_error("%s: no mem for skb/%d wqes filled",
+ dev->name, i);
+ break;
+ }
+ }
+ index--;
+ index &= max_index_mask;
+ }
+ /* Ring doorbell */
+ ehea_update_rq1a(pr->qp, i);
+}
+
+static int ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
+{
+ int ret = 0;
+ struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
+ struct net_device *dev = pr->port->netdev;
+ int i;
+
+ for (i = 0; i < pr->rq1_skba.len; i++) {
+ skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
+ if (!skb_arr_rq1[i]) {
+ ehea_error("%s: no mem for skb/%d wqes filled",
+ dev->name, i);
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+ /* Ring doorbell */
+ ehea_update_rq1a(pr->qp, nr_rq1a);
+out:
+ return ret;
+}
+
+static int ehea_refill_rq_def(struct ehea_port_res *pr,
+ struct ehea_q_skb_arr *q_skba, int rq_nr,
+ int num_wqes, int wqe_type, int packet_size)
+{
+ struct net_device *dev = pr->port->netdev;
+ struct ehea_qp *qp = pr->qp;
+ struct sk_buff **skb_arr = q_skba->arr;
+ struct ehea_rwqe *rwqe;
+ int i, index, max_index_mask, fill_wqes;
+ int ret = 0;
+
+ fill_wqes = q_skba->os_skbs + num_wqes;
+
+ if (!fill_wqes)
+ return ret;
+
+ index = q_skba->index;
+ max_index_mask = q_skba->len - 1;
+ for (i = 0; i < fill_wqes; i++) {
+ struct sk_buff *skb = netdev_alloc_skb(dev, packet_size);
+ if (!skb) {
+ ehea_error("%s: no mem for skb/%d wqes filled",
+ dev->name, i);
+ q_skba->os_skbs = fill_wqes - i;
+ ret = -ENOMEM;
+ break;
+ }
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ skb_arr[index] = skb;
+
+ rwqe = ehea_get_next_rwqe(qp, rq_nr);
+ rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
+ | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
+ rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
+ rwqe->sg_list[0].vaddr = (u64)skb->data;
+ rwqe->sg_list[0].len = packet_size;
+ rwqe->data_segments = 1;
+
+ index++;
+ index &= max_index_mask;
+ }
+ q_skba->index = index;
+
+ /* Ring doorbell */
+ iosync();
+ if (rq_nr == 2)
+ ehea_update_rq2a(pr->qp, i);
+ else
+ ehea_update_rq3a(pr->qp, i);
+
+ return ret;
+}
+
+
+static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
+{
+ return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
+ nr_of_wqes, EHEA_RWQE2_TYPE,
+ EHEA_RQ2_PKT_SIZE + NET_IP_ALIGN);
+}
+
+
+static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
+{
+ return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
+ nr_of_wqes, EHEA_RWQE3_TYPE,
+ EHEA_MAX_PACKET_SIZE + NET_IP_ALIGN);
+}
+
+static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
+{
+ *rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
+ if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
+ return 0;
+ if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
+ (cqe->header_length == 0))
+ return 0;
+ return -EINVAL;
+}
+
+static inline void ehea_fill_skb(struct net_device *dev,
+ struct sk_buff *skb, struct ehea_cqe *cqe)
+{
+ int length = cqe->num_bytes_transfered - 4; /*remove CRC */
+
+ skb_put(skb, length);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->protocol = eth_type_trans(skb, dev);
+}
+
+static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
+ int arr_len,
+ struct ehea_cqe *cqe)
+{
+ int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
+ struct sk_buff *skb;
+ void *pref;
+ int x;
+
+ x = skb_index + 1;
+ x &= (arr_len - 1);
+
+ pref = skb_array[x];
+ prefetchw(pref);
+ prefetchw(pref + EHEA_CACHE_LINE);
+
+ pref = (skb_array[x]->data);
+ prefetch(pref);
+ prefetch(pref + EHEA_CACHE_LINE);
+ prefetch(pref + EHEA_CACHE_LINE * 2);
+ prefetch(pref + EHEA_CACHE_LINE * 3);
+ skb = skb_array[skb_index];
+ skb_array[skb_index] = NULL;
+ return skb;
+}
+
+static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
+ int arr_len, int wqe_index)
+{
+ struct sk_buff *skb;
+ void *pref;
+ int x;
+
+ x = wqe_index + 1;
+ x &= (arr_len - 1);
+
+ pref = skb_array[x];
+ prefetchw(pref);
+ prefetchw(pref + EHEA_CACHE_LINE);
+
+ pref = (skb_array[x]->data);
+ prefetchw(pref);
+ prefetchw(pref + EHEA_CACHE_LINE);
+
+ skb = skb_array[wqe_index];
+ skb_array[wqe_index] = NULL;
+ return skb;
+}
+
+static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
+ struct ehea_cqe *cqe, int *processed_rq2,
+ int *processed_rq3)
+{
+ struct sk_buff *skb;
+
+ if (netif_msg_rx_err(pr->port)) {
+ ehea_error("CQE Error for QP %d", pr->qp->init_attr.qp_nr);
+ ehea_dump(cqe, sizeof(*cqe), "CQE");
+ }
+
+ if (rq == 2) {
+ *processed_rq2 += 1;
+ skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
+ dev_kfree_skb(skb);
+ } else if (rq == 3) {
+ *processed_rq3 += 1;
+ skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
+ dev_kfree_skb(skb);
+ }
+
+ if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
+ ehea_error("Critical receive error. Resetting port.");
+ queue_work(pr->port->adapter->ehea_wq, &pr->port->reset_task);
+ return 1;
+ }
+
+ return 0;
+}
+
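+/*
+ * Note: under this NAPI interface the poll handler may consume up to
+ * min(*budget, dev->quota) receive completions, must decrement both
+ * counters by the number processed, and returns 1 while more work is
+ * pending or 0 once it has called netif_rx_complete().  RQ1 is the
+ * low-latency queue: its packet data arrives inline in the CQE and is
+ * copied into a preallocated skb from rq1_skba below.
+ */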
+static int ehea_poll(struct net_device *dev, int *budget)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ struct ehea_port_res *pr = &port->port_res[0];
+ struct ehea_qp *qp = pr->qp;
+ struct ehea_cqe *cqe;
+ struct sk_buff *skb;
+ struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
+ struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
+ struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
+ int skb_arr_rq1_len = pr->rq1_skba.len;
+ int skb_arr_rq2_len = pr->rq2_skba.len;
+ int skb_arr_rq3_len = pr->rq3_skba.len;
+ int processed, processed_rq1, processed_rq2, processed_rq3;
+ int wqe_index, last_wqe_index, rq, intreq, my_quota, port_reset;
+
+ processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
+ last_wqe_index = 0;
+ my_quota = min(*budget, dev->quota);
+ my_quota = min(my_quota, EHEA_POLL_MAX_RWQE);
+
+ /* rq1 is the low latency RQ */
+ cqe = ehea_poll_rq1(qp, &wqe_index);
+ while ((my_quota > 0) && cqe) {
+ ehea_inc_rq1(qp);
+ processed_rq1++;
+ processed++;
+ my_quota--;
+ if (netif_msg_rx_status(port))
+ ehea_dump(cqe, sizeof(*cqe), "CQE");
+
+ last_wqe_index = wqe_index;
+ rmb();
+ if (!ehea_check_cqe(cqe, &rq)) {
+ if (rq == 1) { /* LL RQ1 */
+ skb = get_skb_by_index_ll(skb_arr_rq1,
+ skb_arr_rq1_len,
+ wqe_index);
+ if (unlikely(!skb)) {
+ if (netif_msg_rx_err(port))
+ ehea_error("LL rq1: skb=NULL");
+ skb = netdev_alloc_skb(dev,
+ EHEA_L_PKT_SIZE);
+ if (!skb)
+ break;
+ }
+ memcpy(skb->data, ((char*)cqe) + 64,
+ cqe->num_bytes_transfered - 4);
+ ehea_fill_skb(dev, skb, cqe);
+ } else if (rq == 2) { /* RQ2 */
+ skb = get_skb_by_index(skb_arr_rq2,
+ skb_arr_rq2_len, cqe);
+ if (unlikely(!skb)) {
+ if (netif_msg_rx_err(port))
+ ehea_error("rq2: skb=NULL");
+ break;
+ }
+ ehea_fill_skb(dev, skb, cqe);
+ processed_rq2++;
+ } else { /* RQ3 */
+ skb = get_skb_by_index(skb_arr_rq3,
+ skb_arr_rq3_len, cqe);
+ if (unlikely(!skb)) {
+ if (netif_msg_rx_err(port))
+ ehea_error("rq3: skb=NULL");
+ break;
+ }
+ ehea_fill_skb(dev, skb, cqe);
+ processed_rq3++;
+ }
+
+ if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
+ vlan_hwaccel_receive_skb(skb, port->vgrp,
+ cqe->vlan_tag);
+ else
+ netif_receive_skb(skb);
+
+ } else { /* Error occurred */
+ pr->p_state.poll_receive_errors++;
+ port_reset = ehea_treat_poll_error(pr, rq, cqe,
+ &processed_rq2,
+ &processed_rq3);
+ if (port_reset)
+ break;
+ }
+ cqe = ehea_poll_rq1(qp, &wqe_index);
+ }
+
+ dev->quota -= processed;
+ *budget -= processed;
+
+ pr->p_state.ehea_poll += 1;
+ pr->rx_packets += processed;
+
+ ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
+ ehea_refill_rq2(pr, processed_rq2);
+ ehea_refill_rq3(pr, processed_rq3);
+
+ intreq = ((pr->p_state.ehea_poll & 0xF) == 0xF);
+
+ if (!cqe || intreq) {
+ netif_rx_complete(dev);
+ ehea_reset_cq_ep(pr->recv_cq);
+ ehea_reset_cq_n1(pr->recv_cq);
+ cqe = hw_qeit_get_valid(&qp->hw_rqueue1);
+ if (!cqe || intreq)
+ return 0;
+ if (!netif_rx_reschedule(dev, my_quota))
+ return 0;
+ }
+ return 1;
+}
+
+void free_sent_skbs(struct ehea_cqe *cqe, struct ehea_port_res *pr)
+{
+ struct sk_buff *skb;
+ int index, max_index_mask, i;
+
+ index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
+ max_index_mask = pr->sq_skba.len - 1;
+ for (i = 0; i < EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id); i++) {
+ skb = pr->sq_skba.arr[index];
+ if (likely(skb)) {
+ dev_kfree_skb(skb);
+ pr->sq_skba.arr[index] = NULL;
+ } else {
+ ehea_error("skb=NULL, wr_id=%lX, loop=%d, index=%d",
+ cqe->wr_id, i, index);
+ }
+ index--;
+ index &= max_index_mask;
+ }
+}
+
+#define MAX_SENDCOMP_QUOTA 400
+void ehea_send_irq_tasklet(unsigned long data)
+{
+ struct ehea_port_res *pr = (struct ehea_port_res*)data;
+ struct ehea_cq *send_cq = pr->send_cq;
+ struct ehea_cqe *cqe;
+ int quota = MAX_SENDCOMP_QUOTA;
+ int cqe_counter = 0;
+ int swqe_av = 0;
+ unsigned long flags;
+
+ do {
+ cqe = ehea_poll_cq(send_cq);
+ if (!cqe) {
+ ehea_reset_cq_ep(send_cq);
+ ehea_reset_cq_n1(send_cq);
+ cqe = ehea_poll_cq(send_cq);
+ if (!cqe)
+ break;
+ }
+ cqe_counter++;
+ rmb();
+ if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
+ ehea_error("Send Completion Error: Resetting port");
+ if (netif_msg_tx_err(pr->port))
+ ehea_dump(cqe, sizeof(*cqe), "Send CQE");
+ queue_work(pr->port->adapter->ehea_wq,
+ &pr->port->reset_task);
+ break;
+ }
+
+ if (netif_msg_tx_done(pr->port))
+ ehea_dump(cqe, sizeof(*cqe), "CQE");
+
+ if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
+ == EHEA_SWQE2_TYPE))
+ free_sent_skbs(cqe, pr);
+
+ swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
+ quota--;
+ } while (quota > 0);
+
+ ehea_update_feca(send_cq, cqe_counter);
+ atomic_add(swqe_av, &pr->swqe_avail);
+
+ spin_lock_irqsave(&pr->netif_queue, flags);
+ if (pr->queue_stopped && (atomic_read(&pr->swqe_avail)
+ >= pr->swqe_refill_th)) {
+ netif_wake_queue(pr->port->netdev);
+ pr->queue_stopped = 0;
+ }
+ spin_unlock_irqrestore(&pr->netif_queue, flags);
+
+ if (unlikely(cqe))
+ tasklet_hi_schedule(&pr->send_comp_task);
+}
+
+static irqreturn_t ehea_send_irq_handler(int irq, void *param,
+ struct pt_regs *regs)
+{
+ struct ehea_port_res *pr = param;
+ tasklet_hi_schedule(&pr->send_comp_task);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ehea_recv_irq_handler(int irq, void *param,
+ struct pt_regs *regs)
+{
+ struct ehea_port_res *pr = param;
+ struct ehea_port *port = pr->port;
+ netif_rx_schedule(port->netdev);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param,
+ struct pt_regs *regs)
+{
+ struct ehea_port *port = param;
+ struct ehea_eqe *eqe;
+ u32 qp_token;
+
+ eqe = ehea_poll_eq(port->qp_eq);
+ ehea_debug("eqe=%p", eqe);
+ while (eqe) {
+ qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
+ ehea_debug("*eqe=%lx, qp_token=%x", *(u64*)eqe, qp_token);
+ eqe = ehea_poll_eq(port->qp_eq);
+ ehea_debug("next eqe=%p", eqe);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
+ int logical_port)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_ports; i++)
+ if (adapter->port[i]->logical_port_id == logical_port)
+ return adapter->port[i];
+ return NULL;
+}
+
+int ehea_sense_port_attr(struct ehea_port *port)
+{
+ int ret;
+ u64 hret;
+ struct hcp_ehea_port_cb0 *cb0;
+
+ cb0 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ if (!cb0) {
+ ehea_error("no mem for cb0");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ hret = ehea_h_query_ehea_port(port->adapter->handle,
+ port->logical_port_id, H_PORT_CB0,
+ EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
+ cb0);
+ if (hret != H_SUCCESS) {
+ ret = -EIO;
+ goto out_free;
+ }
+
+ /* MAC address */
+ port->mac_addr = cb0->port_mac_addr << 16;
+
+ if (!is_valid_ether_addr((u8*)&port->mac_addr)) {
+ ret = -EADDRNOTAVAIL;
+ goto out_free;
+ }
+
+ /* Port speed */
+ switch (cb0->port_speed) {
+ case H_SPEED_10M_H:
+ port->port_speed = EHEA_SPEED_10M;
+ port->full_duplex = 0;
+ break;
+ case H_SPEED_10M_F:
+ port->port_speed = EHEA_SPEED_10M;
+ port->full_duplex = 1;
+ break;
+ case H_SPEED_100M_H:
+ port->port_speed = EHEA_SPEED_100M;
+ port->full_duplex = 0;
+ break;
+ case H_SPEED_100M_F:
+ port->port_speed = EHEA_SPEED_100M;
+ port->full_duplex = 1;
+ break;
+ case H_SPEED_1G_F:
+ port->port_speed = EHEA_SPEED_1G;
+ port->full_duplex = 1;
+ break;
+ case H_SPEED_10G_F:
+ port->port_speed = EHEA_SPEED_10G;
+ port->full_duplex = 1;
+ break;
+ default:
+ port->port_speed = 0;
+ port->full_duplex = 0;
+ break;
+ }
+
+ /* Number of default QPs */
+ port->num_def_qps = cb0->num_default_qps;
+
+ if (!port->num_def_qps) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ if (port->num_def_qps >= EHEA_NUM_TX_QP)
+ port->num_add_tx_qps = 0;
+ else
+ port->num_add_tx_qps = EHEA_NUM_TX_QP - port->num_def_qps;
+
+ ret = 0;
+out_free:
+ if (ret || netif_msg_probe(port))
+ ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
+ kfree(cb0);
+out:
+ return ret;
+}
+
+int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
+{
+ struct hcp_ehea_port_cb4 *cb4;
+ u64 hret;
+ int ret = 0;
+
+ cb4 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ if (!cb4) {
+ ehea_error("no mem for cb4");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cb4->port_speed = port_speed;
+
+ netif_carrier_off(port->netdev);
+
+ hret = ehea_h_modify_ehea_port(port->adapter->handle,
+ port->logical_port_id,
+ H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
+ if (hret == H_SUCCESS) {
+ port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;
+
+ hret = ehea_h_query_ehea_port(port->adapter->handle,
+ port->logical_port_id,
+ H_PORT_CB4, H_PORT_CB4_SPEED,
+ cb4);
+ if (hret == H_SUCCESS) {
+ switch (cb4->port_speed) {
+ case H_SPEED_10M_H:
+ port->port_speed = EHEA_SPEED_10M;
+ port->full_duplex = 0;
+ break;
+ case H_SPEED_10M_F:
+ port->port_speed = EHEA_SPEED_10M;
+ port->full_duplex = 1;
+ break;
+ case H_SPEED_100M_H:
+ port->port_speed = EHEA_SPEED_100M;
+ port->full_duplex = 0;
+ break;
+ case H_SPEED_100M_F:
+ port->port_speed = EHEA_SPEED_100M;
+ port->full_duplex = 1;
+ break;
+ case H_SPEED_1G_F:
+ port->port_speed = EHEA_SPEED_1G;
+ port->full_duplex = 1;
+ break;
+ case H_SPEED_10G_F:
+ port->port_speed = EHEA_SPEED_10G;
+ port->full_duplex = 1;
+ break;
+ default:
+ port->port_speed = 0;
+ port->full_duplex = 0;
+ break;
+ }
+ } else {
+ ehea_error("Failed sensing port speed");
+ ret = -EIO;
+ }
+ } else {
+ if (hret == H_AUTHORITY) {
+ ehea_info("Hypervisor denied setting port speed. Either"
+ " this partition is not authorized to set "
+ "port speed or another partition has modified"
+ " port speed first.");
+ ret = -EPERM;
+ } else {
+ ret = -EIO;
+ ehea_error("Failed setting port speed");
+ }
+ }
+ netif_carrier_on(port->netdev);
+ kfree(cb4);
+out:
+ return ret;
+}
+
+static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
+{
+ int ret;
+ u8 ec;
+ u8 portnum;
+ struct ehea_port *port;
+
+ ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
+ portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
+ port = ehea_get_port(adapter, portnum);
+
+ switch (ec) {
+ case EHEA_EC_PORTSTATE_CHG: /* port state change */
+
+ if (!port) {
+ ehea_error("unknown portnum %x", portnum);
+ break;
+ }
+
+ if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
+ if (!netif_carrier_ok(port->netdev)) {
+ ret = ehea_sense_port_attr(port);
+ if (ret) {
+ ehea_error("failed resensing port "
+ "attributes");
+ break;
+ }
+
+ if (netif_msg_link(port))
+ ehea_info("%s: Logical port up: %dMbps "
+ "%s Duplex",
+ port->netdev->name,
+ port->port_speed,
+ port->full_duplex ==
+ 1 ? "Full" : "Half");
+
+ netif_carrier_on(port->netdev);
+ netif_wake_queue(port->netdev);
+ }
+ } else
+ if (netif_carrier_ok(port->netdev)) {
+ if (netif_msg_link(port))
+ ehea_info("%s: Logical port down",
+ port->netdev->name);
+ netif_carrier_off(port->netdev);
+ netif_stop_queue(port->netdev);
+ }
+
+ if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
+ if (netif_msg_link(port))
+ ehea_info("%s: Physical port up",
+ port->netdev->name);
+ } else {
+ if (netif_msg_link(port))
+ ehea_info("%s: Physical port down",
+ port->netdev->name);
+ }
+
+ if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
+ ehea_info("External switch port is primary port");
+ else
+ ehea_info("External switch port is backup port");
+
+ break;
+ case EHEA_EC_ADAPTER_MALFUNC:
+ ehea_error("Adapter malfunction");
+ break;
+ case EHEA_EC_PORT_MALFUNC:
+ if (!port) {
+ ehea_error("unknown portnum %x", portnum);
+ break;
+ }
+ ehea_info("Port malfunction: Device: %s", port->netdev->name);
+ netif_carrier_off(port->netdev);
+ netif_stop_queue(port->netdev);
+ break;
+ default:
+ ehea_error("unknown event code %x", ec);
+ break;
+ }
+}
+
+static void ehea_neq_tasklet(unsigned long data)
+{
+ struct ehea_adapter *adapter = (struct ehea_adapter*)data;
+ struct ehea_eqe *eqe;
+ u64 event_mask;
+
+ eqe = ehea_poll_eq(adapter->neq);
+ ehea_debug("eqe=%p", eqe);
+
+ while (eqe) {
+ ehea_debug("*eqe=%lx", eqe->entry);
+ ehea_parse_eqe(adapter, eqe->entry);
+ eqe = ehea_poll_eq(adapter->neq);
+ ehea_debug("next eqe=%p", eqe);
+ }
+
+ event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
+ | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
+ | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);
+
+ ehea_h_reset_events(adapter->handle,
+ adapter->neq->fw_handle, event_mask);
+}
+
+static irqreturn_t ehea_interrupt_neq(int irq, void *param,
+ struct pt_regs *regs)
+{
+ struct ehea_adapter *adapter = param;
+ tasklet_hi_schedule(&adapter->neq_tasklet);
+ return IRQ_HANDLED;
+}
+
+
+static int ehea_fill_port_res(struct ehea_port_res *pr)
+{
+ int ret;
+ struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;
+
+ ret = ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
+ - init_attr->act_nr_rwqes_rq2
+ - init_attr->act_nr_rwqes_rq3 - 1);
+
+ ret |= ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);
+
+ ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);
+
+ return ret;
+}
+
+static int ehea_reg_interrupts(struct net_device *dev)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ struct ehea_port_res *pr;
+ int i, ret;
+
+ for (i = 0; i < port->num_def_qps; i++) {
+ pr = &port->port_res[i];
+ snprintf(pr->int_recv_name, EHEA_IRQ_NAME_SIZE - 1,
+ "%s-recv%d", dev->name, i);
+ ret = ibmebus_request_irq(NULL, pr->recv_eq->attr.ist1,
+ ehea_recv_irq_handler,
+ SA_INTERRUPT, pr->int_recv_name, pr);
+ if (ret) {
+ ehea_error("failed registering irq for ehea_recv_int:"
+ "port_res_nr:%d, ist=%X", i,
+ pr->recv_eq->attr.ist1);
+ goto out_free_seq;
+ }
+ if (netif_msg_ifup(port))
+ ehea_info("irq_handle 0x%X for funct ehea_recv_int %d "
+ "registered", pr->recv_eq->attr.ist1, i);
+ }
+
+ snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
+ dev->name);
+
+ ret = ibmebus_request_irq(NULL, port->qp_eq->attr.ist1,
+ ehea_qp_aff_irq_handler,
+ SA_INTERRUPT, port->int_aff_name, port);
+ if (ret) {
+ ehea_error("failed registering irq for qp_aff_irq_handler:"
+ "ist=%X", port->qp_eq->attr.ist1);
+ goto out_free_qpeq;
+ }
+
+ if (netif_msg_ifup(port))
+ ehea_info("irq_handle 0x%X for function qp_aff_irq_handler "
+ "registered", port->qp_eq->attr.ist1);
+
+ for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ pr = &port->port_res[i];
+ snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
+ "%s-send%d", dev->name, i);
+ ret = ibmebus_request_irq(NULL, pr->send_eq->attr.ist1,
+ ehea_send_irq_handler,
+ SA_INTERRUPT, pr->int_send_name,
+ pr);
+ if (ret) {
+ ehea_error("failed registering irq for ehea_send "
+ "port_res_nr:%d, ist=%X", i,
+ pr->send_eq->attr.ist1);
+ goto out_free_req;
+ }
+ if (netif_msg_ifup(port))
+ ehea_info("irq_handle 0x%X for function ehea_send_int "
+ "%d registered", pr->send_eq->attr.ist1, i);
+ }
+out:
+ return ret;
+
+out_free_req:
+ while (--i >= 0) {
+ u32 ist = port->port_res[i].send_eq->attr.ist1;
+ ibmebus_free_irq(NULL, ist, &port->port_res[i]);
+ }
+out_free_qpeq:
+ ibmebus_free_irq(NULL, port->qp_eq->attr.ist1, port);
+ i = port->num_def_qps;
+out_free_seq:
+ while (--i >= 0) {
+ u32 ist = port->port_res[i].recv_eq->attr.ist1;
+ ibmebus_free_irq(NULL, ist, &port->port_res[i]);
+ }
+ goto out;
+}
+
+static void ehea_free_interrupts(struct net_device *dev)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ struct ehea_port_res *pr;
+ int i;
+
+ /* send */
+ for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ pr = &port->port_res[i];
+ ibmebus_free_irq(NULL, pr->send_eq->attr.ist1, pr);
+ if (netif_msg_intr(port))
+ ehea_info("free send irq for res %d with handle 0x%X",
+ i, pr->send_eq->attr.ist1);
+ }
+
+ /* receive */
+ for (i = 0; i < port->num_def_qps; i++) {
+ pr = &port->port_res[i];
+ ibmebus_free_irq(NULL, pr->recv_eq->attr.ist1, pr);
+ if (netif_msg_intr(port))
+ ehea_info("free recv irq for res %d with handle 0x%X",
+ i, pr->recv_eq->attr.ist1);
+ }
+
+ /* associated events */
+ ibmebus_free_irq(NULL, port->qp_eq->attr.ist1, port);
+ if (netif_msg_intr(port))
+ ehea_info("associated event interrupt for handle 0x%X freed",
+ port->qp_eq->attr.ist1);
+}
+
+static int ehea_configure_port(struct ehea_port *port)
+{
+ int ret, i;
+ u64 hret, mask;
+ struct hcp_ehea_port_cb0 *cb0;
+
+ ret = -ENOMEM;
+ cb0 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ if (!cb0)
+ goto out;
+
+ cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
+ | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
+ | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
+ | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
+ | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
+ PXLY_RC_VLAN_FILTER)
+ | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);
+
+ for (i = 0; i < port->num_def_qps; i++)
+ cb0->default_qpn_arr[i] = port->port_res[i].qp->init_attr.qp_nr;
+
+ if (netif_msg_ifup(port))
+ ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");
+
+ mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
+ | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);
+
+ hret = ehea_h_modify_ehea_port(port->adapter->handle,
+ port->logical_port_id,
+ H_PORT_CB0, mask, cb0);
+ ret = -EIO;
+ if (hret != H_SUCCESS)
+ goto out_free;
+
+ ret = 0;
+
+out_free:
+ kfree(cb0);
+out:
+ return ret;
+}
+
+static int ehea_gen_smrs(struct ehea_port_res *pr)
+{
+ u64 hret;
+ struct ehea_adapter *adapter = pr->port->adapter;
+
+ hret = ehea_h_register_smr(adapter->handle, adapter->mr.handle,
+ adapter->mr.vaddr, EHEA_MR_ACC_CTRL,
+ adapter->pd, &pr->send_mr);
+ if (hret != H_SUCCESS)
+ goto out;
+
+ hret = ehea_h_register_smr(adapter->handle, adapter->mr.handle,
+ adapter->mr.vaddr, EHEA_MR_ACC_CTRL,
+ adapter->pd, &pr->recv_mr);
+ if (hret != H_SUCCESS)
+ goto out_freeres;
+
+ return 0;
+
+out_freeres:
+ hret = ehea_h_free_resource(adapter->handle, pr->send_mr.handle);
+ if (hret != H_SUCCESS)
+ ehea_error("failed freeing SMR");
+out:
+ return -EIO;
+}
+
+static int ehea_rem_smrs(struct ehea_port_res *pr)
+{
+ struct ehea_adapter *adapter = pr->port->adapter;
+ int ret = 0;
+ u64 hret;
+
+ hret = ehea_h_free_resource(adapter->handle, pr->send_mr.handle);
+ if (hret != H_SUCCESS) {
+ ret = -EIO;
+ ehea_error("failed freeing send SMR for pr=%p", pr);
+ }
+
+ hret = ehea_h_free_resource(adapter->handle, pr->recv_mr.handle);
+ if (hret != H_SUCCESS) {
+ ret = -EIO;
+ ehea_error("failed freeing recv SMR for pr=%p", pr);
+ }
+
+ return ret;
+}
+
+static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
+{
+ int arr_size = sizeof(void*) * max_q_entries;
+
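+	/* one pointer per queue entry; vmalloc, as the array may span
+	 * several pages for large queues */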
+ q_skba->arr = vmalloc(arr_size);
+ if (!q_skba->arr)
+ return -ENOMEM;
+
+ memset(q_skba->arr, 0, arr_size);
+
+ q_skba->len = max_q_entries;
+ q_skba->index = 0;
+ q_skba->os_skbs = 0;
+
+ return 0;
+}
+
+static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
+ struct port_res_cfg *pr_cfg, int queue_token)
+{
+ struct ehea_adapter *adapter = port->adapter;
+ enum ehea_eq_type eq_type = EHEA_EQ;
+ struct ehea_qp_init_attr *init_attr = NULL;
+ int ret = -EIO;
+
+ memset(pr, 0, sizeof(struct ehea_port_res));
+
+ pr->port = port;
+ spin_lock_init(&pr->send_lock);
+ spin_lock_init(&pr->recv_lock);
+ spin_lock_init(&pr->xmit_lock);
+ spin_lock_init(&pr->netif_queue);
+
+ pr->recv_eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
+ if (!pr->recv_eq) {
+ ehea_error("create_eq failed (recv_eq)");
+ goto out_free;
+ }
+
+ pr->send_eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
+ if (!pr->send_eq) {
+ ehea_error("create_eq failed (send_eq)");
+ goto out_free;
+ }
+
+ pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
+ pr->recv_eq->fw_handle,
+ port->logical_port_id);
+ if (!pr->recv_cq) {
+ ehea_error("create_cq failed (cq_recv)");
+ goto out_free;
+ }
+
+ pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
+ pr->send_eq->fw_handle,
+ port->logical_port_id);
+ if (!pr->send_cq) {
+ ehea_error("create_cq failed (cq_send)");
+ goto out_free;
+ }
+
+ if (netif_msg_ifup(port))
+ ehea_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d",
+ pr->send_cq->attr.act_nr_of_cqes,
+ pr->recv_cq->attr.act_nr_of_cqes);
+
+ init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
+ if (!init_attr) {
+ ret = -ENOMEM;
+ ehea_error("no mem for ehea_qp_init_attr");
+ goto out_free;
+ }
+
+ init_attr->low_lat_rq1 = 1;
+ init_attr->signalingtype = 1; /* generate CQE if specified in WQE */
+ init_attr->rq_count = 3;
+ init_attr->qp_token = queue_token;
+ init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
+ init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
+ init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
+ init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
+ init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
+ init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
+ init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
+ init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
+ init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
+ init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
+ init_attr->port_nr = port->logical_port_id;
+ init_attr->send_cq_handle = pr->send_cq->fw_handle;
+ init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
+ init_attr->aff_eq_handle = port->qp_eq->fw_handle;
+
+ pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
+ if (!pr->qp) {
+ ehea_error("create_qp failed");
+ ret = -EIO;
+ goto out_free;
+ }
+
+ if (netif_msg_ifup(port))
+ ehea_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n "
+ "nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d", init_attr->qp_nr,
+ init_attr->act_nr_send_wqes,
+ init_attr->act_nr_rwqes_rq1,
+ init_attr->act_nr_rwqes_rq2,
+ init_attr->act_nr_rwqes_rq3);
+
+ ret = ehea_init_q_skba(&pr->sq_skba, init_attr->act_nr_send_wqes + 1);
+ ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
+ ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
+ ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
+ if (ret)
+ goto out_free;
+
+ pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
+ if (ehea_gen_smrs(pr) != 0) {
+ ret = -EIO;
+ goto out_free;
+ }
+ tasklet_init(&pr->send_comp_task, ehea_send_irq_tasklet,
+ (unsigned long)pr);
+ atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);
+
+ kfree(init_attr);
+ ret = 0;
+ goto out;
+
+out_free:
+ kfree(init_attr);
+ vfree(pr->sq_skba.arr);
+ vfree(pr->rq1_skba.arr);
+ vfree(pr->rq2_skba.arr);
+ vfree(pr->rq3_skba.arr);
+ ehea_destroy_qp(pr->qp);
+ ehea_destroy_cq(pr->send_cq);
+ ehea_destroy_cq(pr->recv_cq);
+ ehea_destroy_eq(pr->send_eq);
+ ehea_destroy_eq(pr->recv_eq);
+out:
+ return ret;
+}
+
+static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
+{
+ int ret, i;
+
+ ret = ehea_destroy_qp(pr->qp);
+
+ if (!ret) {
+ ehea_destroy_cq(pr->send_cq);
+ ehea_destroy_cq(pr->recv_cq);
+ ehea_destroy_eq(pr->send_eq);
+ ehea_destroy_eq(pr->recv_eq);
+
+ for (i = 0; i < pr->rq1_skba.len; i++)
+ if (pr->rq1_skba.arr[i])
+ dev_kfree_skb(pr->rq1_skba.arr[i]);
+
+ for (i = 0; i < pr->rq2_skba.len; i++)
+ if (pr->rq2_skba.arr[i])
+ dev_kfree_skb(pr->rq2_skba.arr[i]);
+
+ for (i = 0; i < pr->rq3_skba.len; i++)
+ if (pr->rq3_skba.arr[i])
+ dev_kfree_skb(pr->rq3_skba.arr[i]);
+
+ for (i = 0; i < pr->sq_skba.len; i++)
+ if (pr->sq_skba.arr[i])
+ dev_kfree_skb(pr->sq_skba.arr[i]);
+
+ vfree(pr->rq1_skba.arr);
+ vfree(pr->rq2_skba.arr);
+ vfree(pr->rq3_skba.arr);
+ vfree(pr->sq_skba.arr);
+ ret = ehea_rem_smrs(pr);
+ }
+ return ret;
+}
+
+/*
+ * The write_* functions store information in swqe which is used by
+ * the hardware to calculate the ip/tcp/udp checksum
+ */
+
+static inline void write_ip_start_end(struct ehea_swqe *swqe,
+ const struct sk_buff *skb)
+{
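+	/* both offsets are relative to the start of the frame (skb->data) */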
+ swqe->ip_start = (u8)(((u64)skb->nh.iph) - ((u64)skb->data));
+ swqe->ip_end = (u8)(swqe->ip_start + skb->nh.iph->ihl * 4 - 1);
+}
+
+static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
+ const struct sk_buff *skb)
+{
+ swqe->tcp_offset =
+ (u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check));
+
+ swqe->tcp_end = (u16)skb->len - 1;
+}
+
+static inline void write_udp_offset_end(struct ehea_swqe *swqe,
+ const struct sk_buff *skb)
+{
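+	/* the swqe has no udp_* fields; tcp_offset/tcp_end carry the
+	 * UDP checksum offset and frame end instead */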
+ swqe->tcp_offset =
+ (u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check));
+
+ swqe->tcp_end = (u16)skb->len - 1;
+}
+
+
+static void write_swqe2_TSO(struct sk_buff *skb,
+ struct ehea_swqe *swqe, u32 lkey)
+{
+ struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
+ u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
+ int skb_data_size = skb->len - skb->data_len;
+ int headersize;
+ u64 tmp_addr;
+
+ /* Packet is TCP with TSO enabled */
+ swqe->tx_control |= EHEA_SWQE_TSO;
+ swqe->mss = skb_shinfo(skb)->gso_size;
+ /* copy only eth/ip/tcp headers to immediate data and
+ * the rest of skb->data to sg1entry
+ */
+ headersize = ETH_HLEN + (skb->nh.iph->ihl * 4) + (skb->h.th->doff * 4);
+
+ if (skb_data_size >= headersize) {
+ /* copy immediate data */
+ memcpy(imm_data, skb->data, headersize);
+ swqe->immediate_data_length = headersize;
+
+ if (skb_data_size > headersize) {
+ /* set sg1entry data */
+ sg1entry->l_key = lkey;
+ sg1entry->len = skb_data_size - headersize;
+
+ tmp_addr = (u64)(skb->data + headersize);
+ sg1entry->vaddr = tmp_addr;
+ swqe->descriptors++;
+ }
+ } else
+ ehea_error("cannot handle fragmented headers");
+}
+
+static void write_swqe2_nonTSO(struct sk_buff *skb,
+ struct ehea_swqe *swqe, u32 lkey)
+{
+ int skb_data_size = skb->len - skb->data_len;
+ u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
+ struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
+ u64 tmp_addr;
+
+ /* Packet is any nonTSO type
+ *
+ * Copy as much as possible skb->data to immediate data and
+ * the rest to sg1entry
+ */
+ if (skb_data_size >= SWQE2_MAX_IMM) {
+ /* copy immediate data */
+ memcpy(imm_data, skb->data, SWQE2_MAX_IMM);
+
+ swqe->immediate_data_length = SWQE2_MAX_IMM;
+
+ if (skb_data_size > SWQE2_MAX_IMM) {
+ /* copy sg1entry data */
+ sg1entry->l_key = lkey;
+ sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
+ tmp_addr = (u64)(skb->data + SWQE2_MAX_IMM);
+ sg1entry->vaddr = tmp_addr;
+ swqe->descriptors++;
+ }
+ } else {
+ memcpy(imm_data, skb->data, skb_data_size);
+ swqe->immediate_data_length = skb_data_size;
+ }
+}
+
+static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
+ struct ehea_swqe *swqe, u32 lkey)
+{
+ struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
+ skb_frag_t *frag;
+ int nfrags, sg1entry_contains_frag_data, i;
+ u64 tmp_addr;
+
+ nfrags = skb_shinfo(skb)->nr_frags;
+ sg1entry = &swqe->u.immdata_desc.sg_entry;
+ sg_list = (struct ehea_vsgentry*)&swqe->u.immdata_desc.sg_list;
+ swqe->descriptors = 0;
+ sg1entry_contains_frag_data = 0;
+
+ if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size)
+ write_swqe2_TSO(skb, swqe, lkey);
+ else
+ write_swqe2_nonTSO(skb, swqe, lkey);
+
+ /* write descriptors */
+ if (nfrags > 0) {
+ if (swqe->descriptors == 0) {
+ /* sg1entry not yet used */
+ frag = &skb_shinfo(skb)->frags[0];
+
+ /* copy sg1entry data */
+ sg1entry->l_key = lkey;
+ sg1entry->len = frag->size;
+ tmp_addr = (u64)(page_address(frag->page)
+ + frag->page_offset);
+ sg1entry->vaddr = tmp_addr;
+ swqe->descriptors++;
+ sg1entry_contains_frag_data = 1;
+ }
+
+ for (i = sg1entry_contains_frag_data; i < nfrags; i++) {
+
+ frag = &skb_shinfo(skb)->frags[i];
+ sgentry = &sg_list[i - sg1entry_contains_frag_data];
+
+ sgentry->l_key = lkey;
+ sgentry->len = frag->size;
+
+ tmp_addr = (u64)(page_address(frag->page)
+ + frag->page_offset);
+ sgentry->vaddr = tmp_addr;
+ swqe->descriptors++;
+ }
+ }
+}
+
+static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
+{
+ int ret = 0;
+ u64 hret;
+ u8 reg_type;
+
+ /* De/Register untagged packets */
+ reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
+ hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
+ port->logical_port_id,
+ reg_type, port->mac_addr, 0, hcallid);
+ if (hret != H_SUCCESS) {
+		ehea_error("reg_dereg_bcmc failed (untagged)");
+ ret = -EIO;
+ goto out_herr;
+ }
+
+ /* De/Register VLAN packets */
+ reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
+ hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
+ port->logical_port_id,
+ reg_type, port->mac_addr, 0, hcallid);
+ if (hret != H_SUCCESS) {
+ ehea_error("reg_dereg_bcmc failed (vlan)");
+ ret = -EIO;
+ }
+out_herr:
+ return ret;
+}
+
+static int ehea_set_mac_addr(struct net_device *dev, void *sa)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ struct sockaddr *mac_addr = sa;
+ struct hcp_ehea_port_cb0 *cb0;
+ int ret;
+ u64 hret;
+
+ if (!is_valid_ether_addr(mac_addr->sa_data)) {
+ ret = -EADDRNOTAVAIL;
+ goto out;
+ }
+
+ cb0 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ if (!cb0) {
+ ehea_error("no mem for cb0");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);
+
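+	/* the memcpy leaves the six MAC bytes in the most significant
+	 * bytes of the u64 (big endian); CB0 expects them right-aligned */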
+ cb0->port_mac_addr = cb0->port_mac_addr >> 16;
+
+ hret = ehea_h_modify_ehea_port(port->adapter->handle,
+ port->logical_port_id, H_PORT_CB0,
+ EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
+ if (hret != H_SUCCESS) {
+ ret = -EIO;
+ goto out_free;
+ }
+
+ memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
+
+ /* Deregister old MAC in pHYP */
+ ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
+ if (ret)
+ goto out_free;
+
+ port->mac_addr = cb0->port_mac_addr << 16;
+
+ /* Register new MAC in pHYP */
+ ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
+ if (ret)
+ goto out_free;
+
+ ret = 0;
+out_free:
+ kfree(cb0);
+out:
+ return ret;
+}
+
+static void ehea_promiscuous_error(u64 hret, int enable)
+{
+ ehea_info("Hypervisor denied %sabling promiscuous mode.%s",
+ enable == 1 ? "en" : "dis",
+ hret != H_AUTHORITY ? "" : " Another partition owning a "
+ "logical port on the same physical port might have altered "
+ "promiscuous mode first.");
+}
+
+static void ehea_promiscuous(struct net_device *dev, int enable)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ struct hcp_ehea_port_cb7 *cb7;
+ u64 hret;
+
+ if ((enable && port->promisc) || (!enable && !port->promisc))
+ return;
+
+ cb7 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ if (!cb7) {
+ ehea_error("no mem for cb7");
+ goto out;
+ }
+
+ /* Modify Pxs_DUCQPN in CB7 */
+ cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;
+
+ hret = ehea_h_modify_ehea_port(port->adapter->handle,
+ port->logical_port_id,
+ H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
+ if (hret) {
+ ehea_promiscuous_error(hret, enable);
+ goto out;
+ }
+
+ port->promisc = enable;
+out:
+ kfree(cb7);
+ return;
+}
+
+static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
+ u32 hcallid)
+{
+ u64 hret;
+ u8 reg_type;
+
+ reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
+ | EHEA_BCMC_UNTAGGED;
+
+ hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
+ port->logical_port_id,
+ reg_type, mc_mac_addr, 0, hcallid);
+ if (hret)
+ goto out;
+
+ reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
+ | EHEA_BCMC_VLANID_ALL;
+
+ hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
+ port->logical_port_id,
+ reg_type, mc_mac_addr, 0, hcallid);
+out:
+ return hret;
+}
+
+static int ehea_drop_multicast_list(struct net_device *dev)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ struct ehea_mc_list *mc_entry = port->mc_list;
+ struct list_head *pos;
+ struct list_head *temp;
+ int ret = 0;
+ u64 hret;
+
+ list_for_each_safe(pos, temp, &(port->mc_list->list)) {
+ mc_entry = list_entry(pos, struct ehea_mc_list, list);
+
+ hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
+ H_DEREG_BCMC);
+ if (hret) {
+ ehea_error("failed deregistering mcast MAC");
+ ret = -EIO;
+ }
+
+ list_del(pos);
+ kfree(mc_entry);
+ }
+ return ret;
+}
+
+static void ehea_allmulti(struct net_device *dev, int enable)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ u64 hret;
+
+ if (!port->allmulti) {
+ if (enable) {
+ /* Enable ALLMULTI */
+ ehea_drop_multicast_list(dev);
+ hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
+ if (!hret)
+ port->allmulti = 1;
+ else
+ ehea_error("failed enabling IFF_ALLMULTI");
+ }
+	} else if (!enable) {
+		/* Disable ALLMULTI */
+		hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
+		if (!hret)
+			port->allmulti = 0;
+		else
+			ehea_error("failed disabling IFF_ALLMULTI");
+	}
+}
+
+static void ehea_add_multicast_entry(struct ehea_port* port, u8* mc_mac_addr)
+{
+ struct ehea_mc_list *ehea_mcl_entry;
+ u64 hret;
+
+ ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_KERNEL);
+ if (!ehea_mcl_entry) {
+ ehea_error("no mem for mcl_entry");
+ return;
+ }
+
+ INIT_LIST_HEAD(&ehea_mcl_entry->list);
+
+ memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);
+
+ hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
+ H_REG_BCMC);
+ if (!hret)
+ list_add(&ehea_mcl_entry->list, &port->mc_list->list);
+ else {
+ ehea_error("failed registering mcast MAC");
+ kfree(ehea_mcl_entry);
+ }
+}
+
+static void ehea_set_multicast_list(struct net_device *dev)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ struct dev_mc_list *k_mcl_entry;
+ int ret, i;
+
+ if (dev->flags & IFF_PROMISC) {
+ ehea_promiscuous(dev, 1);
+ return;
+ }
+ ehea_promiscuous(dev, 0);
+
+ if (dev->flags & IFF_ALLMULTI) {
+ ehea_allmulti(dev, 1);
+ return;
+ }
+ ehea_allmulti(dev, 0);
+
+ if (dev->mc_count) {
+ ret = ehea_drop_multicast_list(dev);
+ if (ret) {
+ /* Dropping the current multicast list failed.
+ * Enabling ALL_MULTI is the best we can do.
+ */
+ ehea_allmulti(dev, 1);
+ }
+
+ if (dev->mc_count > port->adapter->max_mc_mac) {
+ ehea_info("Mcast registration limit reached (0x%lx). "
+ "Use ALLMULTI!",
+ port->adapter->max_mc_mac);
+ goto out;
+ }
+
+ for (i = 0, k_mcl_entry = dev->mc_list;
+ i < dev->mc_count;
+ i++, k_mcl_entry = k_mcl_entry->next) {
+ ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);
+ }
+ }
+out:
+ return;
+}
+
+static int ehea_change_mtu(struct net_device *dev, int new_mtu)
+{
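+	/* 68 bytes is the minimum IPv4 MTU */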
+ if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
+ return -EINVAL;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
+ struct ehea_swqe *swqe, u32 lkey)
+{
+ if (skb->protocol == htons(ETH_P_IP)) {
+ /* IPv4 */
+ swqe->tx_control |= EHEA_SWQE_CRC
+ | EHEA_SWQE_IP_CHECKSUM
+ | EHEA_SWQE_TCP_CHECKSUM
+ | EHEA_SWQE_IMM_DATA_PRESENT
+ | EHEA_SWQE_DESCRIPTORS_PRESENT;
+
+ write_ip_start_end(swqe, skb);
+
+ if (skb->nh.iph->protocol == IPPROTO_UDP) {
+ if ((skb->nh.iph->frag_off & IP_MF) ||
+ (skb->nh.iph->frag_off & IP_OFFSET))
+ /* IP fragment, so don't change cs */
+ swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
+ else
+ write_udp_offset_end(swqe, skb);
+
+ } else if (skb->nh.iph->protocol == IPPROTO_TCP) {
+ write_tcp_offset_end(swqe, skb);
+ }
+
+ /* icmp (big data) and ip segmentation packets (all other ip
+ packets) do not require any special handling */
+
+ } else {
+ /* Other Ethernet Protocol */
+ swqe->tx_control |= EHEA_SWQE_CRC
+ | EHEA_SWQE_IMM_DATA_PRESENT
+ | EHEA_SWQE_DESCRIPTORS_PRESENT;
+ }
+
+ write_swqe2_data(skb, dev, swqe, lkey);
+}
+
+static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
+ struct ehea_swqe *swqe)
+{
+ int nfrags = skb_shinfo(skb)->nr_frags;
+ u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
+ skb_frag_t *frag;
+ int i;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ /* IPv4 */
+ write_ip_start_end(swqe, skb);
+
+ if (skb->nh.iph->protocol == IPPROTO_TCP) {
+ swqe->tx_control |= EHEA_SWQE_CRC
+ | EHEA_SWQE_IP_CHECKSUM
+ | EHEA_SWQE_TCP_CHECKSUM
+ | EHEA_SWQE_IMM_DATA_PRESENT;
+
+ write_tcp_offset_end(swqe, skb);
+
+ } else if (skb->nh.iph->protocol == IPPROTO_UDP) {
+ if ((skb->nh.iph->frag_off & IP_MF) ||
+ (skb->nh.iph->frag_off & IP_OFFSET))
+ /* IP fragment, so don't change cs */
+ swqe->tx_control |= EHEA_SWQE_CRC
+ | EHEA_SWQE_IMM_DATA_PRESENT;
+ else {
+ swqe->tx_control |= EHEA_SWQE_CRC
+ | EHEA_SWQE_IP_CHECKSUM
+ | EHEA_SWQE_TCP_CHECKSUM
+ | EHEA_SWQE_IMM_DATA_PRESENT;
+
+ write_udp_offset_end(swqe, skb);
+ }
+ } else {
+ /* icmp (big data) and
+ ip segmentation packets (all other ip packets) */
+ swqe->tx_control |= EHEA_SWQE_CRC
+ | EHEA_SWQE_IP_CHECKSUM
+ | EHEA_SWQE_IMM_DATA_PRESENT;
+ }
+ } else {
+ /* Other Ethernet Protocol */
+ swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT;
+ }
+ /* copy (immediate) data */
+ if (nfrags == 0) {
+ /* data is in a single piece */
+ memcpy(imm_data, skb->data, skb->len);
+ } else {
+ /* first copy data from the skb->data buffer ... */
+ memcpy(imm_data, skb->data, skb->len - skb->data_len);
+ imm_data += skb->len - skb->data_len;
+
+ /* ... then copy data from the fragments */
+ for (i = 0; i < nfrags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ memcpy(imm_data,
+ page_address(frag->page) + frag->page_offset,
+ frag->size);
+ imm_data += frag->size;
+ }
+ }
+ swqe->immediate_data_length = skb->len;
+ dev_kfree_skb(skb);
+}
+
+static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ struct ehea_swqe *swqe;
+ unsigned long flags;
+ u32 lkey;
+ int swqe_index;
+ struct ehea_port_res *pr = &port->port_res[0];
+
+ spin_lock(&pr->xmit_lock);
+
+ swqe = ehea_get_swqe(pr->qp, &swqe_index);
+ memset(swqe, 0, SWQE_HEADER_SIZE);
+ atomic_dec(&pr->swqe_avail);
+
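+	/*
+	 * Short frames travel entirely as immediate data in a type 3
+	 * swqe and the skb is freed right away; larger frames use a
+	 * type 2 swqe with scatter/gather descriptors, so the skb is
+	 * parked in sq_skba until its completion arrives.  Completions
+	 * are only requested every few swqes to limit interrupts.
+	 */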
+ if (skb->len <= SWQE3_MAX_IMM) {
+ u32 sig_iv = port->sig_comp_iv;
+ u32 swqe_num = pr->swqe_id_counter;
+ ehea_xmit3(skb, dev, swqe);
+ swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
+ | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
+ if (pr->swqe_ll_count >= (sig_iv - 1)) {
+ swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
+ sig_iv);
+ swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
+ pr->swqe_ll_count = 0;
+ } else
+ pr->swqe_ll_count += 1;
+ } else {
+ swqe->wr_id =
+ EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
+ | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
+ | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
+ pr->sq_skba.arr[pr->sq_skba.index] = skb;
+
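+		/* advance the ring index; sq_skba.len should be a power
+		 * of two (act_nr_send_wqes + 1), so masking wraps it */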
+ pr->sq_skba.index++;
+ pr->sq_skba.index &= (pr->sq_skba.len - 1);
+
+ lkey = pr->send_mr.lkey;
+ ehea_xmit2(skb, dev, swqe, lkey);
+
+ if (pr->swqe_count >= (EHEA_SIG_IV_LONG - 1)) {
+ swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
+ EHEA_SIG_IV_LONG);
+ swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
+ pr->swqe_count = 0;
+ } else
+ pr->swqe_count += 1;
+ }
+ pr->swqe_id_counter += 1;
+
+ if (port->vgrp && vlan_tx_tag_present(skb)) {
+ swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
+ swqe->vlan_tag = vlan_tx_tag_get(skb);
+ }
+
+ if (netif_msg_tx_queued(port)) {
+ ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
+ ehea_dump(swqe, sizeof(*swqe), "swqe");
+ }
+
+ ehea_post_swqe(pr->qp, swqe);
+ pr->tx_packets++;
+
+ if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
+ spin_lock_irqsave(&pr->netif_queue, flags);
+ if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
+ netif_stop_queue(dev);
+ pr->queue_stopped = 1;
+ }
+ spin_unlock_irqrestore(&pr->netif_queue, flags);
+ }
+ dev->trans_start = jiffies;
+ spin_unlock(&pr->xmit_lock);
+
+ return NETDEV_TX_OK;
+}
+
+static void ehea_vlan_rx_register(struct net_device *dev,
+ struct vlan_group *grp)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ struct ehea_adapter *adapter = port->adapter;
+ struct hcp_ehea_port_cb1 *cb1;
+ u64 hret;
+
+ port->vgrp = grp;
+
+ cb1 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ if (!cb1) {
+ ehea_error("no mem for cb1");
+ goto out;
+ }
+
+ if (grp)
+ memset(cb1->vlan_filter, 0, sizeof(cb1->vlan_filter));
+ else
+ memset(cb1->vlan_filter, 0xFF, sizeof(cb1->vlan_filter));
+
+ hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
+ H_PORT_CB1, H_PORT_CB1_ALL, cb1);
+ if (hret != H_SUCCESS)
+ ehea_error("modify_ehea_port failed");
+
+ kfree(cb1);
+out:
+ return;
+}
+
+static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ struct ehea_adapter *adapter = port->adapter;
+ struct hcp_ehea_port_cb1 *cb1;
+ int index;
+ u64 hret;
+
+ cb1 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ if (!cb1) {
+ ehea_error("no mem for cb1");
+ goto out;
+ }
+
+ hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
+ H_PORT_CB1, H_PORT_CB1_ALL, cb1);
+ if (hret != H_SUCCESS) {
+ ehea_error("query_ehea_port failed");
+ goto out;
+ }
+
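+	/* one bit per VLAN id: 64 ids per u64 filter word */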
+	index = (vid / 64);
+	cb1->vlan_filter[index] |= ((u64)1 << (vid & 0x3F));
+
+ hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
+ H_PORT_CB1, H_PORT_CB1_ALL, cb1);
+ if (hret != H_SUCCESS)
+ ehea_error("modify_ehea_port failed");
+out:
+ kfree(cb1);
+ return;
+}
+
+static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ struct ehea_adapter *adapter = port->adapter;
+ struct hcp_ehea_port_cb1 *cb1;
+ int index;
+ u64 hret;
+
+ if (port->vgrp)
+ port->vgrp->vlan_devices[vid] = NULL;
+
+ cb1 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ if (!cb1) {
+ ehea_error("no mem for cb1");
+ goto out;
+ }
+
+ hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
+ H_PORT_CB1, H_PORT_CB1_ALL, cb1);
+ if (hret != H_SUCCESS) {
+ ehea_error("query_ehea_port failed");
+ goto out;
+ }
+
+	index = (vid / 64);
+	cb1->vlan_filter[index] &= ~((u64)1 << (vid & 0x3F));
+
+ hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
+ H_PORT_CB1, H_PORT_CB1_ALL, cb1);
+ if (hret != H_SUCCESS)
+ ehea_error("modify_ehea_port failed");
+out:
+ kfree(cb1);
+ return;
+}
+
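+/*
+ * Drive the QP through its initialization sequence: INITIALIZED,
+ * then ENABLED, then ready-to-send.  Each transition is a modify
+ * call bracketed by queries of the full QP control block.
+ */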
+int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
+{
+ int ret = -EIO;
+ u64 hret;
+ u16 dummy16 = 0;
+ u64 dummy64 = 0;
+ struct hcp_modify_qp_cb0* cb0;
+
+ cb0 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ if (!cb0) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
+ EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
+ if (hret != H_SUCCESS) {
+ ehea_error("query_ehea_qp failed (1)");
+ goto out;
+ }
+
+ cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
+ hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
+ EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
+ &dummy64, &dummy64, &dummy16, &dummy16);
+ if (hret != H_SUCCESS) {
+ ehea_error("modify_ehea_qp failed (1)");
+ goto out;
+ }
+
+ hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
+ EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
+ if (hret != H_SUCCESS) {
+ ehea_error("query_ehea_qp failed (2)");
+ goto out;
+ }
+
+ cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
+ hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
+ EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
+ &dummy64, &dummy64, &dummy16, &dummy16);
+ if (hret != H_SUCCESS) {
+ ehea_error("modify_ehea_qp failed (2)");
+ goto out;
+ }
+
+ hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
+ EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
+ if (hret != H_SUCCESS) {
+ ehea_error("query_ehea_qp failed (3)");
+ goto out;
+ }
+
+ cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
+ hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
+ EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
+ &dummy64, &dummy64, &dummy16, &dummy16);
+ if (hret != H_SUCCESS) {
+ ehea_error("modify_ehea_qp failed (3)");
+ goto out;
+ }
+
+ hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
+ EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
+ if (hret != H_SUCCESS) {
+ ehea_error("query_ehea_qp failed (4)");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ kfree(cb0);
+ return ret;
+}
+
+static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
+ int add_tx_qps)
+{
+ int ret, i;
+ struct port_res_cfg pr_cfg, pr_cfg_small_rx;
+ enum ehea_eq_type eq_type = EHEA_EQ;
+
+ port->qp_eq = ehea_create_eq(port->adapter, eq_type,
+ EHEA_MAX_ENTRIES_EQ, 1);
+ if (!port->qp_eq) {
+ ret = -EINVAL;
+ ehea_error("ehea_create_eq failed (qp_eq)");
+ goto out_kill_eq;
+ }
+
+ pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
+ pr_cfg.max_entries_scq = sq_entries;
+ pr_cfg.max_entries_sq = sq_entries;
+ pr_cfg.max_entries_rq1 = rq1_entries;
+ pr_cfg.max_entries_rq2 = rq2_entries;
+ pr_cfg.max_entries_rq3 = rq3_entries;
+
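+	/* additional tx-only QPs only need token receive queues */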
+ pr_cfg_small_rx.max_entries_rcq = 1;
+ pr_cfg_small_rx.max_entries_scq = sq_entries;
+ pr_cfg_small_rx.max_entries_sq = sq_entries;
+ pr_cfg_small_rx.max_entries_rq1 = 1;
+ pr_cfg_small_rx.max_entries_rq2 = 1;
+ pr_cfg_small_rx.max_entries_rq3 = 1;
+
+ for (i = 0; i < def_qps; i++) {
+ ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
+ if (ret)
+ goto out_clean_pr;
+ }
+ for (i = def_qps; i < def_qps + add_tx_qps; i++) {
+ ret = ehea_init_port_res(port, &port->port_res[i],
+ &pr_cfg_small_rx, i);
+ if (ret)
+ goto out_clean_pr;
+ }
+
+ return 0;
+
+out_clean_pr:
+ while (--i >= 0)
+ ehea_clean_portres(port, &port->port_res[i]);
+
+out_kill_eq:
+ ehea_destroy_eq(port->qp_eq);
+ return ret;
+}
+
+static int ehea_clean_all_portres(struct ehea_port *port)
+{
+ int ret = 0;
+ int i;
+
+	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+ ret |= ehea_clean_portres(port, &port->port_res[i]);
+
+ ret |= ehea_destroy_eq(port->qp_eq);
+
+ return ret;
+}
+
+static int ehea_up(struct net_device *dev)
+{
+ int ret, i;
+ struct ehea_port *port = netdev_priv(dev);
+
+ if (port->state == EHEA_PORT_UP)
+ return 0;
+
+ ret = ehea_port_res_setup(port, port->num_def_qps,
+ port->num_add_tx_qps);
+ if (ret) {
+ ehea_error("port_res_failed");
+ goto out;
+ }
+
+ /* Set default QP for this port */
+ ret = ehea_configure_port(port);
+ if (ret) {
+ ehea_error("ehea_configure_port failed. ret:%d", ret);
+ goto out_clean_pr;
+ }
+
+ ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
+ if (ret) {
+ ret = -EIO;
+		ehea_error("broadcast registration failed");
+ goto out_clean_pr;
+ }
+
+ ret = ehea_reg_interrupts(dev);
+ if (ret) {
+		ehea_error("reg_interrupts failed. ret:%d", ret);
+ goto out_dereg_bc;
+ }
+
+	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
+ if (ret) {
+ ehea_error("activate_qp failed");
+ goto out_free_irqs;
+ }
+ }
+
+	for (i = 0; i < port->num_def_qps; i++) {
+ ret = ehea_fill_port_res(&port->port_res[i]);
+ if (ret) {
+			ehea_error("ehea_fill_port_res failed");
+ goto out_free_irqs;
+ }
+ }
+
+ ret = 0;
+ port->state = EHEA_PORT_UP;
+ goto out;
+
+out_free_irqs:
+ ehea_free_interrupts(dev);
+
+out_dereg_bc:
+ ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
+
+out_clean_pr:
+ ehea_clean_all_portres(port);
+out:
+ return ret;
+}
+
+static int ehea_open(struct net_device *dev)
+{
+ int ret;
+ struct ehea_port *port = netdev_priv(dev);
+
+ down(&port->port_lock);
+
+ if (netif_msg_ifup(port))
+ ehea_info("enabling port %s", dev->name);
+
+ ret = ehea_up(dev);
+ if (!ret)
+ netif_start_queue(dev);
+
+ up(&port->port_lock);
+
+ return ret;
+}
+
+static int ehea_down(struct net_device *dev)
+{
+ int ret, i;
+ struct ehea_port *port = netdev_priv(dev);
+
+ if (port->state == EHEA_PORT_DOWN)
+ return 0;
+
+ ehea_drop_multicast_list(dev);
+ ehea_free_interrupts(dev);
+
+ for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+ tasklet_kill(&port->port_res[i].send_comp_task);
+
+ ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
+ ret = ehea_clean_all_portres(port);
+ port->state = EHEA_PORT_DOWN;
+ return ret;
+}
+
+static int ehea_stop(struct net_device *dev)
+{
+ int ret;
+ struct ehea_port *port = netdev_priv(dev);
+
+ if (netif_msg_ifdown(port))
+ ehea_info("disabling port %s", dev->name);
+
+ flush_workqueue(port->adapter->ehea_wq);
+ down(&port->port_lock);
+ netif_stop_queue(dev);
+ ret = ehea_down(dev);
+ up(&port->port_lock);
+ return ret;
+}
+
+static void ehea_reset_port(void *data)
+{
+ int ret;
+ struct net_device *dev = data;
+ struct ehea_port *port = netdev_priv(dev);
+
+ port->resets++;
+ down(&port->port_lock);
+ netif_stop_queue(dev);
+ netif_poll_disable(dev);
+
+ ret = ehea_down(dev);
+ if (ret)
+ ehea_error("ehea_down failed. not all resources are freed");
+
+ ret = ehea_up(dev);
+ if (ret) {
+ ehea_error("Reset device %s failed: ret=%d", dev->name, ret);
+ goto out;
+ }
+
+ if (netif_msg_timer(port))
+		ehea_info("Device %s reset successfully", dev->name);
+
+ netif_poll_enable(dev);
+ netif_wake_queue(dev);
+out:
+ up(&port->port_lock);
+ return;
+}
+
+static void ehea_tx_watchdog(struct net_device *dev)
+{
+ struct ehea_port *port = netdev_priv(dev);
+
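+	/* recover from a stuck send queue by resetting the whole port */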
+ if (netif_carrier_ok(dev))
+ queue_work(port->adapter->ehea_wq, &port->reset_task);
+}
+
+int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
+{
+ struct hcp_query_ehea *cb;
+ u64 hret;
+ int ret;
+
+ cb = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ if (!cb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ hret = ehea_h_query_ehea(adapter->handle, cb);
+
+ if (hret != H_SUCCESS) {
+ ret = -EIO;
+ goto out_herr;
+ }
+
+ adapter->num_ports = cb->num_ports;
+ adapter->max_mc_mac = cb->max_mc_mac - 1;
+ ret = 0;
+
+out_herr:
+ kfree(cb);
+out:
+ return ret;
+}
+
+static int ehea_setup_single_port(struct ehea_port *port,
+ struct device_node *dn)
+{
+ int ret;
+ u64 hret;
+ struct net_device *dev = port->netdev;
+ struct ehea_adapter *adapter = port->adapter;
+ struct hcp_ehea_port_cb4 *cb4;
+ u32 *dn_log_port_id;
+
+ sema_init(&port->port_lock, 1);
+ port->state = EHEA_PORT_DOWN;
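+	/* request a signalled send completion roughly every tenth swqe */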
+ port->sig_comp_iv = sq_entries / 10;
+
+ if (!dn) {
+ ehea_error("bad device node: dn=%p", dn);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ port->of_dev_node = dn;
+
+ /* Determine logical port id */
+ dn_log_port_id = (u32*)get_property(dn, "ibm,hea-port-no", NULL);
+
+ if (!dn_log_port_id) {
+ ehea_error("bad device node: dn_log_port_id=%p",
+ dn_log_port_id);
+ ret = -EINVAL;
+ goto out;
+ }
+ port->logical_port_id = *dn_log_port_id;
+
+ port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
+ if (!port->mc_list) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&port->mc_list->list);
+
+ ehea_set_portspeed(port, EHEA_SPEED_AUTONEG);
+
+ ret = ehea_sense_port_attr(port);
+ if (ret)
+ goto out;
+
+ /* Enable Jumbo frames */
+ cb4 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ if (!cb4) {
+ ehea_error("no mem for cb4");
+ } else {
+ cb4->jumbo_frame = 1;
+ hret = ehea_h_modify_ehea_port(adapter->handle,
+ port->logical_port_id,
+ H_PORT_CB4, H_PORT_CB4_JUMBO,
+ cb4);
+ if (hret != H_SUCCESS) {
+ ehea_info("Jumbo frames not activated");
+ }
+ kfree(cb4);
+ }
+
+ /* initialize net_device structure */
+ SET_MODULE_OWNER(dev);
+
+ memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);
+
+ dev->open = ehea_open;
+ dev->poll = ehea_poll;
+ dev->weight = 64;
+ dev->stop = ehea_stop;
+ dev->hard_start_xmit = ehea_start_xmit;
+ dev->get_stats = ehea_get_stats;
+ dev->set_multicast_list = ehea_set_multicast_list;
+ dev->set_mac_address = ehea_set_mac_addr;
+ dev->change_mtu = ehea_change_mtu;
+ dev->vlan_rx_register = ehea_vlan_rx_register;
+ dev->vlan_rx_add_vid = ehea_vlan_rx_add_vid;
+ dev->vlan_rx_kill_vid = ehea_vlan_rx_kill_vid;
+ dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
+ | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX
+ | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
+ | NETIF_F_LLTX;
+ dev->tx_timeout = &ehea_tx_watchdog;
+ dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
+
+ INIT_WORK(&port->reset_task, ehea_reset_port, dev);
+
+ ehea_set_ethtool_ops(dev);
+
+ ret = register_netdev(dev);
+ if (ret) {
+ ehea_error("register_netdev failed. ret=%d", ret);
+ goto out_free;
+ }
+
+ port->netdev = dev;
+ ret = 0;
+ goto out;
+
+out_free:
+ kfree(port->mc_list);
+out:
+ return ret;
+}
+
+static int ehea_setup_ports(struct ehea_adapter *adapter)
+{
+ int ret;
+ int port_setup_ok = 0;
+ struct ehea_port *port;
+ struct device_node *dn = NULL;
+ struct net_device *dev;
+ int i;
+
+ /* get port properties for all ports */
+ for (i = 0; i < adapter->num_ports; i++) {
+
+ if (adapter->port[i])
+ continue; /* port already up and running */
+
+ /* allocate memory for the port structures */
+ dev = alloc_etherdev(sizeof(struct ehea_port));
+
+ if (!dev) {
+ ehea_error("no mem for net_device");
+ break;
+ }
+
+ port = netdev_priv(dev);
+ port->adapter = adapter;
+ port->netdev = dev;
+ adapter->port[i] = port;
+ port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);
+
+ dn = of_find_node_by_name(dn, "ethernet");
+ ret = ehea_setup_single_port(port, dn);
+ if (ret) {
+ /* Free mem for this port struct. The others will be
+ processed on rollback */
+ free_netdev(dev);
+ adapter->port[i] = NULL;
+ ehea_error("eHEA port %d setup failed, ret=%d", i, ret);
+ }
+ }
+
+ of_node_put(dn);
+
+	/* Check for successfully set up ports */
+ for (i = 0; i < adapter->num_ports; i++)
+ if (adapter->port[i])
+ port_setup_ok++;
+
+ if (port_setup_ok)
+ ret = 0; /* At least some ports are setup correctly */
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static int __devinit ehea_probe(struct ibmebus_dev *dev,
+ const struct of_device_id *id)
+{
+ struct ehea_adapter *adapter;
+ u64 *adapter_handle;
+ int ret;
+
+ adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+ if (!adapter) {
+ ret = -ENOMEM;
+ dev_err(&dev->ofdev.dev, "no mem for ehea_adapter\n");
+ goto out;
+ }
+
+ adapter_handle = (u64*)get_property(dev->ofdev.node, "ibm,hea-handle",
+ NULL);
+ if (!adapter_handle) {
+ dev_err(&dev->ofdev.dev, "failed getting handle for adapter"
+ " '%s'\n", dev->ofdev.node->full_name);
+ ret = -ENODEV;
+ goto out_free_ad;
+ }
+
+ adapter->handle = *adapter_handle;
+ adapter->pd = EHEA_PD_ID;
+
+ dev->ofdev.dev.driver_data = adapter;
+
+ ret = ehea_reg_mr_adapter(adapter);
+ if (ret) {
+ dev_err(&dev->ofdev.dev, "reg_mr_adapter failed\n");
+ goto out_free_ad;
+ }
+
+ /* initialize adapter and ports */
+ /* get adapter properties */
+ ret = ehea_sense_adapter_attr(adapter);
+ if (ret) {
+ dev_err(&dev->ofdev.dev, "sense_adapter_attr failed: %d", ret);
+ goto out_free_res;
+ }
+ dev_info(&dev->ofdev.dev, "%d eHEA ports found\n", adapter->num_ports);
+
+ adapter->neq = ehea_create_eq(adapter,
+ EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
+	if (!adapter->neq) {
+		ret = -EIO;
+		dev_err(&dev->ofdev.dev, "NEQ creation failed");
+		goto out_free_res;
+	}
+
+ tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
+ (unsigned long)adapter);
+
+ ret = ibmebus_request_irq(NULL, adapter->neq->attr.ist1,
+ ehea_interrupt_neq, SA_INTERRUPT,
+ "ehea_neq", adapter);
+ if (ret) {
+ dev_err(&dev->ofdev.dev, "requesting NEQ IRQ failed");
+ goto out_kill_eq;
+ }
+
+	adapter->ehea_wq = create_workqueue("ehea_wq");
+	if (!adapter->ehea_wq) {
+		ret = -ENOMEM;
+		goto out_free_irq;
+	}
+
+ ret = ehea_setup_ports(adapter);
+ if (ret) {
+ dev_err(&dev->ofdev.dev, "setup_ports failed");
+ goto out_kill_wq;
+ }
+
+ ret = 0;
+ goto out;
+
+out_kill_wq:
+ destroy_workqueue(adapter->ehea_wq);
+
+out_free_irq:
+ ibmebus_free_irq(NULL, adapter->neq->attr.ist1, adapter);
+
+out_kill_eq:
+ ehea_destroy_eq(adapter->neq);
+
+out_free_res:
+ ehea_h_free_resource(adapter->handle, adapter->mr.handle);
+
+out_free_ad:
+ kfree(adapter);
+out:
+ return ret;
+}
+
+static void ehea_shutdown_single_port(struct ehea_port *port)
+{
+ unregister_netdev(port->netdev);
+ kfree(port->mc_list);
+ free_netdev(port->netdev);
+}
+
+static int __devexit ehea_remove(struct ibmebus_dev *dev)
+{
+ struct ehea_adapter *adapter = dev->ofdev.dev.driver_data;
+ u64 hret;
+ int i;
+
+ for (i = 0; i < adapter->num_ports; i++)
+ if (adapter->port[i]) {
+ ehea_shutdown_single_port(adapter->port[i]);
+ adapter->port[i] = NULL;
+ }
+ destroy_workqueue(adapter->ehea_wq);
+
+ ibmebus_free_irq(NULL, adapter->neq->attr.ist1, adapter);
+
+ ehea_destroy_eq(adapter->neq);
+
+ hret = ehea_h_free_resource(adapter->handle, adapter->mr.handle);
+ if (hret) {
+ dev_err(&dev->ofdev.dev, "free_resource_mr failed");
+ return -EIO;
+ }
+ kfree(adapter);
+ return 0;
+}
+
+static int check_module_parm(void)
+{
+ int ret = 0;
+
+ if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
+ (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
+ ehea_info("Bad parameter: rq1_entries");
+ ret = -EINVAL;
+ }
+ if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
+ (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
+ ehea_info("Bad parameter: rq2_entries");
+ ret = -EINVAL;
+ }
+ if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
+ (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
+ ehea_info("Bad parameter: rq3_entries");
+ ret = -EINVAL;
+ }
+ if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
+ (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
+ ehea_info("Bad parameter: sq_entries");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static struct of_device_id ehea_device_table[] = {
+ {
+ .name = "lhea",
+ .compatible = "IBM,lhea",
+ },
+ {},
+};
+
+static struct ibmebus_driver ehea_driver = {
+ .name = "ehea",
+ .id_table = ehea_device_table,
+ .probe = ehea_probe,
+ .remove = ehea_remove,
+};
+
+int __init ehea_module_init(void)
+{
+ int ret;
+
+ printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n",
+ DRV_VERSION);
+
+ ret = check_module_parm();
+ if (ret)
+ goto out;
+ ret = ibmebus_register_driver(&ehea_driver);
+ if (ret)
+ ehea_error("failed registering eHEA device driver on ebus");
+
+out:
+ return ret;
+}
+
+static void __exit ehea_module_exit(void)
+{
+ ibmebus_unregister_driver(&ehea_driver);
+}
+
+module_init(ehea_module_init);
+module_exit(ehea_module_exit);
diff --git a/drivers/net/ehea/ehea_phyp.c b/drivers/net/ehea/ehea_phyp.c
new file mode 100644
index 00000000000..4a85aca4c7e
--- /dev/null
+++ b/drivers/net/ehea/ehea_phyp.c
@@ -0,0 +1,705 @@
+/*
+ * linux/drivers/net/ehea/ehea_phyp.c
+ *
+ * eHEA ethernet device driver for IBM eServer System p
+ *
+ * (C) Copyright IBM Corp. 2006
+ *
+ * Authors:
+ * Christoph Raisch <raisch@de.ibm.com>
+ * Jan-Bernd Themann <themann@de.ibm.com>
+ * Thomas Klein <tklein@de.ibm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "ehea_phyp.h"
+
+
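+/*
+ * Encode a queue length for H_ALLOC_HEA_RESOURCE: find the smallest
+ * ld for which (2^ld - 1) still covers queue_entries and return
+ * ld - 1, the order encoding the hypervisor expects.
+ */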
+static inline u16 get_order_of_qentries(u16 queue_entries)
+{
+	u8 ld = 1; /* logarithmus dualis (binary logarithm) */
+ while (((1U << ld) - 1) < queue_entries)
+ ld++;
+ return ld - 1;
+}
+
+/* Defines for H_CALL H_ALLOC_RESOURCE */
+#define H_ALL_RES_TYPE_QP 1
+#define H_ALL_RES_TYPE_CQ 2
+#define H_ALL_RES_TYPE_EQ 3
+#define H_ALL_RES_TYPE_MR 5
+#define H_ALL_RES_TYPE_MW 6
+
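+/*
+ * All eHEA hcalls are funnelled through this wrapper: if the
+ * hypervisor reports "long busy", the call is retried up to five
+ * times, sleeping for the hinted interval in between.
+ */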
+static long ehea_hcall_9arg_9ret(unsigned long opcode,
+ unsigned long arg1, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4,
+ unsigned long arg5, unsigned long arg6,
+ unsigned long arg7, unsigned long arg8,
+ unsigned long arg9, unsigned long *out1,
+				 unsigned long *out2, unsigned long *out3,
+				 unsigned long *out4, unsigned long *out5,
+				 unsigned long *out6, unsigned long *out7,
+				 unsigned long *out8, unsigned long *out9)
+{
+ long hret;
+ int i, sleep_msecs;
+
+ for (i = 0; i < 5; i++) {
+		hret = plpar_hcall_9arg_9ret(opcode, arg1, arg2, arg3, arg4,
+ arg5, arg6, arg7, arg8, arg9, out1,
+ out2, out3, out4, out5, out6, out7,
+ out8, out9);
+ if (H_IS_LONG_BUSY(hret)) {
+ sleep_msecs = get_longbusy_msecs(hret);
+ msleep_interruptible(sleep_msecs);
+ continue;
+ }
+
+ if (hret < H_SUCCESS)
+ ehea_error("op=%lx hret=%lx "
+ "i1=%lx i2=%lx i3=%lx i4=%lx i5=%lx i6=%lx "
+ "i7=%lx i8=%lx i9=%lx "
+ "o1=%lx o2=%lx o3=%lx o4=%lx o5=%lx o6=%lx "
+ "o7=%lx o8=%lx o9=%lx",
+ opcode, hret, arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7, arg8, arg9, *out1, *out2, *out3,
+ *out4, *out5, *out6, *out7, *out8, *out9);
+ return hret;
+ }
+ return H_BUSY;
+}
+
+u64 ehea_h_query_ehea_qp(const u64 adapter_handle, const u8 qp_category,
+ const u64 qp_handle, const u64 sel_mask, void *cb_addr)
+{
+ u64 dummy;
+
+ if ((((u64)cb_addr) & (PAGE_SIZE - 1)) != 0) {
+		ehea_error("not on page boundary");
+ return H_PARAMETER;
+ }
+
+ return ehea_hcall_9arg_9ret(H_QUERY_HEA_QP,
+ adapter_handle, /* R4 */
+ qp_category, /* R5 */
+ qp_handle, /* R6 */
+ sel_mask, /* R7 */
+ virt_to_abs(cb_addr), /* R8 */
+ 0, 0, 0, 0, /* R9-R12 */
+ &dummy, /* R4 */
+ &dummy, /* R5 */
+ &dummy, /* R6 */
+ &dummy, /* R7 */
+ &dummy, /* R8 */
+ &dummy, /* R9 */
+ &dummy, /* R10 */
+ &dummy, /* R11 */
+ &dummy); /* R12 */
+}
+
+/* input param R5 */
+#define H_ALL_RES_QP_EQPO EHEA_BMASK_IBM(9, 11)
+#define H_ALL_RES_QP_QPP EHEA_BMASK_IBM(12, 12)
+#define H_ALL_RES_QP_RQR EHEA_BMASK_IBM(13, 15)
+#define H_ALL_RES_QP_EQEG EHEA_BMASK_IBM(16, 16)
+#define H_ALL_RES_QP_LL_QP EHEA_BMASK_IBM(17, 17)
+#define H_ALL_RES_QP_DMA128 EHEA_BMASK_IBM(19, 19)
+#define H_ALL_RES_QP_HSM EHEA_BMASK_IBM(20, 21)
+#define H_ALL_RES_QP_SIGT EHEA_BMASK_IBM(22, 23)
+#define H_ALL_RES_QP_TENURE EHEA_BMASK_IBM(48, 55)
+#define H_ALL_RES_QP_RES_TYP EHEA_BMASK_IBM(56, 63)
+
+/* input param R9 */
+#define H_ALL_RES_QP_TOKEN EHEA_BMASK_IBM(0, 31)
+#define H_ALL_RES_QP_PD        EHEA_BMASK_IBM(32, 63)
+
+/* input param R10 */
+#define H_ALL_RES_QP_MAX_SWQE EHEA_BMASK_IBM(4, 7)
+#define H_ALL_RES_QP_MAX_R1WQE EHEA_BMASK_IBM(12, 15)
+#define H_ALL_RES_QP_MAX_R2WQE EHEA_BMASK_IBM(20, 23)
+#define H_ALL_RES_QP_MAX_R3WQE EHEA_BMASK_IBM(28, 31)
+/* Max Send Scatter Gather Elements */
+#define H_ALL_RES_QP_MAX_SSGE EHEA_BMASK_IBM(37, 39)
+#define H_ALL_RES_QP_MAX_R1SGE EHEA_BMASK_IBM(45, 47)
+/* Max Receive SG Elements RQ1 */
+#define H_ALL_RES_QP_MAX_R2SGE EHEA_BMASK_IBM(53, 55)
+#define H_ALL_RES_QP_MAX_R3SGE EHEA_BMASK_IBM(61, 63)
+
+/* input param R11 */
+#define H_ALL_RES_QP_SWQE_IDL EHEA_BMASK_IBM(0, 7)
+/* max swqe immediate data length */
+#define H_ALL_RES_QP_PORT_NUM EHEA_BMASK_IBM(48, 63)
+
+/* input param R12 */
+#define H_ALL_RES_QP_TH_RQ2 EHEA_BMASK_IBM(0, 15)
+/* Threshold RQ2 */
+#define H_ALL_RES_QP_TH_RQ3 EHEA_BMASK_IBM(16, 31)
+/* Threshold RQ3 */
+
+/* output param R6 */
+#define H_ALL_RES_QP_ACT_SWQE EHEA_BMASK_IBM(0, 15)
+#define H_ALL_RES_QP_ACT_R1WQE EHEA_BMASK_IBM(16, 31)
+#define H_ALL_RES_QP_ACT_R2WQE EHEA_BMASK_IBM(32, 47)
+#define H_ALL_RES_QP_ACT_R3WQE EHEA_BMASK_IBM(48, 63)
+
+/* output param, R7 */
+#define H_ALL_RES_QP_ACT_SSGE EHEA_BMASK_IBM(0, 7)
+#define H_ALL_RES_QP_ACT_R1SGE EHEA_BMASK_IBM(8, 15)
+#define H_ALL_RES_QP_ACT_R2SGE EHEA_BMASK_IBM(16, 23)
+#define H_ALL_RES_QP_ACT_R3SGE EHEA_BMASK_IBM(24, 31)
+#define H_ALL_RES_QP_ACT_SWQE_IDL EHEA_BMASK_IBM(32, 39)
+
+/* output param R8,R9 */
+#define H_ALL_RES_QP_SIZE_SQ EHEA_BMASK_IBM(0, 31)
+#define H_ALL_RES_QP_SIZE_RQ1 EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_QP_SIZE_RQ2 EHEA_BMASK_IBM(0, 31)
+#define H_ALL_RES_QP_SIZE_RQ3 EHEA_BMASK_IBM(32, 63)
+
+/* output param R11,R12 */
+#define H_ALL_RES_QP_LIOBN_SQ EHEA_BMASK_IBM(0, 31)
+#define H_ALL_RES_QP_LIOBN_RQ1 EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_QP_LIOBN_RQ2 EHEA_BMASK_IBM(0, 31)
+#define H_ALL_RES_QP_LIOBN_RQ3 EHEA_BMASK_IBM(32, 63)
+
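+/*
+ * Allocate a queue pair.  The requested attributes are packed into
+ * the nine hcall input registers using the masks above; the actual
+ * values granted by the hypervisor (queue sizes, LIOBNs, mapping
+ * address) are unpacked back into init_attr.
+ */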
+u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
+ struct ehea_qp_init_attr *init_attr, const u32 pd,
+ u64 *qp_handle, struct h_epas *h_epas)
+{
+ u64 hret;
+
+ u64 allocate_controls =
+ EHEA_BMASK_SET(H_ALL_RES_QP_EQPO, init_attr->low_lat_rq1 ? 1 : 0)
+ | EHEA_BMASK_SET(H_ALL_RES_QP_QPP, 0)
+ | EHEA_BMASK_SET(H_ALL_RES_QP_RQR, 6) /* rq1 & rq2 & rq3 */
+ | EHEA_BMASK_SET(H_ALL_RES_QP_EQEG, 0) /* EQE gen. disabled */
+ | EHEA_BMASK_SET(H_ALL_RES_QP_LL_QP, init_attr->low_lat_rq1)
+ | EHEA_BMASK_SET(H_ALL_RES_QP_DMA128, 0)
+ | EHEA_BMASK_SET(H_ALL_RES_QP_HSM, 0)
+ | EHEA_BMASK_SET(H_ALL_RES_QP_SIGT, init_attr->signalingtype)
+ | EHEA_BMASK_SET(H_ALL_RES_QP_RES_TYP, H_ALL_RES_TYPE_QP);
+
+ u64 r9_reg = EHEA_BMASK_SET(H_ALL_RES_QP_PD, pd)
+ | EHEA_BMASK_SET(H_ALL_RES_QP_TOKEN, init_attr->qp_token);
+
+ u64 max_r10_reg =
+ EHEA_BMASK_SET(H_ALL_RES_QP_MAX_SWQE,
+ get_order_of_qentries(init_attr->max_nr_send_wqes))
+ | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R1WQE,
+ get_order_of_qentries(init_attr->max_nr_rwqes_rq1))
+ | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R2WQE,
+ get_order_of_qentries(init_attr->max_nr_rwqes_rq2))
+ | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R3WQE,
+ get_order_of_qentries(init_attr->max_nr_rwqes_rq3))
+ | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_SSGE, init_attr->wqe_size_enc_sq)
+ | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R1SGE,
+ init_attr->wqe_size_enc_rq1)
+ | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R2SGE,
+ init_attr->wqe_size_enc_rq2)
+ | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R3SGE,
+ init_attr->wqe_size_enc_rq3);
+
+ u64 r11_in =
+ EHEA_BMASK_SET(H_ALL_RES_QP_SWQE_IDL, init_attr->swqe_imm_data_len)
+ | EHEA_BMASK_SET(H_ALL_RES_QP_PORT_NUM, init_attr->port_nr);
+ u64 threshold =
+ EHEA_BMASK_SET(H_ALL_RES_QP_TH_RQ2, init_attr->rq2_threshold)
+ | EHEA_BMASK_SET(H_ALL_RES_QP_TH_RQ3, init_attr->rq3_threshold);
+
+ u64 r5_out = 0;
+ u64 r6_out = 0;
+ u64 r7_out = 0;
+ u64 r8_out = 0;
+ u64 r9_out = 0;
+ u64 g_la_user_out = 0;
+ u64 r11_out = 0;
+ u64 r12_out = 0;
+
+ hret = ehea_hcall_9arg_9ret(H_ALLOC_HEA_RESOURCE,
+ adapter_handle, /* R4 */
+ allocate_controls, /* R5 */
+ init_attr->send_cq_handle, /* R6 */
+ init_attr->recv_cq_handle, /* R7 */
+ init_attr->aff_eq_handle, /* R8 */
+ r9_reg, /* R9 */
+ max_r10_reg, /* R10 */
+ r11_in, /* R11 */
+ threshold, /* R12 */
+ qp_handle, /* R4 */
+ &r5_out, /* R5 */
+ &r6_out, /* R6 */
+ &r7_out, /* R7 */
+ &r8_out, /* R8 */
+ &r9_out, /* R9 */
+ &g_la_user_out, /* R10 */
+ &r11_out, /* R11 */
+ &r12_out); /* R12 */
+
+ init_attr->qp_nr = (u32)r5_out;
+
+ init_attr->act_nr_send_wqes =
+ (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_SWQE, r6_out);
+ init_attr->act_nr_rwqes_rq1 =
+ (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R1WQE, r6_out);
+ init_attr->act_nr_rwqes_rq2 =
+ (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R2WQE, r6_out);
+ init_attr->act_nr_rwqes_rq3 =
+ (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R3WQE, r6_out);
+
+ init_attr->act_wqe_size_enc_sq = init_attr->wqe_size_enc_sq;
+ init_attr->act_wqe_size_enc_rq1 = init_attr->wqe_size_enc_rq1;
+ init_attr->act_wqe_size_enc_rq2 = init_attr->wqe_size_enc_rq2;
+ init_attr->act_wqe_size_enc_rq3 = init_attr->wqe_size_enc_rq3;
+
+ init_attr->nr_sq_pages =
+ (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_SQ, r8_out);
+ init_attr->nr_rq1_pages =
+ (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ1, r8_out);
+ init_attr->nr_rq2_pages =
+ (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ2, r9_out);
+ init_attr->nr_rq3_pages =
+ (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ3, r9_out);
+
+ init_attr->liobn_sq =
+ (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_SQ, r11_out);
+ init_attr->liobn_rq1 =
+ (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ1, r11_out);
+ init_attr->liobn_rq2 =
+ (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ2, r12_out);
+ init_attr->liobn_rq3 =
+ (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ3, r12_out);
+
+ if (!hret)
+ hcp_epas_ctor(h_epas, g_la_user_out, g_la_user_out);
+
+ return hret;
+}
+
+u64 ehea_h_alloc_resource_cq(const u64 adapter_handle,
+ struct ehea_cq_attr *cq_attr,
+ u64 *cq_handle, struct h_epas *epas)
+{
+ u64 hret, dummy, act_nr_of_cqes_out, act_pages_out;
+ u64 g_la_privileged_out, g_la_user_out;
+
+ hret = ehea_hcall_9arg_9ret(H_ALLOC_HEA_RESOURCE,
+ adapter_handle, /* R4 */
+ H_ALL_RES_TYPE_CQ, /* R5 */
+ cq_attr->eq_handle, /* R6 */
+ cq_attr->cq_token, /* R7 */
+ cq_attr->max_nr_of_cqes, /* R8 */
+ 0, 0, 0, 0, /* R9-R12 */
+ cq_handle, /* R4 */
+ &dummy, /* R5 */
+ &dummy, /* R6 */
+ &act_nr_of_cqes_out, /* R7 */
+ &act_pages_out, /* R8 */
+ &g_la_privileged_out, /* R9 */
+ &g_la_user_out, /* R10 */
+ &dummy, /* R11 */
+ &dummy); /* R12 */
+
+ cq_attr->act_nr_of_cqes = act_nr_of_cqes_out;
+ cq_attr->nr_pages = act_pages_out;
+
+ if (!hret)
+ hcp_epas_ctor(epas, g_la_privileged_out, g_la_user_out);
+
+ return hret;
+}
+
+/* input param R5 */
+#define H_ALL_RES_EQ_NEQ EHEA_BMASK_IBM(0, 0)
+#define H_ALL_RES_EQ_NON_NEQ_ISN EHEA_BMASK_IBM(6, 7)
+#define H_ALL_RES_EQ_INH_EQE_GEN EHEA_BMASK_IBM(16, 16)
+#define H_ALL_RES_EQ_RES_TYPE EHEA_BMASK_IBM(56, 63)
+/* input param R6 */
+#define H_ALL_RES_EQ_MAX_EQE EHEA_BMASK_IBM(32, 63)
+
+/* output param R6 */
+#define H_ALL_RES_EQ_LIOBN EHEA_BMASK_IBM(32, 63)
+
+/* output param R7 */
+#define H_ALL_RES_EQ_ACT_EQE EHEA_BMASK_IBM(32, 63)
+
+/* output param R8 */
+#define H_ALL_RES_EQ_ACT_PS EHEA_BMASK_IBM(32, 63)
+
+/* output param R9 */
+#define H_ALL_RES_EQ_ACT_EQ_IST_C EHEA_BMASK_IBM(30, 31)
+#define H_ALL_RES_EQ_ACT_EQ_IST_1 EHEA_BMASK_IBM(40, 63)
+
+/* output param R10 */
+#define H_ALL_RES_EQ_ACT_EQ_IST_2 EHEA_BMASK_IBM(40, 63)
+
+/* output param R11 */
+#define H_ALL_RES_EQ_ACT_EQ_IST_3 EHEA_BMASK_IBM(40, 63)
+
+/* output param R12 */
+#define H_ALL_RES_EQ_ACT_EQ_IST_4 EHEA_BMASK_IBM(40, 63)
+
+u64 ehea_h_alloc_resource_eq(const u64 adapter_handle,
+ struct ehea_eq_attr *eq_attr, u64 *eq_handle)
+{
+ u64 hret, dummy, eq_liobn, allocate_controls;
+ u64 ist1_out, ist2_out, ist3_out, ist4_out;
+ u64 act_nr_of_eqes_out, act_pages_out;
+
+ /* resource type */
+ allocate_controls =
+ EHEA_BMASK_SET(H_ALL_RES_EQ_RES_TYPE, H_ALL_RES_TYPE_EQ)
+ | EHEA_BMASK_SET(H_ALL_RES_EQ_NEQ, eq_attr->type ? 1 : 0)
+ | EHEA_BMASK_SET(H_ALL_RES_EQ_INH_EQE_GEN, !eq_attr->eqe_gen)
+ | EHEA_BMASK_SET(H_ALL_RES_EQ_NON_NEQ_ISN, 1);
+
+ hret = ehea_hcall_9arg_9ret(H_ALLOC_HEA_RESOURCE,
+ adapter_handle, /* R4 */
+ allocate_controls, /* R5 */
+ eq_attr->max_nr_of_eqes, /* R6 */
+				    0, 0, 0, 0, 0, 0,	     /* R7-R12 */
+ eq_handle, /* R4 */
+ &dummy, /* R5 */
+ &eq_liobn, /* R6 */
+ &act_nr_of_eqes_out, /* R7 */
+ &act_pages_out, /* R8 */
+ &ist1_out, /* R9 */
+ &ist2_out, /* R10 */
+ &ist3_out, /* R11 */
+ &ist4_out); /* R12 */
+
+ eq_attr->act_nr_of_eqes = act_nr_of_eqes_out;
+ eq_attr->nr_pages = act_pages_out;
+ eq_attr->ist1 = ist1_out;
+ eq_attr->ist2 = ist2_out;
+ eq_attr->ist3 = ist3_out;
+ eq_attr->ist4 = ist4_out;
+
+ return hret;
+}
+
+u64 ehea_h_modify_ehea_qp(const u64 adapter_handle, const u8 cat,
+ const u64 qp_handle, const u64 sel_mask,
+ void *cb_addr, u64 *inv_attr_id, u64 *proc_mask,
+ u16 *out_swr, u16 *out_rwr)
+{
+ u64 hret, dummy, act_out_swr, act_out_rwr;
+
+ if ((((u64)cb_addr) & (PAGE_SIZE - 1)) != 0) {
+ ehea_error("not on page boundary");
+ return H_PARAMETER;
+ }
+
+ hret = ehea_hcall_9arg_9ret(H_MODIFY_HEA_QP,
+ adapter_handle, /* R4 */
+ (u64) cat, /* R5 */
+ qp_handle, /* R6 */
+ sel_mask, /* R7 */
+ virt_to_abs(cb_addr), /* R8 */
+ 0, 0, 0, 0, /* R9-R12 */
+ inv_attr_id, /* R4 */
+ &dummy, /* R5 */
+ &dummy, /* R6 */
+ &act_out_swr, /* R7 */
+ &act_out_rwr, /* R8 */
+ proc_mask, /* R9 */
+ &dummy, /* R10 */
+ &dummy, /* R11 */
+ &dummy); /* R12 */
+ *out_swr = act_out_swr;
+ *out_rwr = act_out_rwr;
+
+ return hret;
+}
+
+u64 ehea_h_register_rpage(const u64 adapter_handle, const u8 pagesize,
+ const u8 queue_type, const u64 resource_handle,
+ const u64 log_pageaddr, u64 count)
+{
+ u64 dummy, reg_control;
+
+ reg_control = EHEA_BMASK_SET(H_REG_RPAGE_PAGE_SIZE, pagesize)
+ | EHEA_BMASK_SET(H_REG_RPAGE_QT, queue_type);
+
+ return ehea_hcall_9arg_9ret(H_REGISTER_HEA_RPAGES,
+ adapter_handle, /* R4 */
+ reg_control, /* R5 */
+ resource_handle, /* R6 */
+ log_pageaddr, /* R7 */
+ count, /* R8 */
+ 0, 0, 0, 0, /* R9-R12 */
+ &dummy, /* R4 */
+ &dummy, /* R5 */
+ &dummy, /* R6 */
+ &dummy, /* R7 */
+ &dummy, /* R8 */
+ &dummy, /* R9 */
+ &dummy, /* R10 */
+ &dummy, /* R11 */
+ &dummy); /* R12 */
+}
+
+u64 ehea_h_register_smr(const u64 adapter_handle, const u64 orig_mr_handle,
+ const u64 vaddr_in, const u32 access_ctrl, const u32 pd,
+ struct ehea_mr *mr)
+{
+ u64 hret, dummy, lkey_out;
+
+ hret = ehea_hcall_9arg_9ret(H_REGISTER_SMR,
+				    adapter_handle,	       /* R4 */
+ orig_mr_handle, /* R5 */
+ vaddr_in, /* R6 */
+ (((u64)access_ctrl) << 32ULL), /* R7 */
+ pd, /* R8 */
+ 0, 0, 0, 0, /* R9-R12 */
+ &mr->handle, /* R4 */
+ &dummy, /* R5 */
+ &lkey_out, /* R6 */
+ &dummy, /* R7 */
+ &dummy, /* R8 */
+ &dummy, /* R9 */
+ &dummy, /* R10 */
+ &dummy, /* R11 */
+ &dummy); /* R12 */
+ mr->lkey = (u32)lkey_out;
+
+ return hret;
+}
+
+u64 ehea_h_disable_and_get_hea(const u64 adapter_handle, const u64 qp_handle)
+{
+ u64 hret, dummy, ladr_next_sq_wqe_out;
+ u64 ladr_next_rq1_wqe_out, ladr_next_rq2_wqe_out, ladr_next_rq3_wqe_out;
+
+ hret = ehea_hcall_9arg_9ret(H_DISABLE_AND_GET_HEA,
+ adapter_handle, /* R4 */
+ H_DISABLE_GET_EHEA_WQE_P, /* R5 */
+ qp_handle, /* R6 */
+ 0, 0, 0, 0, 0, 0, /* R7-R12 */
+ &ladr_next_sq_wqe_out, /* R4 */
+ &ladr_next_rq1_wqe_out, /* R5 */
+ &ladr_next_rq2_wqe_out, /* R6 */
+ &ladr_next_rq3_wqe_out, /* R7 */
+ &dummy, /* R8 */
+ &dummy, /* R9 */
+ &dummy, /* R10 */
+ &dummy, /* R11 */
+ &dummy); /* R12 */
+ return hret;
+}
+
+u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle)
+{
+ u64 dummy;
+
+ return ehea_hcall_9arg_9ret(H_FREE_RESOURCE,
+ adapter_handle, /* R4 */
+ res_handle, /* R5 */
+ 0, 0, 0, 0, 0, 0, 0, /* R6-R12 */
+ &dummy, /* R4 */
+ &dummy, /* R5 */
+ &dummy, /* R6 */
+ &dummy, /* R7 */
+ &dummy, /* R8 */
+ &dummy, /* R9 */
+ &dummy, /* R10 */
+ &dummy, /* R11 */
+ &dummy); /* R12 */
+}
+
+u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr,
+ const u64 length, const u32 access_ctrl,
+ const u32 pd, u64 *mr_handle, u32 *lkey)
+{
+ u64 hret, dummy, lkey_out;
+
+ hret = ehea_hcall_9arg_9ret(H_ALLOC_HEA_RESOURCE,
+ adapter_handle, /* R4 */
+				    H_ALL_RES_TYPE_MR,	       /* R5 */
+ vaddr, /* R6 */
+ length, /* R7 */
+ (((u64) access_ctrl) << 32ULL),/* R8 */
+ pd, /* R9 */
+ 0, 0, 0, /* R10-R12 */
+ mr_handle, /* R4 */
+ &dummy, /* R5 */
+ &lkey_out, /* R6 */
+ &dummy, /* R7 */
+ &dummy, /* R8 */
+ &dummy, /* R9 */
+ &dummy, /* R10 */
+ &dummy, /* R11 */
+ &dummy); /* R12 */
+ *lkey = (u32) lkey_out;
+
+ return hret;
+}
+
+u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle,
+ const u8 pagesize, const u8 queue_type,
+ const u64 log_pageaddr, const u64 count)
+{
+ if ((count > 1) && (log_pageaddr & 0xfff)) {
+		ehea_error("not on page boundary");
+ return H_PARAMETER;
+ }
+
+ return ehea_h_register_rpage(adapter_handle, pagesize,
+ queue_type, mr_handle,
+ log_pageaddr, count);
+}
+
+u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr)
+{
+ u64 hret, dummy, cb_logaddr;
+
+ cb_logaddr = virt_to_abs(cb_addr);
+
+ hret = ehea_hcall_9arg_9ret(H_QUERY_HEA,
+ adapter_handle, /* R4 */
+ cb_logaddr, /* R5 */
+ 0, 0, 0, 0, 0, 0, 0, /* R6-R12 */
+ &dummy, /* R4 */
+ &dummy, /* R5 */
+ &dummy, /* R6 */
+ &dummy, /* R7 */
+ &dummy, /* R8 */
+ &dummy, /* R9 */
+ &dummy, /* R10 */
+ &dummy, /* R11 */
+ &dummy); /* R12 */
+#ifdef DEBUG
+	ehea_dump(cb_addr, sizeof(struct hcp_query_ehea), "hcp_query_ehea");
+#endif
+ return hret;
+}
+
+u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num,
+ const u8 cb_cat, const u64 select_mask,
+ void *cb_addr)
+{
+ u64 port_info, dummy;
+ u64 cb_logaddr = virt_to_abs(cb_addr);
+ u64 arr_index = 0;
+
+ port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat)
+ | EHEA_BMASK_SET(H_MEHEAPORT_PN, port_num);
+
+ return ehea_hcall_9arg_9ret(H_QUERY_HEA_PORT,
+ adapter_handle, /* R4 */
+ port_info, /* R5 */
+ select_mask, /* R6 */
+ arr_index, /* R7 */
+ cb_logaddr, /* R8 */
+ 0, 0, 0, 0, /* R9-R12 */
+ &dummy, /* R4 */
+ &dummy, /* R5 */
+ &dummy, /* R6 */
+ &dummy, /* R7 */
+ &dummy, /* R8 */
+ &dummy, /* R9 */
+ &dummy, /* R10 */
+ &dummy, /* R11 */
+ &dummy); /* R12 */
+}
+
+u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num,
+ const u8 cb_cat, const u64 select_mask,
+ void *cb_addr)
+{
+ u64 port_info, dummy, inv_attr_ident, proc_mask;
+ u64 arr_index = 0;
+ u64 cb_logaddr = virt_to_abs(cb_addr);
+
+ port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat)
+ | EHEA_BMASK_SET(H_MEHEAPORT_PN, port_num);
+#ifdef DEBUG
+ ehea_dump(cb_addr, sizeof(struct hcp_ehea_port_cb0), "Before HCALL");
+#endif
+ return ehea_hcall_9arg_9ret(H_MODIFY_HEA_PORT,
+ adapter_handle, /* R4 */
+ port_info, /* R5 */
+ select_mask, /* R6 */
+ arr_index, /* R7 */
+ cb_logaddr, /* R8 */
+ 0, 0, 0, 0, /* R9-R12 */
+ &inv_attr_ident, /* R4 */
+ &proc_mask, /* R5 */
+ &dummy, /* R6 */
+ &dummy, /* R7 */
+ &dummy, /* R8 */
+ &dummy, /* R9 */
+ &dummy, /* R10 */
+ &dummy, /* R11 */
+ &dummy); /* R12 */
+}
+
+u64 ehea_h_reg_dereg_bcmc(const u64 adapter_handle, const u16 port_num,
+ const u8 reg_type, const u64 mc_mac_addr,
+ const u16 vlan_id, const u32 hcall_id)
+{
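+	/* the caller passes the MAC address in the upper 48 bits of
+	 * mc_mac_addr; it is shifted down before H_REGBCMC_MACADDR is
+	 * applied
+	 */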
+ u64 r5_port_num, r6_reg_type, r7_mc_mac_addr, r8_vlan_id, dummy;
+ u64 mac_addr = mc_mac_addr >> 16;
+
+ r5_port_num = EHEA_BMASK_SET(H_REGBCMC_PN, port_num);
+ r6_reg_type = EHEA_BMASK_SET(H_REGBCMC_REGTYPE, reg_type);
+ r7_mc_mac_addr = EHEA_BMASK_SET(H_REGBCMC_MACADDR, mac_addr);
+ r8_vlan_id = EHEA_BMASK_SET(H_REGBCMC_VLANID, vlan_id);
+
+ return ehea_hcall_9arg_9ret(hcall_id,
+ adapter_handle, /* R4 */
+ r5_port_num, /* R5 */
+ r6_reg_type, /* R6 */
+ r7_mc_mac_addr, /* R7 */
+ r8_vlan_id, /* R8 */
+ 0, 0, 0, 0, /* R9-R12 */
+ &dummy, /* R4 */
+ &dummy, /* R5 */
+ &dummy, /* R6 */
+ &dummy, /* R7 */
+ &dummy, /* R8 */
+ &dummy, /* R9 */
+ &dummy, /* R10 */
+ &dummy, /* R11 */
+ &dummy); /* R12 */
+}
+
+u64 ehea_h_reset_events(const u64 adapter_handle, const u64 neq_handle,
+ const u64 event_mask)
+{
+ u64 dummy;
+
+ return ehea_hcall_9arg_9ret(H_RESET_EVENTS,
+ adapter_handle, /* R4 */
+ neq_handle, /* R5 */
+ event_mask, /* R6 */
+ 0, 0, 0, 0, 0, 0, /* R7-R12 */
+ &dummy, /* R4 */
+ &dummy, /* R5 */
+ &dummy, /* R6 */
+ &dummy, /* R7 */
+ &dummy, /* R8 */
+ &dummy, /* R9 */
+ &dummy, /* R10 */
+ &dummy, /* R11 */
+ &dummy); /* R12 */
+}
diff --git a/drivers/net/ehea/ehea_phyp.h b/drivers/net/ehea/ehea_phyp.h
new file mode 100644
index 00000000000..fa51e3b5bb0
--- /dev/null
+++ b/drivers/net/ehea/ehea_phyp.h
@@ -0,0 +1,455 @@
+/*
+ * linux/drivers/net/ehea/ehea_phyp.h
+ *
+ * eHEA ethernet device driver for IBM eServer System p
+ *
+ * (C) Copyright IBM Corp. 2006
+ *
+ * Authors:
+ * Christoph Raisch <raisch@de.ibm.com>
+ * Jan-Bernd Themann <themann@de.ibm.com>
+ * Thomas Klein <tklein@de.ibm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __EHEA_PHYP_H__
+#define __EHEA_PHYP_H__
+
+#include <linux/delay.h>
+#include <asm/hvcall.h>
+#include "ehea.h"
+#include "ehea_hw.h"
+#include "ehea_hcall.h"
+
+/* Some abbreviations used here:
+ *
+ * hcp_* - structures, variables and functions related to Hypervisor Calls
+ */
+
+static inline u32 get_longbusy_msecs(int long_busy_ret_code)
+{
+ switch (long_busy_ret_code) {
+ case H_LONG_BUSY_ORDER_1_MSEC:
+ return 1;
+ case H_LONG_BUSY_ORDER_10_MSEC:
+ return 10;
+ case H_LONG_BUSY_ORDER_100_MSEC:
+ return 100;
+ case H_LONG_BUSY_ORDER_1_SEC:
+ return 1000;
+ case H_LONG_BUSY_ORDER_10_SEC:
+ return 10000;
+ case H_LONG_BUSY_ORDER_100_SEC:
+ return 100000;
+ default:
+ return 1;
+ }
+}
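+
+/* Editor's sketch, not part of this patch: how get_longbusy_msecs() is
+ * meant to be used. A caller that receives a long-busy code sleeps for
+ * the suggested interval and reissues the hcall. This assumes the
+ * H_LONG_BUSY_ORDER_* codes are numerically contiguous as in
+ * asm/hvcall.h; "query" stands for any hcall wrapper of this shape.
+ */
+static inline u64 ehea_hcall_longbusy_retry(u64 (*query)(u64, void *),
+					    u64 adapter_handle, void *cb)
+{
+	u64 hret = query(adapter_handle, cb);
+
+	while (hret >= H_LONG_BUSY_ORDER_1_MSEC &&
+	       hret <= H_LONG_BUSY_ORDER_100_SEC) {
+		msleep(get_longbusy_msecs(hret));
+		hret = query(adapter_handle, cb);
+	}
+	return hret;
+}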
+
+/* Notification Event Queue (NEQ) Entry bit masks */
+#define NEQE_EVENT_CODE EHEA_BMASK_IBM(2, 7)
+#define NEQE_PORTNUM EHEA_BMASK_IBM(32, 47)
+#define NEQE_PORT_UP EHEA_BMASK_IBM(16, 16)
+#define NEQE_EXTSWITCH_PORT_UP EHEA_BMASK_IBM(17, 17)
+#define NEQE_EXTSWITCH_PRIMARY EHEA_BMASK_IBM(18, 18)
+#define NEQE_PLID EHEA_BMASK_IBM(16, 47)
+
+/* Notification Event Codes */
+#define EHEA_EC_PORTSTATE_CHG 0x30
+#define EHEA_EC_ADAPTER_MALFUNC 0x32
+#define EHEA_EC_PORT_MALFUNC 0x33
+
+/* Notification Event Log Register (NELR) bit masks */
+#define NELR_PORT_MALFUNC EHEA_BMASK_IBM(61, 61)
+#define NELR_ADAPTER_MALFUNC EHEA_BMASK_IBM(62, 62)
+#define NELR_PORTSTATE_CHG EHEA_BMASK_IBM(63, 63)
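+
+/* Editor's sketch, not part of this patch: decoding a NEQ entry with
+ * the masks above. Assumes the EHEA_BMASK_GET counterpart of
+ * EHEA_BMASK_SET from ehea_hw.h. Returns nonzero if the entry reports
+ * a port-up state change.
+ */
+static inline int ehea_neqe_port_up(u64 neqe)
+{
+	u8 ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, neqe);
+
+	return (ec == EHEA_EC_PORTSTATE_CHG) &&
+	       EHEA_BMASK_GET(NEQE_PORT_UP, neqe);
+}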
+
+static inline void hcp_epas_ctor(struct h_epas *epas, u64 paddr_kernel,
+ u64 paddr_user)
+{
+ epas->kernel.addr = ioremap(paddr_kernel, PAGE_SIZE);
+ epas->user.addr = paddr_user;
+}
+
+static inline void hcp_epas_dtor(struct h_epas *epas)
+{
+ if (epas->kernel.addr)
+ iounmap(epas->kernel.addr);
+
+	epas->user.addr = 0;
+	epas->kernel.addr = NULL;
+}
+
+struct hcp_modify_qp_cb0 {
+ u64 qp_ctl_reg; /* 00 */
+ u32 max_swqe; /* 02 */
+ u32 max_rwqe; /* 03 */
+ u32 port_nb; /* 04 */
+ u32 reserved0; /* 05 */
+ u64 qp_aer; /* 06 */
+ u64 qp_tenure; /* 08 */
+};
+
+/* Hcall Query/Modify Queue Pair Control Block 0 Selection Mask Bits */
+#define H_QPCB0_ALL EHEA_BMASK_IBM(0, 5)
+#define H_QPCB0_QP_CTL_REG EHEA_BMASK_IBM(0, 0)
+#define H_QPCB0_MAX_SWQE EHEA_BMASK_IBM(1, 1)
+#define H_QPCB0_MAX_RWQE EHEA_BMASK_IBM(2, 2)
+#define H_QPCB0_PORT_NB EHEA_BMASK_IBM(3, 3)
+#define H_QPCB0_QP_AER EHEA_BMASK_IBM(4, 4)
+#define H_QPCB0_QP_TENURE EHEA_BMASK_IBM(5, 5)
+
+/* Queue Pair Control Register Status Bits */
+#define H_QP_CR_ENABLED 0x8000000000000000ULL /* QP enabled */
+ /* QP States: */
+#define H_QP_CR_STATE_RESET 0x0000010000000000ULL /* Reset */
+#define H_QP_CR_STATE_INITIALIZED 0x0000020000000000ULL /* Initialized */
+#define H_QP_CR_STATE_RDY2RCV 0x0000030000000000ULL /* Ready to recv */
+#define H_QP_CR_STATE_RDY2SND 0x0000050000000000ULL /* Ready to send */
+#define H_QP_CR_STATE_ERROR 0x0000800000000000ULL /* Error */
+
+struct hcp_modify_qp_cb1 {
+ u32 qpn; /* 00 */
+ u32 qp_asyn_ev_eq_nb; /* 01 */
+ u64 sq_cq_handle; /* 02 */
+ u64 rq_cq_handle; /* 04 */
+ /* sgel = scatter gather element */
+ u32 sgel_nb_sq; /* 06 */
+ u32 sgel_nb_rq1; /* 07 */
+ u32 sgel_nb_rq2; /* 08 */
+ u32 sgel_nb_rq3; /* 09 */
+};
+
+/* Hcall Query/Modify Queue Pair Control Block 1 Selection Mask Bits */
+#define H_QPCB1_ALL EHEA_BMASK_IBM(0, 7)
+#define H_QPCB1_QPN EHEA_BMASK_IBM(0, 0)
+#define H_QPCB1_ASYN_EV_EQ_NB EHEA_BMASK_IBM(1, 1)
+#define H_QPCB1_SQ_CQ_HANDLE EHEA_BMASK_IBM(2, 2)
+#define H_QPCB1_RQ_CQ_HANDLE EHEA_BMASK_IBM(3, 3)
+#define H_QPCB1_SGEL_NB_SQ EHEA_BMASK_IBM(4, 4)
+#define H_QPCB1_SGEL_NB_RQ1 EHEA_BMASK_IBM(5, 5)
+#define H_QPCB1_SGEL_NB_RQ2 EHEA_BMASK_IBM(6, 6)
+#define H_QPCB1_SGEL_NB_RQ3 EHEA_BMASK_IBM(7, 7)
+
+struct hcp_query_ehea {
+ u32 cur_num_qps; /* 00 */
+ u32 cur_num_cqs; /* 01 */
+ u32 cur_num_eqs; /* 02 */
+ u32 cur_num_mrs; /* 03 */
+ u32 auth_level; /* 04 */
+ u32 max_num_qps; /* 05 */
+ u32 max_num_cqs; /* 06 */
+ u32 max_num_eqs; /* 07 */
+ u32 max_num_mrs; /* 08 */
+ u32 reserved0; /* 09 */
+ u32 int_clock_freq; /* 10 */
+ u32 max_num_pds; /* 11 */
+ u32 max_num_addr_handles; /* 12 */
+ u32 max_num_cqes; /* 13 */
+ u32 max_num_wqes; /* 14 */
+ u32 max_num_sgel_rq1wqe; /* 15 */
+ u32 max_num_sgel_rq2wqe; /* 16 */
+ u32 max_num_sgel_rq3wqe; /* 17 */
+ u32 mr_page_size; /* 18 */
+ u32 reserved1; /* 19 */
+ u64 max_mr_size; /* 20 */
+ u64 reserved2; /* 22 */
+ u32 num_ports; /* 24 */
+ u32 reserved3; /* 25 */
+ u32 reserved4; /* 26 */
+ u32 reserved5; /* 27 */
+ u64 max_mc_mac; /* 28 */
+ u64 ehea_cap; /* 30 */
+ u32 max_isn_per_eq; /* 32 */
+ u32 max_num_neq; /* 33 */
+ u64 max_num_vlan_ids; /* 34 */
+ u32 max_num_port_group; /* 36 */
+ u32 max_num_phys_port; /* 37 */
+
+};
+
+/* Hcall Query/Modify Port Control Block defines */
+#define H_PORT_CB0 0
+#define H_PORT_CB1 1
+#define H_PORT_CB2 2
+#define H_PORT_CB3 3
+#define H_PORT_CB4 4
+#define H_PORT_CB5 5
+#define H_PORT_CB6 6
+#define H_PORT_CB7 7
+
+struct hcp_ehea_port_cb0 {
+ u64 port_mac_addr;
+ u64 port_rc;
+ u64 reserved0;
+ u32 port_op_state;
+ u32 port_speed;
+ u32 ext_swport_op_state;
+ u32 neg_tpf_prpf;
+ u32 num_default_qps;
+ u32 reserved1;
+ u64 default_qpn_arr[16];
+};
+
+/* Hcall Query/Modify Port Control Block 0 Selection Mask Bits */
+#define H_PORT_CB0_ALL EHEA_BMASK_IBM(0, 7) /* Set all bits */
+#define H_PORT_CB0_MAC EHEA_BMASK_IBM(0, 0) /* MAC address */
+#define H_PORT_CB0_PRC EHEA_BMASK_IBM(1, 1) /* Port Recv Control */
+#define H_PORT_CB0_DEFQPNARRAY EHEA_BMASK_IBM(7, 7) /* Default QPN Array */
+
+/* Hcall Query Port: Returned port speed values */
+#define H_SPEED_10M_H 1 /* 10 Mbps, Half Duplex */
+#define H_SPEED_10M_F 2 /* 10 Mbps, Full Duplex */
+#define H_SPEED_100M_H 3 /* 100 Mbps, Half Duplex */
+#define H_SPEED_100M_F 4 /* 100 Mbps, Full Duplex */
+#define H_SPEED_1G_F 6 /* 1 Gbps, Full Duplex */
+#define H_SPEED_10G_F 8 /* 10 Gbps, Full Duplex */
+
+/* Port Receive Control Status Bits */
+#define PXLY_RC_VALID EHEA_BMASK_IBM(49, 49)
+#define PXLY_RC_VLAN_XTRACT EHEA_BMASK_IBM(50, 50)
+#define PXLY_RC_TCP_6_TUPLE EHEA_BMASK_IBM(51, 51)
+#define PXLY_RC_UDP_6_TUPLE EHEA_BMASK_IBM(52, 52)
+#define PXLY_RC_TCP_3_TUPLE EHEA_BMASK_IBM(53, 53)
+#define PXLY_RC_TCP_2_TUPLE EHEA_BMASK_IBM(54, 54)
+#define PXLY_RC_LLC_SNAP EHEA_BMASK_IBM(55, 55)
+#define PXLY_RC_JUMBO_FRAME EHEA_BMASK_IBM(56, 56)
+#define PXLY_RC_FRAG_IP_PKT EHEA_BMASK_IBM(57, 57)
+#define PXLY_RC_TCP_UDP_CHKSUM EHEA_BMASK_IBM(58, 58)
+#define PXLY_RC_IP_CHKSUM EHEA_BMASK_IBM(59, 59)
+#define PXLY_RC_MAC_FILTER EHEA_BMASK_IBM(60, 60)
+#define PXLY_RC_UNTAG_FILTER EHEA_BMASK_IBM(61, 61)
+#define PXLY_RC_VLAN_TAG_FILTER EHEA_BMASK_IBM(62, 63)
+
+#define PXLY_RC_VLAN_FILTER 2
+#define PXLY_RC_VLAN_PERM 0
+
+
+#define H_PORT_CB1_ALL 0x8000000000000000ULL
+
+struct hcp_ehea_port_cb1 {
+ u64 vlan_filter[64];
+};
+
+#define H_PORT_CB2_ALL 0xFFE0000000000000ULL
+
+struct hcp_ehea_port_cb2 {
+ u64 rxo;
+ u64 rxucp;
+ u64 rxufd;
+ u64 rxuerr;
+ u64 rxftl;
+ u64 rxmcp;
+ u64 rxbcp;
+ u64 txo;
+ u64 txucp;
+ u64 txmcp;
+ u64 txbcp;
+};
+
+struct hcp_ehea_port_cb3 {
+ u64 vlan_bc_filter[64];
+ u64 vlan_mc_filter[64];
+ u64 vlan_un_filter[64];
+ u64 port_mac_hash_array[64];
+};
+
+#define H_PORT_CB4_ALL 0xF000000000000000ULL
+#define H_PORT_CB4_JUMBO 0x1000000000000000ULL
+#define H_PORT_CB4_SPEED 0x8000000000000000ULL
+
+struct hcp_ehea_port_cb4 {
+ u32 port_speed;
+ u32 pause_frame;
+ u32 ens_port_op_state;
+ u32 jumbo_frame;
+ u32 ens_port_wrap;
+};
+
+/* Hcall Query/Modify Port Control Block 5 Selection Mask Bits */
+#define H_PORT_CB5_RCU 0x0001000000000000ULL
+#define PXS_RCU EHEA_BMASK_IBM(61, 63)
+
+struct hcp_ehea_port_cb5 {
+ u64 prc; /* 00 */
+ u64 uaa; /* 01 */
+ u64 macvc; /* 02 */
+ u64 xpcsc; /* 03 */
+ u64 xpcsp; /* 04 */
+ u64 pcsid; /* 05 */
+ u64 xpcsst; /* 06 */
+ u64 pthlb; /* 07 */
+ u64 pthrb; /* 08 */
+ u64 pqu; /* 09 */
+ u64 pqd; /* 10 */
+ u64 prt; /* 11 */
+ u64 wsth; /* 12 */
+ u64 rcb; /* 13 */
+ u64 rcm; /* 14 */
+ u64 rcu; /* 15 */
+ u64 macc; /* 16 */
+ u64 pc; /* 17 */
+ u64 pst; /* 18 */
+ u64 ducqpn; /* 19 */
+ u64 mcqpn; /* 20 */
+ u64 mma; /* 21 */
+ u64 pmc0h; /* 22 */
+ u64 pmc0l; /* 23 */
+ u64 lbc; /* 24 */
+};
+
+#define H_PORT_CB6_ALL 0xFFFFFE7FFFFF8000ULL
+
+struct hcp_ehea_port_cb6 {
+ u64 rxo; /* 00 */
+ u64 rx64; /* 01 */
+ u64 rx65; /* 02 */
+ u64 rx128; /* 03 */
+ u64 rx256; /* 04 */
+ u64 rx512; /* 05 */
+ u64 rx1024; /* 06 */
+ u64 rxbfcs; /* 07 */
+ u64 rxime; /* 08 */
+ u64 rxrle; /* 09 */
+ u64 rxorle; /* 10 */
+ u64 rxftl; /* 11 */
+ u64 rxjab; /* 12 */
+ u64 rxse; /* 13 */
+ u64 rxce; /* 14 */
+ u64 rxrf; /* 15 */
+ u64 rxfrag; /* 16 */
+ u64 rxuoc; /* 17 */
+ u64 rxcpf; /* 18 */
+ u64 rxsb; /* 19 */
+ u64 rxfd; /* 20 */
+ u64 rxoerr; /* 21 */
+ u64 rxaln; /* 22 */
+ u64 ducqpn; /* 23 */
+ u64 reserved0; /* 24 */
+ u64 rxmcp; /* 25 */
+ u64 rxbcp; /* 26 */
+ u64 txmcp; /* 27 */
+ u64 txbcp; /* 28 */
+ u64 txo; /* 29 */
+ u64 tx64; /* 30 */
+ u64 tx65; /* 31 */
+ u64 tx128; /* 32 */
+ u64 tx256; /* 33 */
+ u64 tx512; /* 34 */
+ u64 tx1024; /* 35 */
+ u64 txbfcs; /* 36 */
+ u64 txcpf; /* 37 */
+ u64 txlf; /* 38 */
+ u64 txrf; /* 39 */
+ u64 txime; /* 40 */
+ u64 txsc; /* 41 */
+ u64 txmc; /* 42 */
+ u64 txsqe; /* 43 */
+ u64 txdef; /* 44 */
+ u64 txlcol; /* 45 */
+ u64 txexcol; /* 46 */
+ u64 txcse; /* 47 */
+ u64 txbor; /* 48 */
+};
+
+#define H_PORT_CB7_DUCQPN 0x8000000000000000ULL
+
+struct hcp_ehea_port_cb7 {
+ u64 def_uc_qpn;
+};
+
+u64 ehea_h_query_ehea_qp(const u64 adapter_handle,
+ const u8 qp_category,
+ const u64 qp_handle, const u64 sel_mask,
+ void *cb_addr);
+
+u64 ehea_h_modify_ehea_qp(const u64 adapter_handle,
+ const u8 cat,
+ const u64 qp_handle,
+ const u64 sel_mask,
+ void *cb_addr,
+			  u64 *inv_attr_id,
+			  u64 *proc_mask, u16 *out_swr, u16 *out_rwr);
+
+u64 ehea_h_alloc_resource_eq(const u64 adapter_handle,
+			     struct ehea_eq_attr *eq_attr, u64 *eq_handle);
+
+u64 ehea_h_alloc_resource_cq(const u64 adapter_handle,
+			     struct ehea_cq_attr *cq_attr,
+			     u64 *cq_handle, struct h_epas *epas);
+
+u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
+			     struct ehea_qp_init_attr *init_attr,
+			     const u32 pd,
+			     u64 *qp_handle, struct h_epas *h_epas);
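+
+/* Editor's sketch, not part of this patch: driving a QP from RESET to
+ * INITIALIZED via the two prototypes above, using the H_QPCB0_*
+ * selection masks and H_QP_CR_* state bits. "cb0" must be a
+ * hypervisor-accessible buffer; error handling is elided.
+ */
+static inline u64 ehea_qp_to_init_sketch(struct ehea_adapter *adapter,
+					 u64 qp_handle,
+					 struct hcp_modify_qp_cb0 *cb0)
+{
+	u64 hret, dummy64;
+	u16 dummy16;
+
+	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp_handle,
+				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
+	if (hret != H_SUCCESS)
+		return hret;
+
+	cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED | H_QP_CR_ENABLED;
+	return ehea_h_modify_ehea_qp(adapter->handle, 0, qp_handle,
+				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1),
+				     cb0, &dummy64, &dummy64,
+				     &dummy16, &dummy16);
+}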
+
+#define H_REG_RPAGE_PAGE_SIZE	EHEA_BMASK_IBM(48, 55)
+#define H_REG_RPAGE_QT		EHEA_BMASK_IBM(62, 63)
+
+u64 ehea_h_register_rpage(const u64 adapter_handle,
+ const u8 pagesize,
+ const u8 queue_type,
+ const u64 resource_handle,
+ const u64 log_pageaddr, u64 count);
+
+#define H_DISABLE_GET_EHEA_WQE_P 1
+#define H_DISABLE_GET_SQ_WQE_P 2
+#define H_DISABLE_GET_RQC 3
+
+u64 ehea_h_disable_and_get_hea(const u64 adapter_handle, const u64 qp_handle);
+
+u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle);
+
+u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr,
+ const u64 length, const u32 access_ctrl,
+			     const u32 pd, u64 *mr_handle, u32 *lkey);
+
+u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle,
+ const u8 pagesize, const u8 queue_type,
+ const u64 log_pageaddr, const u64 count);
+
+u64 ehea_h_register_smr(const u64 adapter_handle, const u64 orig_mr_handle,
+ const u64 vaddr_in, const u32 access_ctrl, const u32 pd,
+ struct ehea_mr *mr);
+
+u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr);
+
+/* input param R5 */
+#define H_MEHEAPORT_CAT		EHEA_BMASK_IBM(40, 47)
+#define H_MEHEAPORT_PN		EHEA_BMASK_IBM(48, 63)
+
+u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num,
+ const u8 cb_cat, const u64 select_mask,
+ void *cb_addr);
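+
+/* Editor's sketch, not part of this patch: reading a port's MAC address
+ * through control block 0 with the prototype above, selecting only the
+ * H_PORT_CB0_MAC attribute. The cb buffer is taken from a zeroed page
+ * so that it is suitably aligned for the hypervisor.
+ */
+static inline u64 ehea_port_mac_sketch(struct ehea_adapter *adapter,
+				       u16 logical_port, u64 *mac)
+{
+	struct hcp_ehea_port_cb0 *cb0;
+	u64 hret;
+
+	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
+	if (!cb0)
+		return H_RESOURCE;
+
+	hret = ehea_h_query_ehea_port(adapter->handle, logical_port,
+				      H_PORT_CB0,
+				      EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
+	if (hret == H_SUCCESS)
+		*mac = cb0->port_mac_addr;
+
+	free_page((unsigned long)cb0);
+	return hret;
+}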
+
+u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num,
+ const u8 cb_cat, const u64 select_mask,
+ void *cb_addr);
+
+#define H_REGBCMC_PN EHEA_BMASK_IBM(48, 63)
+#define H_REGBCMC_REGTYPE EHEA_BMASK_IBM(61, 63)
+#define H_REGBCMC_MACADDR EHEA_BMASK_IBM(16, 63)
+#define H_REGBCMC_VLANID EHEA_BMASK_IBM(52, 63)
+
+u64 ehea_h_reg_dereg_bcmc(const u64 adapter_handle, const u16 port_num,
+ const u8 reg_type, const u64 mc_mac_addr,
+ const u16 vlan_id, const u32 hcall_id);
+
+u64 ehea_h_reset_events(const u64 adapter_handle, const u64 neq_handle,
+ const u64 event_mask);
+
+#endif /* __EHEA_PHYP_H__ */
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
new file mode 100644
index 00000000000..3e1862326c8
--- /dev/null
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -0,0 +1,582 @@
+/*
+ * linux/drivers/net/ehea/ehea_qmr.c
+ *
+ * eHEA ethernet device driver for IBM eServer System p
+ *
+ * (C) Copyright IBM Corp. 2006
+ *
+ * Authors:
+ * Christoph Raisch <raisch@de.ibm.com>
+ * Jan-Bernd Themann <themann@de.ibm.com>
+ * Thomas Klein <tklein@de.ibm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "ehea.h"
+#include "ehea_phyp.h"
+#include "ehea_qmr.h"
+
+static void *hw_qpageit_get_inc(struct hw_queue *queue)
+{
+ void *retvalue = hw_qeit_get(queue);
+
+ queue->current_q_offset += queue->pagesize;
+ if (queue->current_q_offset > queue->queue_length) {
+ queue->current_q_offset -= queue->pagesize;
+ retvalue = NULL;
+ } else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) {
+		ehea_error("not on page boundary");
+ retvalue = NULL;
+ }
+ return retvalue;
+}
+
+static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
+ const u32 pagesize, const u32 qe_size)
+{
+ int pages_per_kpage = PAGE_SIZE / pagesize;
+ int i, k;
+
+ if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) {
+ ehea_error("pagesize conflict! kernel pagesize=%d, "
+ "ehea pagesize=%d", (int)PAGE_SIZE, (int)pagesize);
+ return -EINVAL;
+ }
+
+ queue->queue_length = nr_of_pages * pagesize;
+	queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
+ if (!queue->queue_pages) {
+ ehea_error("no mem for queue_pages");
+ return -ENOMEM;
+ }
+
+ /*
+ * allocate pages for queue:
+ * outer loop allocates whole kernel pages (page aligned) and
+ * inner loop divides a kernel page into smaller hea queue pages
+ */
+ i = 0;
+ while (i < nr_of_pages) {
+		u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
+		if (!kpage)
+			goto out_nomem;
+		for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {
+			(queue->queue_pages)[i] = (struct ehea_page *)kpage;
+ kpage += pagesize;
+ i++;
+ }
+ }
+
+ queue->current_q_offset = 0;
+ queue->qe_size = qe_size;
+ queue->pagesize = pagesize;
+ queue->toggle_state = 1;
+
+ return 0;
+out_nomem:
+ for (i = 0; i < nr_of_pages; i += pages_per_kpage) {
+ if (!(queue->queue_pages)[i])
+ break;
+ free_page((unsigned long)(queue->queue_pages)[i]);
+ }
+ return -ENOMEM;
+}
+
+static void hw_queue_dtor(struct hw_queue *queue)
+{
+	int pages_per_kpage;
+	int i, nr_pages;
+
+	if (!queue || !queue->queue_pages)
+		return;
+
+	pages_per_kpage = PAGE_SIZE / queue->pagesize;
+	nr_pages = queue->queue_length / queue->pagesize;
+
+ for (i = 0; i < nr_pages; i += pages_per_kpage)
+ free_page((unsigned long)(queue->queue_pages)[i]);
+
+ kfree(queue->queue_pages);
+}
+
+struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
+ int nr_of_cqe, u64 eq_handle, u32 cq_token)
+{
+ struct ehea_cq *cq;
+	u64 hret, rpage;
+	u32 counter;
+ int ret;
+ void *vpage;
+
+ cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+ if (!cq) {
+ ehea_error("no mem for cq");
+ goto out_nomem;
+ }
+
+ cq->attr.max_nr_of_cqes = nr_of_cqe;
+ cq->attr.cq_token = cq_token;
+ cq->attr.eq_handle = eq_handle;
+
+ cq->adapter = adapter;
+
+ hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
+ &cq->fw_handle, &cq->epas);
+ if (hret != H_SUCCESS) {
+ ehea_error("alloc_resource_cq failed");
+ goto out_freemem;
+ }
+
+ ret = hw_queue_ctor(&cq->hw_queue, cq->attr.nr_pages,
+ EHEA_PAGESIZE, sizeof(struct ehea_cqe));
+ if (ret)
+ goto out_freeres;
+
+ for (counter = 0; counter < cq->attr.nr_pages; counter++) {
+ vpage = hw_qpageit_get_inc(&cq->hw_queue);
+ if (!vpage) {
+ ehea_error("hw_qpageit_get_inc failed");
+ goto out_kill_hwq;
+ }
+
+ rpage = virt_to_abs(vpage);
+ hret = ehea_h_register_rpage(adapter->handle,
+ 0, EHEA_CQ_REGISTER_ORIG,
+ cq->fw_handle, rpage, 1);
+		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) {
+ ehea_error("register_rpage_cq failed ehea_cq=%p "
+ "hret=%lx counter=%i act_pages=%i",
+ cq, hret, counter, cq->attr.nr_pages);
+ goto out_kill_hwq;
+ }
+
+ if (counter == (cq->attr.nr_pages - 1)) {
+ vpage = hw_qpageit_get_inc(&cq->hw_queue);
+
+ if ((hret != H_SUCCESS) || (vpage)) {
+ ehea_error("registration of pages not "
+ "complete hret=%lx\n", hret);
+ goto out_kill_hwq;
+ }
+ } else {
+ if ((hret != H_PAGE_REGISTERED) || (!vpage)) {
+ ehea_error("CQ: registration of page failed "
+ "hret=%lx\n", hret);
+ goto out_kill_hwq;
+ }
+ }
+ }
+
+ hw_qeit_reset(&cq->hw_queue);
+ ehea_reset_cq_ep(cq);
+ ehea_reset_cq_n1(cq);
+
+ return cq;
+
+out_kill_hwq:
+ hw_queue_dtor(&cq->hw_queue);
+
+out_freeres:
+ ehea_h_free_resource(adapter->handle, cq->fw_handle);
+
+out_freemem:
+ kfree(cq);
+
+out_nomem:
+ return NULL;
+}
+
+int ehea_destroy_cq(struct ehea_cq *cq)
+{
+	u64 adapter_handle, hret;
+
+	if (!cq)
+		return 0;
+
+	adapter_handle = cq->adapter->handle;
+
+	/* deregister all previously registered pages */
+ hret = ehea_h_free_resource(adapter_handle, cq->fw_handle);
+ if (hret != H_SUCCESS) {
+ ehea_error("destroy CQ failed");
+ return -EIO;
+ }
+
+ hw_queue_dtor(&cq->hw_queue);
+ kfree(cq);
+
+ return 0;
+}
+
+struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
+ const enum ehea_eq_type type,
+ const u32 max_nr_of_eqes, const u8 eqe_gen)
+{
+ int ret, i;
+ u64 hret, rpage;
+ void *vpage;
+ struct ehea_eq *eq;
+
+ eq = kzalloc(sizeof(*eq), GFP_KERNEL);
+ if (!eq) {
+ ehea_error("no mem for eq");
+ return NULL;
+ }
+
+ eq->adapter = adapter;
+ eq->attr.type = type;
+ eq->attr.max_nr_of_eqes = max_nr_of_eqes;
+ eq->attr.eqe_gen = eqe_gen;
+ spin_lock_init(&eq->spinlock);
+
+ hret = ehea_h_alloc_resource_eq(adapter->handle,
+ &eq->attr, &eq->fw_handle);
+ if (hret != H_SUCCESS) {
+ ehea_error("alloc_resource_eq failed");
+ goto out_freemem;
+ }
+
+ ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages,
+ EHEA_PAGESIZE, sizeof(struct ehea_eqe));
+ if (ret) {
+ ehea_error("can't allocate eq pages");
+ goto out_freeres;
+ }
+
+ for (i = 0; i < eq->attr.nr_pages; i++) {
+ vpage = hw_qpageit_get_inc(&eq->hw_queue);
+ if (!vpage) {
+ ehea_error("hw_qpageit_get_inc failed");
+ hret = H_RESOURCE;
+ goto out_kill_hwq;
+ }
+
+ rpage = virt_to_abs(vpage);
+
+ hret = ehea_h_register_rpage(adapter->handle, 0,
+ EHEA_EQ_REGISTER_ORIG,
+ eq->fw_handle, rpage, 1);
+
+ if (i == (eq->attr.nr_pages - 1)) {
+ /* last page */
+ vpage = hw_qpageit_get_inc(&eq->hw_queue);
+ if ((hret != H_SUCCESS) || (vpage)) {
+ goto out_kill_hwq;
+ }
+ } else {
+ if ((hret != H_PAGE_REGISTERED) || (!vpage)) {
+ goto out_kill_hwq;
+ }
+ }
+ }
+
+ hw_qeit_reset(&eq->hw_queue);
+ return eq;
+
+out_kill_hwq:
+ hw_queue_dtor(&eq->hw_queue);
+
+out_freeres:
+ ehea_h_free_resource(adapter->handle, eq->fw_handle);
+
+out_freemem:
+ kfree(eq);
+ return NULL;
+}
+
+struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
+{
+ struct ehea_eqe *eqe;
+ unsigned long flags;
+
+ spin_lock_irqsave(&eq->spinlock, flags);
+	eqe = (struct ehea_eqe *)hw_eqit_eq_get_inc_valid(&eq->hw_queue);
+ spin_unlock_irqrestore(&eq->spinlock, flags);
+
+ return eqe;
+}
+
+int ehea_destroy_eq(struct ehea_eq *eq)
+{
+ u64 hret;
+ unsigned long flags;
+
+ if (!eq)
+ return 0;
+
+ spin_lock_irqsave(&eq->spinlock, flags);
+
+ hret = ehea_h_free_resource(eq->adapter->handle, eq->fw_handle);
+ spin_unlock_irqrestore(&eq->spinlock, flags);
+
+ if (hret != H_SUCCESS) {
+ ehea_error("destroy_eq failed");
+ return -EIO;
+ }
+
+ hw_queue_dtor(&eq->hw_queue);
+ kfree(eq);
+
+ return 0;
+}
+
+/*
+ * ehea_qp_alloc_register - allocate memory for a queue and register
+ * its pages with the hypervisor
+ */
+int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
+ int nr_pages, int wqe_size, int act_nr_sges,
+ struct ehea_adapter *adapter, int h_call_q_selector)
+{
+ u64 hret, rpage;
+ int ret, cnt;
+ void *vpage;
+
+ ret = hw_queue_ctor(hw_queue, nr_pages, EHEA_PAGESIZE, wqe_size);
+ if (ret)
+ return ret;
+
+ for (cnt = 0; cnt < nr_pages; cnt++) {
+ vpage = hw_qpageit_get_inc(hw_queue);
+ if (!vpage) {
+ ehea_error("hw_qpageit_get_inc failed");
+ goto out_kill_hwq;
+ }
+ rpage = virt_to_abs(vpage);
+ hret = ehea_h_register_rpage(adapter->handle,
+ 0, h_call_q_selector,
+ qp->fw_handle, rpage, 1);
+		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) {
+ ehea_error("register_rpage_qp failed");
+ goto out_kill_hwq;
+ }
+ }
+ hw_qeit_reset(hw_queue);
+ return 0;
+
+out_kill_hwq:
+ hw_queue_dtor(hw_queue);
+ return -EIO;
+}
+
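+/* WQE sizes are stored encoded in the queue attributes: the size in
+ * bytes is 128 << enc_size, so e.g. an encoded size of 2 means
+ * 512-byte WQEs.
+ */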
+static inline u32 map_wqe_size(u8 wqe_enc_size)
+{
+ return 128 << wqe_enc_size;
+}
+
+struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
+ u32 pd, struct ehea_qp_init_attr *init_attr)
+{
+ int ret;
+ u64 hret;
+ struct ehea_qp *qp;
+ u32 wqe_size_in_bytes_sq, wqe_size_in_bytes_rq1;
+ u32 wqe_size_in_bytes_rq2, wqe_size_in_bytes_rq3;
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp) {
+ ehea_error("no mem for qp");
+ return NULL;
+ }
+
+ qp->adapter = adapter;
+
+ hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd,
+ &qp->fw_handle, &qp->epas);
+ if (hret != H_SUCCESS) {
+ ehea_error("ehea_h_alloc_resource_qp failed");
+ goto out_freemem;
+ }
+
+ wqe_size_in_bytes_sq = map_wqe_size(init_attr->act_wqe_size_enc_sq);
+ wqe_size_in_bytes_rq1 = map_wqe_size(init_attr->act_wqe_size_enc_rq1);
+ wqe_size_in_bytes_rq2 = map_wqe_size(init_attr->act_wqe_size_enc_rq2);
+ wqe_size_in_bytes_rq3 = map_wqe_size(init_attr->act_wqe_size_enc_rq3);
+
+ ret = ehea_qp_alloc_register(qp, &qp->hw_squeue, init_attr->nr_sq_pages,
+ wqe_size_in_bytes_sq,
+ init_attr->act_wqe_size_enc_sq, adapter,
+ 0);
+ if (ret) {
+ ehea_error("can't register for sq ret=%x", ret);
+ goto out_freeres;
+ }
+
+ ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue1,
+ init_attr->nr_rq1_pages,
+ wqe_size_in_bytes_rq1,
+ init_attr->act_wqe_size_enc_rq1,
+ adapter, 1);
+ if (ret) {
+ ehea_error("can't register for rq1 ret=%x", ret);
+ goto out_kill_hwsq;
+ }
+
+ if (init_attr->rq_count > 1) {
+ ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue2,
+ init_attr->nr_rq2_pages,
+ wqe_size_in_bytes_rq2,
+ init_attr->act_wqe_size_enc_rq2,
+ adapter, 2);
+ if (ret) {
+ ehea_error("can't register for rq2 ret=%x", ret);
+ goto out_kill_hwr1q;
+ }
+ }
+
+ if (init_attr->rq_count > 2) {
+ ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue3,
+ init_attr->nr_rq3_pages,
+ wqe_size_in_bytes_rq3,
+ init_attr->act_wqe_size_enc_rq3,
+ adapter, 3);
+ if (ret) {
+ ehea_error("can't register for rq3 ret=%x", ret);
+ goto out_kill_hwr2q;
+ }
+ }
+
+ qp->init_attr = *init_attr;
+
+ return qp;
+
+out_kill_hwr2q:
+ hw_queue_dtor(&qp->hw_rqueue2);
+
+out_kill_hwr1q:
+ hw_queue_dtor(&qp->hw_rqueue1);
+
+out_kill_hwsq:
+ hw_queue_dtor(&qp->hw_squeue);
+
+out_freeres:
+ ehea_h_disable_and_get_hea(adapter->handle, qp->fw_handle);
+ ehea_h_free_resource(adapter->handle, qp->fw_handle);
+
+out_freemem:
+ kfree(qp);
+ return NULL;
+}
+
+int ehea_destroy_qp(struct ehea_qp *qp)
+{
+	u64 hret;
+	struct ehea_qp_init_attr *qp_attr;
+
+	if (!qp)
+		return 0;
+
+	qp_attr = &qp->init_attr;
+
+ hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle);
+ if (hret != H_SUCCESS) {
+ ehea_error("destroy_qp failed");
+ return -EIO;
+ }
+
+ hw_queue_dtor(&qp->hw_squeue);
+ hw_queue_dtor(&qp->hw_rqueue1);
+
+ if (qp_attr->rq_count > 1)
+ hw_queue_dtor(&qp->hw_rqueue2);
+ if (qp_attr->rq_count > 2)
+ hw_queue_dtor(&qp->hw_rqueue3);
+ kfree(qp);
+
+ return 0;
+}
+
+int ehea_reg_mr_adapter(struct ehea_adapter *adapter)
+{
+ int i, k, ret;
+ u64 hret, pt_abs, start, end, nr_pages;
+ u32 acc_ctrl = EHEA_MR_ACC_CTRL;
+ u64 *pt;
+
+ start = KERNELBASE;
+ end = (u64)high_memory;
+ nr_pages = (end - start) / PAGE_SIZE;
+
+ pt = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!pt) {
+ ehea_error("no mem");
+ ret = -ENOMEM;
+ goto out;
+ }
+ pt_abs = virt_to_abs(pt);
+
+ hret = ehea_h_alloc_resource_mr(adapter->handle, start, end - start,
+ acc_ctrl, adapter->pd,
+ &adapter->mr.handle, &adapter->mr.lkey);
+ if (hret != H_SUCCESS) {
+ ehea_error("alloc_resource_mr failed");
+ ret = -EIO;
+ goto out;
+ }
+
+ adapter->mr.vaddr = KERNELBASE;
+ k = 0;
+
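+	/*
+	 * Register the kernel address range chunk-wise: up to 512 page
+	 * addresses are collected in the page table buffer (pt) and
+	 * handed to the hypervisor in one hcall; a final single page is
+	 * registered directly by its address.
+	 */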
+ while (nr_pages > 0) {
+ if (nr_pages > 1) {
+ u64 num_pages = min(nr_pages, (u64)512);
+ for (i = 0; i < num_pages; i++)
+				pt[i] = virt_to_abs((void *)(((u64)start)
+						    + ((k++) *
+						       PAGE_SIZE)));
+
+ hret = ehea_h_register_rpage_mr(adapter->handle,
+ adapter->mr.handle, 0,
+ 0, (u64)pt_abs,
+ num_pages);
+ nr_pages -= num_pages;
+ } else {
+			u64 abs_adr = virt_to_abs((void *)(((u64)start)
+					+ (k * PAGE_SIZE)));
+			hret = ehea_h_register_rpage_mr(adapter->handle,
+							adapter->mr.handle, 0,
+							0, abs_adr, 1);
+ nr_pages--;
+ }
+
+ if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) {
+ ehea_h_free_resource(adapter->handle,
+ adapter->mr.handle);
+ ehea_error("register_rpage_mr failed: hret = %lX",
+ hret);
+ ret = -EIO;
+ goto out;
+ }
+ }
+
+ if (hret != H_SUCCESS) {
+ ehea_h_free_resource(adapter->handle, adapter->mr.handle);
+ ehea_error("register_rpage failed for last page: hret = %lX",
+ hret);
+ ret = -EIO;
+ goto out;
+ }
+ ret = 0;
+out:
+ kfree(pt);
+ return ret;
+}
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h
new file mode 100644
index 00000000000..7efdc96919c
--- /dev/null
+++ b/drivers/net/ehea/ehea_qmr.h
@@ -0,0 +1,358 @@
+/*
+ * linux/drivers/net/ehea/ehea_qmr.h
+ *
+ * eHEA ethernet device driver for IBM eServer System p
+ *
+ * (C) Copyright IBM Corp. 2006
+ *
+ * Authors:
+ * Christoph Raisch <raisch@de.ibm.com>
+ * Jan-Bernd Themann <themann@de.ibm.com>
+ * Thomas Klein <tklein@de.ibm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __EHEA_QMR_H__
+#define __EHEA_QMR_H__
+
+#include "ehea.h"
+#include "ehea_hw.h"
+
+/*
+ * page size of ehea hardware queues
+ */
+
+#define EHEA_PAGESHIFT 12
+#define EHEA_PAGESIZE (1UL << EHEA_PAGESHIFT)
+
+/* Some abbreviations used here:
+ *
+ * WQE - Work Queue Entry
+ * SWQE - Send Work Queue Entry
+ * RWQE - Receive Work Queue Entry
+ * CQE - Completion Queue Entry
+ * EQE - Event Queue Entry
+ * MR - Memory Region
+ */
+
+/* Use of WR_ID field for EHEA */
+#define EHEA_WR_ID_COUNT EHEA_BMASK_IBM(0, 19)
+#define EHEA_WR_ID_TYPE EHEA_BMASK_IBM(20, 23)
+#define EHEA_SWQE2_TYPE 0x1
+#define EHEA_SWQE3_TYPE 0x2
+#define EHEA_RWQE2_TYPE 0x3
+#define EHEA_RWQE3_TYPE 0x4
+#define EHEA_WR_ID_INDEX EHEA_BMASK_IBM(24, 47)
+#define EHEA_WR_ID_REFILL EHEA_BMASK_IBM(48, 63)
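+
+/* Editor's sketch, not part of this patch: composing a 64-bit work
+ * request ID from the fields above; "counter" and "index" stand for a
+ * caller's running WQE counter and its skb array index.
+ */
+static inline u64 ehea_make_wr_id_sketch(u32 counter, u32 index)
+{
+	return EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
+	     | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, counter)
+	     | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
+}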
+
+struct ehea_vsgentry {
+ u64 vaddr;
+ u32 l_key;
+ u32 len;
+};
+
+/* maximum number of sg entries allowed in a WQE */
+#define EHEA_MAX_WQE_SG_ENTRIES 252
+#define SWQE2_MAX_IMM (0xD0 - 0x30)
+#define SWQE3_MAX_IMM 224
+
+/* tx control flags for swqe */
+#define EHEA_SWQE_CRC 0x8000
+#define EHEA_SWQE_IP_CHECKSUM 0x4000
+#define EHEA_SWQE_TCP_CHECKSUM 0x2000
+#define EHEA_SWQE_TSO 0x1000
+#define EHEA_SWQE_SIGNALLED_COMPLETION 0x0800
+#define EHEA_SWQE_VLAN_INSERT 0x0400
+#define EHEA_SWQE_IMM_DATA_PRESENT 0x0200
+#define EHEA_SWQE_DESCRIPTORS_PRESENT 0x0100
+#define EHEA_SWQE_WRAP_CTL_REC 0x0080
+#define EHEA_SWQE_WRAP_CTL_FORCE 0x0040
+#define EHEA_SWQE_BIND 0x0020
+#define EHEA_SWQE_PURGE 0x0010
+
+/* sizeof(struct ehea_swqe) less the union */
+#define SWQE_HEADER_SIZE 32
+
+struct ehea_swqe {
+ u64 wr_id;
+ u16 tx_control;
+ u16 vlan_tag;
+ u8 reserved1;
+ u8 ip_start;
+ u8 ip_end;
+ u8 immediate_data_length;
+ u8 tcp_offset;
+ u8 reserved2;
+ u16 tcp_end;
+ u8 wrap_tag;
+ u8 descriptors; /* number of valid descriptors in WQE */
+ u16 reserved3;
+ u16 reserved4;
+ u16 mss;
+ u32 reserved5;
+ union {
+ /* Send WQE Format 1 */
+ struct {
+ struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
+ } no_immediate_data;
+
+ /* Send WQE Format 2 */
+ struct {
+ struct ehea_vsgentry sg_entry;
+ /* 0x30 */
+ u8 immediate_data[SWQE2_MAX_IMM];
+ /* 0xd0 */
+ struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1];
+ } immdata_desc __attribute__ ((packed));
+
+ /* Send WQE Format 3 */
+ struct {
+ u8 immediate_data[SWQE3_MAX_IMM];
+ } immdata_nodesc;
+ } u;
+};
+
+struct ehea_rwqe {
+ u64 wr_id; /* work request ID */
+ u8 reserved1[5];
+ u8 data_segments;
+ u16 reserved2;
+ u64 reserved3;
+ u64 reserved4;
+ struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
+};
+
+#define EHEA_CQE_VLAN_TAG_XTRACT 0x0400
+
+#define EHEA_CQE_TYPE_RQ 0x60
+#define EHEA_CQE_STAT_ERR_MASK 0x721F
+#define EHEA_CQE_STAT_FAT_ERR_MASK 0x1F
+#define EHEA_CQE_STAT_ERR_TCP 0x4000
+
+struct ehea_cqe {
+ u64 wr_id; /* work request ID from WQE */
+ u8 type;
+ u8 valid;
+ u16 status;
+ u16 reserved1;
+ u16 num_bytes_transfered;
+ u16 vlan_tag;
+ u16 inet_checksum_value;
+ u8 reserved2;
+ u8 header_length;
+ u16 reserved3;
+ u16 page_offset;
+ u16 wqe_count;
+ u32 qp_token;
+ u32 timestamp;
+ u32 reserved4;
+ u64 reserved5[3];
+};
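+
+/* Editor's sketch, not part of this patch: classifying a completion
+ * with the status masks above; bits inside EHEA_CQE_STAT_FAT_ERR_MASK
+ * are fatal, a set EHEA_CQE_STAT_ERR_TCP bit flags a TCP checksum
+ * error.
+ */
+static inline int ehea_cqe_status_sketch(struct ehea_cqe *cqe)
+{
+	if (!(cqe->status & EHEA_CQE_STAT_ERR_MASK))
+		return 0;	/* good completion */
+	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK)
+		return -1;	/* fatal error */
+	return 1;		/* recoverable, e.g. TCP checksum error */
+}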
+
+#define EHEA_EQE_VALID EHEA_BMASK_IBM(0, 0)
+#define EHEA_EQE_IS_CQE EHEA_BMASK_IBM(1, 1)
+#define EHEA_EQE_IDENTIFIER EHEA_BMASK_IBM(2, 7)
+#define EHEA_EQE_QP_CQ_NUMBER EHEA_BMASK_IBM(8, 31)
+#define EHEA_EQE_QP_TOKEN EHEA_BMASK_IBM(32, 63)
+#define EHEA_EQE_CQ_TOKEN EHEA_BMASK_IBM(32, 63)
+#define EHEA_EQE_KEY EHEA_BMASK_IBM(32, 63)
+#define EHEA_EQE_PORT_NUMBER EHEA_BMASK_IBM(56, 63)
+#define EHEA_EQE_EQ_NUMBER EHEA_BMASK_IBM(48, 63)
+#define EHEA_EQE_SM_ID EHEA_BMASK_IBM(48, 63)
+#define EHEA_EQE_SM_MECH_NUMBER EHEA_BMASK_IBM(48, 55)
+#define EHEA_EQE_SM_PORT_NUMBER EHEA_BMASK_IBM(56, 63)
+
+struct ehea_eqe {
+ u64 entry;
+};
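+
+/* Editor's sketch, not part of this patch: extracting the QP token from
+ * an event queue entry after checking its valid bit, as an EQ consumer
+ * would (assumes EHEA_BMASK_GET from ehea_hw.h).
+ */
+static inline u32 ehea_eqe_qp_token_sketch(struct ehea_eqe *eqe)
+{
+	if (!EHEA_BMASK_GET(EHEA_EQE_VALID, eqe->entry))
+		return 0;
+	return EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
+}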
+
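+/* translate a byte offset into a pointer to the queue entry; an offset
+ * may momentarily equal the queue length (see hw_qpageit_get_inc() in
+ * ehea_qmr.c) and is then folded back to the start of the queue
+ */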
+static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)
+{
+ struct ehea_page *current_page;
+
+ if (q_offset >= queue->queue_length)
+ q_offset -= queue->queue_length;
+ current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT];
+ return &current_page->entries[q_offset & (EHEA_PAGESIZE - 1)];
+}
+
+static inline void *hw_qeit_get(struct hw_queue *queue)
+{
+ return hw_qeit_calc(queue, queue->current_q_offset);
+}
+
+static inline void hw_qeit_inc(struct hw_queue *queue)
+{
+ queue->current_q_offset += queue->qe_size;
+ if (queue->current_q_offset >= queue->queue_length) {
+ queue->current_q_offset = 0;
+ /* toggle the valid flag */
+ queue->toggle_state = (~queue->toggle_state) & 1;
+ }
+}
+
+static inline void *hw_qeit_get_inc(struct hw_queue *queue)
+{
+ void *retvalue = hw_qeit_get(queue);
+ hw_qeit_inc(queue);
+ return retvalue;
+}
+
+static inline void *hw_qeit_get_inc_valid(struct hw_queue *queue)
+{
+ struct ehea_cqe *retvalue = hw_qeit_get(queue);
+ u8 valid = retvalue->valid;
+ void *pref;
+
+ if ((valid >> 7) == (queue->toggle_state & 1)) {
+ /* this is a good one */
+ hw_qeit_inc(queue);
+ pref = hw_qeit_calc(queue, queue->current_q_offset);
+ prefetch(pref);
+ prefetch(pref + 128);
+ } else
+ retvalue = NULL;
+ return retvalue;
+}
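+
+/* Editor's sketch, not part of this patch: a typical completion queue
+ * polling loop. hw_qeit_get_inc_valid() keeps returning entries until
+ * the valid bit no longer matches the queue's toggle state, i.e. until
+ * the consumer has caught up with the hardware.
+ */
+static inline int ehea_drain_cq_sketch(struct ehea_cq *cq)
+{
+	struct ehea_cqe *cqe;
+	int processed = 0;
+
+	while ((cqe = hw_qeit_get_inc_valid(&cq->hw_queue)) != NULL)
+		processed++;	/* a real consumer handles cqe->wr_id here */
+
+	return processed;
+}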
+
+static inline void *hw_qeit_get_valid(struct hw_queue *queue)
+{
+ struct ehea_cqe *retvalue = hw_qeit_get(queue);
+ void *pref;
+ u8 valid;
+
+ pref = hw_qeit_calc(queue, queue->current_q_offset);
+ prefetch(pref);
+ prefetch(pref + 128);
+ prefetch(pref + 256);
+ valid = retvalue->valid;
+	if ((valid >> 7) != (queue->toggle_state & 1))
+ retvalue = NULL;
+ return retvalue;
+}
+
+static inline void *hw_qeit_reset(struct hw_queue *queue)
+{
+ queue->current_q_offset = 0;
+ return hw_qeit_get(queue);
+}
+
+static inline void *hw_qeit_eq_get_inc(struct hw_queue *queue)
+{
+ u64 last_entry_in_q = queue->queue_length - queue->qe_size;
+ void *retvalue;
+
+ retvalue = hw_qeit_get(queue);
+ queue->current_q_offset += queue->qe_size;
+ if (queue->current_q_offset > last_entry_in_q) {
+ queue->current_q_offset = 0;
+ queue->toggle_state = (~queue->toggle_state) & 1;
+ }
+ return retvalue;
+}
+
+static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue)
+{
+ void *retvalue = hw_qeit_get(queue);
+	u32 qe = *(u8 *)retvalue;
+
+	if ((qe >> 7) == (queue->toggle_state & 1))
+ hw_qeit_eq_get_inc(queue);
+ else
+ retvalue = NULL;
+ return retvalue;
+}
+
+static inline struct ehea_rwqe *ehea_get_next_rwqe(struct ehea_qp *qp,
+ int rq_nr)
+{
+ struct hw_queue *queue;
+
+ if (rq_nr == 1)
+ queue = &qp->hw_rqueue1;
+ else if (rq_nr == 2)
+ queue = &qp->hw_rqueue2;
+ else
+ queue = &qp->hw_rqueue3;
+
+ return hw_qeit_get_inc(queue);
+}
+
+static inline struct ehea_swqe *ehea_get_swqe(struct ehea_qp *my_qp,
+ int *wqe_index)
+{
+ struct hw_queue *queue = &my_qp->hw_squeue;
+ struct ehea_swqe *wqe_p;
+
+ *wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_SQ);
+	wqe_p = hw_qeit_get_inc(queue);
+
+ return wqe_p;
+}
+
+static inline void ehea_post_swqe(struct ehea_qp *my_qp, struct ehea_swqe *swqe)
+{
+ iosync();
+ ehea_update_sqa(my_qp, 1);
+}
+
+static inline struct ehea_cqe *ehea_poll_rq1(struct ehea_qp *qp, int *wqe_index)
+{
+ struct hw_queue *queue = &qp->hw_rqueue1;
+
+ *wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_RQ1);
+ return hw_qeit_get_valid(queue);
+}
+
+static inline void ehea_inc_rq1(struct ehea_qp *qp)
+{
+ hw_qeit_inc(&qp->hw_rqueue1);
+}
+
+static inline struct ehea_cqe *ehea_poll_cq(struct ehea_cq *my_cq)
+{
+ return hw_qeit_get_inc_valid(&my_cq->hw_queue);
+}
+
+#define EHEA_CQ_REGISTER_ORIG 0
+#define EHEA_EQ_REGISTER_ORIG 0
+
+enum ehea_eq_type {
+ EHEA_EQ = 0, /* event queue */
+ EHEA_NEQ /* notification event queue */
+};
+
+struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
+ enum ehea_eq_type type,
+ const u32 length, const u8 eqe_gen);
+
+int ehea_destroy_eq(struct ehea_eq *eq);
+
+struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq);
+
+struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, int cqe,
+ u64 eq_handle, u32 cq_token);
+
+int ehea_destroy_cq(struct ehea_cq *cq);
+
+struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, u32 pd,
+ struct ehea_qp_init_attr *init_attr);
+
+int ehea_destroy_qp(struct ehea_qp *qp);
+
+int ehea_reg_mr_adapter(struct ehea_adapter *adapter);
+
+#endif /* __EHEA_QMR_H__ */