Diffstat (limited to 'drivers/net/ehea')
-rw-r--r--  drivers/net/ehea/ehea.h      |  33
-rw-r--r--  drivers/net/ehea/ehea_hw.h   |  24
-rw-r--r--  drivers/net/ehea/ehea_main.c | 199
-rw-r--r--  drivers/net/ehea/ehea_phyp.h |   3
-rw-r--r--  drivers/net/ehea/ehea_qmr.c  | 212
-rw-r--r--  drivers/net/ehea/ehea_qmr.h  |  14
6 files changed, 362 insertions(+), 123 deletions(-)
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index c0f81b5a30f..6628fa622e2 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -39,7 +39,13 @@
 #include <asm/io.h>
 
 #define DRV_NAME        "ehea"
-#define DRV_VERSION     "EHEA_0064"
+#define DRV_VERSION     "EHEA_0070"
+
+/* eHEA capability flags */
+#define DLPAR_PORT_ADD_REM 1
+#define DLPAR_MEM_ADD      2
+#define DLPAR_MEM_REM      4
+#define EHEA_CAPABILITIES  (DLPAR_PORT_ADD_REM)
 
 #define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
         | NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
@@ -107,6 +113,8 @@
 /* Memory Regions */
 #define EHEA_MR_ACC_CTRL        0x00800000
 
+#define EHEA_BUSMAP_START       0x8000000000000000ULL
+
 #define EHEA_WATCH_DOG_TIMEOUT 10*HZ
 
 /* utility functions */
@@ -136,10 +144,10 @@ void ehea_dump(void *adr, int len, char *msg);
         (0xffffffffffffffffULL >> ((64 - (mask)) & 0xffff))
 
 #define EHEA_BMASK_SET(mask, value) \
-        ((EHEA_BMASK_MASK(mask) & ((u64)(value))) << EHEA_BMASK_SHIFTPOS(mask))
+        ((EHEA_BMASK_MASK(mask) & ((u64)(value))) << EHEA_BMASK_SHIFTPOS(mask))
 
 #define EHEA_BMASK_GET(mask, value) \
-        (EHEA_BMASK_MASK(mask) & (((u64)(value)) >> EHEA_BMASK_SHIFTPOS(mask)))
+        (EHEA_BMASK_MASK(mask) & (((u64)(value)) >> EHEA_BMASK_SHIFTPOS(mask)))
 
 /*
  * Generic ehea page
@@ -180,6 +188,12 @@ struct h_epas {
                            set to 0 if unused */
 };
 
+struct ehea_busmap {
+        unsigned int entries;           /* total number of entries */
+        unsigned int valid_sections;    /* number of valid sections */
+        u64 *vaddr;
+};
+
 struct ehea_qp;
 struct ehea_cq;
 struct ehea_eq;
@@ -190,7 +204,7 @@ struct ehea_av;
  * Queue attributes passed to ehea_create_qp()
  */
 struct ehea_qp_init_attr {
-        /* input parameter */
+        /* input parameter */
         u32 qp_token;           /* queue token */
         u8 low_lat_rq1;
         u8 signalingtype;       /* cqe generation flag */
@@ -212,7 +226,7 @@ struct ehea_qp_init_attr {
         u64 recv_cq_handle;
         u64 aff_eq_handle;
 
-        /* output parameter */
+        /* output parameter */
         u32 qp_nr;
         u16 act_nr_send_wqes;
         u16 act_nr_rwqes_rq1;
@@ -279,12 +293,12 @@ struct ehea_qp {
  * Completion Queue attributes
  */
 struct ehea_cq_attr {
-        /* input parameter */
+        /* input parameter */
         u32 max_nr_of_cqes;
         u32 cq_token;
         u64 eq_handle;
 
-        /* output parameter */
+        /* output parameter */
         u32 act_nr_of_cqes;
         u32 nr_pages;
 };
@@ -376,6 +390,8 @@ struct ehea_adapter {
         struct ehea_mr mr;
         u32 pd;                    /* protection domain */
         u64 max_mc_mac;            /* max number of multicast mac addresses */
+        int active_ports;
+        struct list_head list;
 };
 
 
@@ -425,6 +441,9 @@ struct port_res_cfg {
         int max_entries_rq3;
 };
 
+enum ehea_flag_bits {
+        __EHEA_STOP_XFER
+};
 
 void ehea_set_ethtool_ops(struct net_device *netdev);
 
 int ehea_sense_port_attr(struct ehea_port *port);
diff --git a/drivers/net/ehea/ehea_hw.h b/drivers/net/ehea/ehea_hw.h
index 1246757f2c2..1af7ca499ec 100644
--- a/drivers/net/ehea/ehea_hw.h
+++ b/drivers/net/ehea/ehea_hw.h
@@ -211,34 +211,34 @@ static inline void epa_store_acc(struct h_epa epa, u32 offset, u64 value)
 }
 
 #define epa_store_eq(epa, offset, value)\
-        epa_store(epa, EQTEMM_OFFSET(offset), value)
+        epa_store(epa, EQTEMM_OFFSET(offset), value)
 
 #define epa_load_eq(epa, offset)\
-        epa_load(epa, EQTEMM_OFFSET(offset))
+        epa_load(epa, EQTEMM_OFFSET(offset))
 
 #define epa_store_cq(epa, offset, value)\
-        epa_store(epa, CQTEMM_OFFSET(offset), value)
+        epa_store(epa, CQTEMM_OFFSET(offset), value)
 
 #define epa_load_cq(epa, offset)\
-        epa_load(epa, CQTEMM_OFFSET(offset))
+        epa_load(epa, CQTEMM_OFFSET(offset))
 
 #define epa_store_qp(epa, offset, value)\
-        epa_store(epa, QPTEMM_OFFSET(offset), value)
+        epa_store(epa, QPTEMM_OFFSET(offset), value)
 
 #define epa_load_qp(epa, offset)\
-        epa_load(epa, QPTEMM_OFFSET(offset))
+        epa_load(epa, QPTEMM_OFFSET(offset))
 
 #define epa_store_qped(epa, offset, value)\
-        epa_store(epa, QPEDMM_OFFSET(offset), value)
+        epa_store(epa, QPEDMM_OFFSET(offset), value)
 
 #define epa_load_qped(epa, offset)\
-        epa_load(epa, QPEDMM_OFFSET(offset))
+        epa_load(epa, QPEDMM_OFFSET(offset))
 
 #define epa_store_mrmw(epa, offset, value)\
-        epa_store(epa, MRMWMM_OFFSET(offset), value)
+        epa_store(epa, MRMWMM_OFFSET(offset), value)
 
 #define epa_load_mrmw(epa, offset)\
-        epa_load(epa, MRMWMM_OFFSET(offset))
+        epa_load(epa, MRMWMM_OFFSET(offset))
 
 #define epa_store_base(epa, offset, value)\
-        epa_store(epa, HCAGR_OFFSET(offset), value)
+        epa_store(epa, HCAGR_OFFSET(offset), value)
 
 #define epa_load_base(epa, offset)\
-        epa_load(epa, HCAGR_OFFSET(offset))
+        epa_load(epa, HCAGR_OFFSET(offset))
 
 static inline void ehea_update_sqa(struct ehea_qp *qp, u16 nr_wqes)
 {
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 9e13433a268..1d1571cf322 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -79,9 +79,14 @@ MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue  "
 MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 1 ");
 
 static int port_name_cnt = 0;
+static LIST_HEAD(adapter_list);
+u64 ehea_driver_flags = 0;
+struct workqueue_struct *ehea_driver_wq;
+struct work_struct ehea_rereg_mr_task;
+
 
 static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev,
                                         const struct of_device_id *id);
 
 static int __devexit ehea_remove(struct ibmebus_dev *dev);
 
@@ -236,15 +241,19 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
                rwqe = ehea_get_next_rwqe(qp, rq_nr);
                rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
-                           | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
+                           | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
                rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
-               rwqe->sg_list[0].vaddr = (u64)skb->data;
+               rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
                rwqe->sg_list[0].len = packet_size;
                rwqe->data_segments = 1;
 
                index++;
                index &= max_index_mask;
+
+               if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags)))
+                       goto out;
        }
+
        q_skba->index = index;
 
        /* Ring doorbell */
@@ -253,7 +262,7 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
                ehea_update_rq2a(pr->qp, i);
        else
                ehea_update_rq3a(pr->qp, i);
-
+out:
        return ret;
 }
 
@@ -427,7 +436,7 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev,
                                        break;
                                }
                                skb_copy_to_linear_data(skb, ((char*)cqe) + 64,
-                                                cqe->num_bytes_transfered - 4);
+                                                cqe->num_bytes_transfered - 4);
                                ehea_fill_skb(port->netdev, skb, cqe);
                        } else if (rq == 2) {  /* RQ2 */
                                skb = get_skb_by_index(skb_arr_rq2,
@@ -618,7 +627,7 @@ static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
 
        for (i = 0; i < EHEA_MAX_PORTS; i++)
                if (adapter->port[i])
-                       if (adapter->port[i]->logical_port_id == logical_port)
+                       if (adapter->port[i]->logical_port_id == logical_port)
                                return adapter->port[i];
        return NULL;
 }
@@ -1321,7 +1330,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
                        sg1entry->len = skb_data_size - headersize;
 
                        tmp_addr = (u64)(skb->data + headersize);
-                       sg1entry->vaddr = tmp_addr;
+                       sg1entry->vaddr = ehea_map_vaddr(tmp_addr);
                        swqe->descriptors++;
                }
        } else
@@ -1352,7 +1361,7 @@ static void write_swqe2_nonTSO(struct sk_buff *skb,
                        sg1entry->l_key = lkey;
                        sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
                        tmp_addr = (u64)(skb->data + SWQE2_MAX_IMM);
-                       sg1entry->vaddr = tmp_addr;
+                       sg1entry->vaddr = ehea_map_vaddr(tmp_addr);
                        swqe->descriptors++;
                }
        } else {
@@ -1391,7 +1400,7 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
                        sg1entry->len = frag->size;
                        tmp_addr = (u64)(page_address(frag->page)
                                         + frag->page_offset);
-                       sg1entry->vaddr = tmp_addr;
+                       sg1entry->vaddr = ehea_map_vaddr(tmp_addr);
                        swqe->descriptors++;
                        sg1entry_contains_frag_data = 1;
                }
@@ -1406,7 +1415,7 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
 
                        tmp_addr = (u64)(page_address(frag->page)
                                         + frag->page_offset);
-                       sgentry->vaddr = tmp_addr;
+                       sgentry->vaddr = ehea_map_vaddr(tmp_addr);
                        swqe->descriptors++;
                }
        }
@@ -1695,6 +1704,7 @@ static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
 {
        if (skb->protocol == htons(ETH_P_IP)) {
                const struct iphdr *iph = ip_hdr(skb);
+
                /* IPv4 */
                swqe->tx_control |= EHEA_SWQE_CRC
                                 | EHEA_SWQE_IP_CHECKSUM
@@ -1705,13 +1715,12 @@ static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
                write_ip_start_end(swqe, skb);
 
                if (iph->protocol == IPPROTO_UDP) {
-                       if ((iph->frag_off & IP_MF) ||
-                           (iph->frag_off & IP_OFFSET))
+                       if ((iph->frag_off & IP_MF)
+                           || (iph->frag_off & IP_OFFSET))
                                /* IP fragment, so don't change cs */
                                swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
                        else
                                write_udp_offset_end(swqe, skb);
-
                } else if (iph->protocol == IPPROTO_TCP) {
                        write_tcp_offset_end(swqe, skb);
                }
@@ -1739,6 +1748,7 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
 
        if (skb->protocol == htons(ETH_P_IP)) {
                const struct iphdr *iph = ip_hdr(skb);
+
                /* IPv4 */
                write_ip_start_end(swqe, skb);
 
@@ -1751,8 +1761,8 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
                        write_tcp_offset_end(swqe, skb);
 
                } else if (iph->protocol == IPPROTO_UDP) {
-                       if ((iph->frag_off & IP_MF) ||
-                           (iph->frag_off & IP_OFFSET))
+                       if ((iph->frag_off & IP_MF)
+                           || (iph->frag_off & IP_OFFSET))
                                /* IP fragment, so don't change cs */
                                swqe->tx_control |= EHEA_SWQE_CRC
                                                 | EHEA_SWQE_IMM_DATA_PRESENT;
@@ -1877,6 +1887,9 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
                ehea_dump(swqe, 512, "swqe");
        }
 
+       if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags)))
+               goto out;
+
        ehea_post_swqe(pr->qp, swqe);
        pr->tx_packets++;
 
@@ -1891,7 +1904,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
        dev->trans_start = jiffies;
        spin_unlock(&pr->xmit_lock);
-
+out:
        return NETDEV_TX_OK;
 }
 
@@ -2219,6 +2232,9 @@ out_dereg_bc:
 out_clean_pr:
        ehea_clean_all_portres(port);
 out:
+       if (ret)
+               ehea_info("Failed starting %s. ret=%i", dev->name, ret);
+
        return ret;
 }
 
@@ -2258,8 +2274,13 @@ static int ehea_down(struct net_device *dev)
                msleep(1);
 
        ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
-       ret = ehea_clean_all_portres(port);
        port->state = EHEA_PORT_DOWN;
+
+       ret = ehea_clean_all_portres(port);
+       if (ret)
+               ehea_info("Failed freeing resources for %s. ret=%i",
+                         dev->name, ret);
+
        return ret;
 }
 
@@ -2291,15 +2312,11 @@ static void ehea_reset_port(struct work_struct *work)
        netif_stop_queue(dev);
        netif_poll_disable(dev);
 
-       ret = ehea_down(dev);
-       if (ret)
-               ehea_error("ehea_down failed. not all resources are freed");
+       ehea_down(dev);
 
        ret = ehea_up(dev);
-       if (ret) {
-               ehea_error("Reset device %s failed: ret=%d", dev->name, ret);
+       if (ret)
                goto out;
-       }
 
        if (netif_msg_timer(port))
                ehea_info("Device %s resetted successfully", dev->name);
@@ -2311,6 +2328,88 @@ out:
        return;
 }
 
+static void ehea_rereg_mrs(struct work_struct *work)
+{
+       int ret, i;
+       struct ehea_adapter *adapter;
+
+       ehea_info("LPAR memory enlarged - re-initializing driver");
+
+       list_for_each_entry(adapter, &adapter_list, list)
+               if (adapter->active_ports) {
+                       /* Shutdown all ports */
+                       for (i = 0; i < EHEA_MAX_PORTS; i++) {
+                               struct ehea_port *port = adapter->port[i];
+
+                               if (port) {
+                                       struct net_device *dev = port->netdev;
+
+                                       if (dev->flags & IFF_UP) {
+                                               ehea_info("stopping %s",
+                                                         dev->name);
+                                               down(&port->port_lock);
+                                               netif_stop_queue(dev);
+                                               netif_poll_disable(dev);
+                                               ehea_down(dev);
+                                               up(&port->port_lock);
+                                       }
+                               }
+                       }
+
+                       /* Unregister old memory region */
+                       ret = ehea_rem_mr(&adapter->mr);
+                       if (ret) {
+                               ehea_error("unregister MR failed - driver"
+                                          " inoperable!");
+                               goto out;
+                       }
+               }
+
+       ehea_destroy_busmap();
+
+       ret = ehea_create_busmap();
+       if (ret)
+               goto out;
+
+       clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
+
+       list_for_each_entry(adapter, &adapter_list, list)
+               if (adapter->active_ports) {
+                       /* Register new memory region */
+                       ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
+                       if (ret) {
+                               ehea_error("register MR failed - driver"
+                                          " inoperable!");
+                               goto out;
+                       }
+
+                       /* Restart all ports */
+                       for (i = 0; i < EHEA_MAX_PORTS; i++) {
+                               struct ehea_port *port = adapter->port[i];
+
+                               if (port) {
+                                       struct net_device *dev = port->netdev;
+
+                                       if (dev->flags & IFF_UP) {
+                                               ehea_info("restarting %s",
+                                                         dev->name);
+                                               down(&port->port_lock);
+
+                                               ret = ehea_up(dev);
+                                               if (!ret) {
+                                                       netif_poll_enable(dev);
+                                                       netif_wake_queue(dev);
+                                               }
+
+                                               up(&port->port_lock);
+                                       }
+                               }
+                       }
+               }
+out:
+       return;
+}
+
 static void ehea_tx_watchdog(struct net_device *dev)
 {
        struct ehea_port *port = netdev_priv(dev);
@@ -2407,7 +2506,7 @@ static void __devinit logical_port_release(struct device *dev)
 }
 
 static int ehea_driver_sysfs_add(struct device *dev,
-                                 struct device_driver *driver)
+                                 struct device_driver *driver)
 {
        int ret;
 
@@ -2424,7 +2523,7 @@ static int ehea_driver_sysfs_add(struct device *dev,
 }
 
 static void ehea_driver_sysfs_remove(struct device *dev,
-                                     struct device_driver *driver)
+                                     struct device_driver *driver)
 {
        struct device_driver *drv = driver;
 
@@ -2453,7 +2552,7 @@ static struct device *ehea_register_port(struct ehea_port *port,
        }
 
        ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
-       if (ret) {
+       if (ret) {
                ehea_error("failed to register attributes, ret=%d", ret);
                goto out_unreg_of_dev;
        }
@@ -2572,6 +2671,8 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
        ehea_info("%s: Jumbo frames are %sabled", dev->name,
                  jumbo == 1 ? "en" : "dis");
 
+       adapter->active_ports++;
+
        return port;
 
 out_unreg_port:
@@ -2595,12 +2696,14 @@ static void ehea_shutdown_single_port(struct ehea_port *port)
        ehea_unregister_port(port);
        kfree(port->mc_list);
        free_netdev(port->netdev);
+       port->adapter->active_ports--;
 }
 
 static int ehea_setup_ports(struct ehea_adapter *adapter)
 {
        struct device_node *lhea_dn;
        struct device_node *eth_dn = NULL;
+
        const u32 *dn_log_port_id;
        int i = 0;
 
@@ -2608,7 +2711,7 @@ static int ehea_setup_ports(struct ehea_adapter *adapter)
        while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
 
                dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
-                                                NULL);
+                                                NULL);
                if (!dn_log_port_id) {
                        ehea_error("bad device node: eth_dn name=%s",
                                   eth_dn->full_name);
@@ -2648,7 +2751,7 @@ static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
        while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
 
                dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
-                                                NULL);
+                                                NULL);
                if (dn_log_port_id)
                        if (*dn_log_port_id == logical_port_id)
                                return eth_dn;
@@ -2786,10 +2889,12 @@ static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev,
                goto out;
        }
 
+       list_add(&adapter->list, &adapter_list);
+
        adapter->ebus_dev = dev;
 
        adapter_handle = of_get_property(dev->ofdev.node, "ibm,hea-handle",
-                                        NULL);
+                                        NULL);
        if (adapter_handle)
                adapter->handle = *adapter_handle;
 
@@ -2889,7 +2994,10 @@ static int __devexit ehea_remove(struct ibmebus_dev *dev)
 
        ehea_destroy_eq(adapter->neq);
        ehea_remove_adapter_mr(adapter);
+       list_del(&adapter->list);
+
        kfree(adapter);
+
        return 0;
 }
 
@@ -2921,6 +3029,15 @@ static int check_module_parm(void)
        return ret;
 }
 
+static ssize_t ehea_show_capabilities(struct device_driver *drv,
+                                      char *buf)
+{
+       return sprintf(buf, "%d", EHEA_CAPABILITIES);
+}
+
+static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
+                  ehea_show_capabilities, NULL);
+
 int __init ehea_module_init(void)
 {
        int ret;
@@ -2928,12 +3045,32 @@ int __init ehea_module_init(void)
        printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n",
               DRV_VERSION);
 
+       ehea_driver_wq = create_workqueue("ehea_driver_wq");
+
+       INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs);
+
        ret = check_module_parm();
        if (ret)
                goto out;
-       ret = ibmebus_register_driver(&ehea_driver);
+
+       ret = ehea_create_busmap();
        if (ret)
+               goto out;
+
+       ret = ibmebus_register_driver(&ehea_driver);
+       if (ret) {
                ehea_error("failed registering eHEA device driver on ebus");
+               goto out;
+       }
+
+       ret = driver_create_file(&ehea_driver.driver,
+                                &driver_attr_capabilities);
+       if (ret) {
+               ehea_error("failed to register capabilities attribute, ret=%d",
+                          ret);
+               ibmebus_unregister_driver(&ehea_driver);
+               goto out;
+       }
 
 out:
        return ret;
@@ -2941,7 +3078,9 @@ out:
 
 static void __exit ehea_module_exit(void)
 {
+       driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
        ibmebus_unregister_driver(&ehea_driver);
+       ehea_destroy_busmap();
 }
 
 module_init(ehea_module_init);
diff --git a/drivers/net/ehea/ehea_phyp.h b/drivers/net/ehea/ehea_phyp.h
index d17a45a7e71..89b63531ff2 100644
--- a/drivers/net/ehea/ehea_phyp.h
+++ b/drivers/net/ehea/ehea_phyp.h
@@ -60,6 +60,9 @@ static inline u32 get_longbusy_msecs(int long_busy_ret_code)
        }
 }
 
+/* Number of pages which can be registered at once by H_REGISTER_HEA_RPAGES */
+#define EHEA_MAX_RPAGE 512
+
 /* Notification Event Queue (NEQ) Entry bit masks */
 #define NEQE_EVENT_CODE         EHEA_BMASK_IBM(2, 7)
 #define NEQE_PORTNUM            EHEA_BMASK_IBM(32, 47)
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index f24a8862977..a36fa6c23fd 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -31,6 +31,13 @@
 #include "ehea_phyp.h"
 #include "ehea_qmr.h"
 
+
+struct ehea_busmap ehea_bmap = { 0, 0, NULL };
+extern u64 ehea_driver_flags;
+extern struct workqueue_struct *ehea_driver_wq;
+extern struct work_struct ehea_rereg_mr_task;
+
+
 static void *hw_qpageit_get_inc(struct hw_queue *queue)
 {
        void *retvalue = hw_qeit_get(queue);
@@ -211,7 +218,7 @@ u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
        u64 hret;
        u64 adapter_handle = cq->adapter->handle;
 
-       /* deregister all previous registered pages */
+       /* deregister all previous registered pages */
        hret = ehea_h_free_resource(adapter_handle, cq->fw_handle, force);
        if (hret != H_SUCCESS)
                return hret;
@@ -362,7 +369,7 @@ int ehea_destroy_eq(struct ehea_eq *eq)
        if (hret != H_SUCCESS) {
                ehea_error("destroy EQ failed");
                return -EIO;
-       }
+       }
 
        return 0;
 }
@@ -507,58 +514,124 @@ out_freemem:
 
 u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
 {
-       u64 hret;
-       struct ehea_qp_init_attr *qp_attr = &qp->init_attr;
+       u64 hret;
+       struct ehea_qp_init_attr *qp_attr = &qp->init_attr;
 
-       ehea_h_disable_and_get_hea(qp->adapter->handle, qp->fw_handle);
-       hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle, force);
-       if (hret != H_SUCCESS)
-               return hret;
+       ehea_h_disable_and_get_hea(qp->adapter->handle, qp->fw_handle);
+       hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle, force);
+       if (hret != H_SUCCESS)
+               return hret;
 
-       hw_queue_dtor(&qp->hw_squeue);
-       hw_queue_dtor(&qp->hw_rqueue1);
+       hw_queue_dtor(&qp->hw_squeue);
+       hw_queue_dtor(&qp->hw_rqueue1);
 
-       if (qp_attr->rq_count > 1)
-               hw_queue_dtor(&qp->hw_rqueue2);
-       if (qp_attr->rq_count > 2)
-               hw_queue_dtor(&qp->hw_rqueue3);
-       kfree(qp);
+       if (qp_attr->rq_count > 1)
+               hw_queue_dtor(&qp->hw_rqueue2);
+       if (qp_attr->rq_count > 2)
+               hw_queue_dtor(&qp->hw_rqueue3);
+       kfree(qp);
 
-       return hret;
+       return hret;
 }
 
 int ehea_destroy_qp(struct ehea_qp *qp)
 {
-       u64 hret;
-       if (!qp)
-               return 0;
+       u64 hret;
+       if (!qp)
+               return 0;
 
-       if ((hret = ehea_destroy_qp_res(qp, NORMAL_FREE)) == H_R_STATE) {
-               ehea_error_data(qp->adapter, qp->fw_handle);
-               hret = ehea_destroy_qp_res(qp, FORCE_FREE);
-       }
+       if ((hret = ehea_destroy_qp_res(qp, NORMAL_FREE)) == H_R_STATE) {
+               ehea_error_data(qp->adapter, qp->fw_handle);
+               hret = ehea_destroy_qp_res(qp, FORCE_FREE);
+       }
 
-       if (hret != H_SUCCESS) {
-               ehea_error("destroy QP failed");
-               return -EIO;
-       }
+       if (hret != H_SUCCESS) {
+               ehea_error("destroy QP failed");
+               return -EIO;
+       }
 
-       return 0;
+       return 0;
+}
+
+int ehea_create_busmap( void )
+{
+       u64 vaddr = EHEA_BUSMAP_START;
+       unsigned long abs_max_pfn = 0;
+       unsigned long sec_max_pfn;
+       int i;
+
+       /*
+        * Sections are not in ascending order -> Loop over all sections and
+        * find the highest PFN to compute the required map size.
+        */
+       ehea_bmap.valid_sections = 0;
+
+       for (i = 0; i < NR_MEM_SECTIONS; i++)
+               if (valid_section_nr(i)) {
+                       sec_max_pfn = section_nr_to_pfn(i);
+                       if (sec_max_pfn > abs_max_pfn)
+                               abs_max_pfn = sec_max_pfn;
+                       ehea_bmap.valid_sections++;
+               }
+
+       ehea_bmap.entries = abs_max_pfn / EHEA_PAGES_PER_SECTION + 1;
+       ehea_bmap.vaddr = vmalloc(ehea_bmap.entries * sizeof(*ehea_bmap.vaddr));
+
+       if (!ehea_bmap.vaddr)
+               return -ENOMEM;
+
+       for (i = 0 ; i < ehea_bmap.entries; i++) {
+               unsigned long pfn = section_nr_to_pfn(i);
+
+               if (pfn_valid(pfn)) {
+                       ehea_bmap.vaddr[i] = vaddr;
+                       vaddr += EHEA_SECTSIZE;
+               } else
+                       ehea_bmap.vaddr[i] = 0;
+       }
+
+       return 0;
+}
+
+void ehea_destroy_busmap( void )
+{
+       vfree(ehea_bmap.vaddr);
+}
+
+u64 ehea_map_vaddr(void *caddr)
+{
+       u64 mapped_addr;
+       unsigned long index = __pa(caddr) >> SECTION_SIZE_BITS;
+
+       if (likely(index < ehea_bmap.entries)) {
+               mapped_addr = ehea_bmap.vaddr[index];
+               if (likely(mapped_addr))
+                       mapped_addr |= (((unsigned long)caddr)
+                                       & (EHEA_SECTSIZE - 1));
+               else
+                       mapped_addr = -1;
+       } else
+               mapped_addr = -1;
+
+       if (unlikely(mapped_addr == -1))
+               if (!test_and_set_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
+                       queue_work(ehea_driver_wq, &ehea_rereg_mr_task);
+
+       return mapped_addr;
 }
 
 int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
 {
-       int i, k, ret;
-       u64 hret, pt_abs, start, end, nr_pages;
-       u32 acc_ctrl = EHEA_MR_ACC_CTRL;
+       int ret;
        u64 *pt;
+       void *pg;
+       u64 hret, pt_abs, i, j, m, mr_len;
+       u32 acc_ctrl = EHEA_MR_ACC_CTRL;
 
-       start = KERNELBASE;
-       end = (u64)high_memory;
-       nr_pages = (end - start) / EHEA_PAGESIZE;
+       mr_len = ehea_bmap.valid_sections * EHEA_SECTSIZE;
 
-       pt = kzalloc(PAGE_SIZE, GFP_KERNEL);
+       pt = kzalloc(EHEA_MAX_RPAGE * sizeof(u64), GFP_KERNEL);
        if (!pt) {
                ehea_error("no mem");
                ret = -ENOMEM;
@@ -566,7 +639,8 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
        }
        pt_abs = virt_to_abs(pt);
 
-       hret = ehea_h_alloc_resource_mr(adapter->handle, start, end - start,
+       hret = ehea_h_alloc_resource_mr(adapter->handle,
+                                       EHEA_BUSMAP_START, mr_len,
                                        acc_ctrl, adapter->pd,
                                        &mr->handle, &mr->lkey);
        if (hret != H_SUCCESS) {
@@ -575,49 +649,43 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
                goto out;
        }
 
-       mr->vaddr = KERNELBASE;
-       k = 0;
-
-       while (nr_pages > 0) {
-               if (nr_pages > 1) {
-                       u64 num_pages = min(nr_pages, (u64)512);
-                       for (i = 0; i < num_pages; i++)
-                               pt[i] = virt_to_abs((void*)(((u64)start) +
-                                                           ((k++) *
-                                                            EHEA_PAGESIZE)));
-
-                       hret = ehea_h_register_rpage_mr(adapter->handle,
-                                                       mr->handle, 0,
-                                                       0, (u64)pt_abs,
-                                                       num_pages);
-                       nr_pages -= num_pages;
-               } else {
-                       u64 abs_adr = virt_to_abs((void*)(((u64)start) +
-                                                         (k * EHEA_PAGESIZE)));
-
-                       hret = ehea_h_register_rpage_mr(adapter->handle,
-                                                       mr->handle, 0,
-                                                       0, abs_adr,1);
-                       nr_pages--;
-               }
-
-               if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) {
-                       ehea_h_free_resource(adapter->handle,
-                                            mr->handle, FORCE_FREE);
-                       ehea_error("register_rpage_mr failed");
-                       ret = -EIO;
-                       goto out;
+       for (i = 0 ; i < ehea_bmap.entries; i++)
+               if (ehea_bmap.vaddr[i]) {
+                       void *sectbase = __va(i << SECTION_SIZE_BITS);
+                       unsigned long k = 0;
+
+                       for (j = 0; j < (PAGES_PER_SECTION / EHEA_MAX_RPAGE);
+                            j++) {
+
+                               for (m = 0; m < EHEA_MAX_RPAGE; m++) {
+                                       pg = sectbase + ((k++) * EHEA_PAGESIZE);
+                                       pt[m] = virt_to_abs(pg);
+                               }
+
+                               hret = ehea_h_register_rpage_mr(adapter->handle,
+                                                               mr->handle,
+                                                               0, 0, pt_abs,
+                                                               EHEA_MAX_RPAGE);
+                               if ((hret != H_SUCCESS)
+                                   && (hret != H_PAGE_REGISTERED)) {
+                                       ehea_h_free_resource(adapter->handle,
+                                                            mr->handle,
+                                                            FORCE_FREE);
+                                       ehea_error("register_rpage_mr failed");
+                                       ret = -EIO;
+                                       goto out;
+                               }
+                       }
                }
-       }
 
        if (hret != H_SUCCESS) {
-               ehea_h_free_resource(adapter->handle, mr->handle,
-                                    FORCE_FREE);
-               ehea_error("register_rpage failed for last page");
+               ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
+               ehea_error("registering mr failed");
                ret = -EIO;
                goto out;
        }
 
+       mr->vaddr = EHEA_BUSMAP_START;
        mr->adapter = adapter;
        ret = 0;
 out:
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h
index c0eb3e03a10..b71f8452a5e 100644
--- a/drivers/net/ehea/ehea_qmr.h
+++ b/drivers/net/ehea/ehea_qmr.h
@@ -36,8 +36,14 @@
  * page size of ehea hardware queues
  */
 
-#define EHEA_PAGESHIFT  12
-#define EHEA_PAGESIZE   4096UL
+#define EHEA_PAGESHIFT         12
+#define EHEA_PAGESIZE          (1UL << EHEA_PAGESHIFT)
+#define EHEA_SECTSIZE          (1UL << 24)
+#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> PAGE_SHIFT)
+
+#if (1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE
+#error eHEA module can't work if kernel sectionsize < ehea sectionsize
+#endif
 
 /* Some abbreviations used here:
  *
@@ -372,4 +378,8 @@ int ehea_rem_mr(struct ehea_mr *mr);
 
 void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle);
 
+int ehea_create_busmap( void );
+void ehea_destroy_busmap( void );
+u64 ehea_map_vaddr(void *caddr);
+
 #endif /* __EHEA_QMR_H__ */
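
For readers who want to see the address-translation idea from ehea_qmr.c in isolation: the patch builds a table with one bus address per memory section and translates a kernel address by indexing that table with its section number and OR-ing in the offset within the section; a lookup that hits an unmapped section signals that the memory region must be re-registered. Below is a minimal, self-contained C sketch of that scheme only. The DEMO_* constants, demo_pa() and the fixed hole at section 5 are illustrative stand-ins for __pa(), SECTION_SIZE_BITS and the real busmap; this is not kernel code and not part of the patch.

/* demo_busmap.c - standalone illustration of the section-based busmap */
#include <stdio.h>
#include <stdint.h>

#define DEMO_SECTSIZE      (1UL << 24)            /* 16 MB, like EHEA_SECTSIZE */
#define DEMO_BUSMAP_START  0x8000000000000000ULL  /* like EHEA_BUSMAP_START */
#define DEMO_SECTIONS      8                      /* pretend LPAR size: 8 sections */

static uint64_t demo_bmap[DEMO_SECTIONS];         /* bus address per section, 0 = hole */

/* stand-in for __pa(): here the "physical address" is just the value itself */
static uint64_t demo_pa(uint64_t addr) { return addr; }

static void demo_create_busmap(void)
{
        uint64_t vaddr = DEMO_BUSMAP_START;

        for (int i = 0; i < DEMO_SECTIONS; i++) {
                if (i == 5) {                     /* leave a hole to show the error path */
                        demo_bmap[i] = 0;
                        continue;
                }
                demo_bmap[i] = vaddr;             /* assign the next 16 MB bus window */
                vaddr += DEMO_SECTSIZE;
        }
}

/* mirrors the shape of ehea_map_vaddr(): index by section, keep the offset */
static uint64_t demo_map_vaddr(uint64_t addr)
{
        uint64_t pa = demo_pa(addr);
        uint64_t index = pa / DEMO_SECTSIZE;

        if (index >= DEMO_SECTIONS || !demo_bmap[index])
                return (uint64_t)-1;              /* caller would stop I/O and re-register */

        return demo_bmap[index] | (pa & (DEMO_SECTSIZE - 1));
}

int main(void)
{
        demo_create_busmap();
        /* an address in section 1 maps into the second 16 MB bus window */
        printf("mapped: 0x%llx\n",
               (unsigned long long)demo_map_vaddr(0x01234567UL));
        return 0;
}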