author    | David Vrabel <david.vrabel@csr.com> | 2008-10-20 16:07:19 +0100
committer | David Vrabel <david.vrabel@csr.com> | 2008-10-20 16:07:19 +0100
commit    | 61e0e79ee3c609eb34edf2fe023708cba6a79b1f (patch)
tree      | 663deacffd4071120dc9badb70428fe5f124c7b9 /drivers/net/cxgb3/cxgb3_main.c
parent    | c15895ef30c2c03e99802951787183039a349d32 (diff)
parent    | 0cfd81031a26717fe14380d18275f8e217571615 (diff)
Merge branch 'master' into for-upstream
Conflicts:
Documentation/ABI/testing/sysfs-bus-usb
drivers/Makefile
Diffstat (limited to 'drivers/net/cxgb3/cxgb3_main.c')
-rw-r--r-- | drivers/net/cxgb3/cxgb3_main.c | 390
1 file changed, 297 insertions(+), 93 deletions(-)
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 5447f3e60f0..1ace41a13ac 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
+ * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -208,6 +208,31 @@ void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
 	}
 }
 
+/**
+ * t3_os_phymod_changed - handle PHY module changes
+ * @phy: the PHY reporting the module change
+ * @mod_type: new module type
+ *
+ * This is the OS-dependent handler for PHY module changes.  It is
+ * invoked when a PHY module is removed or inserted for any OS-specific
+ * processing.
+ */
+void t3_os_phymod_changed(struct adapter *adap, int port_id)
+{
+	static const char *mod_str[] = {
+		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
+	};
+
+	const struct net_device *dev = adap->port[port_id];
+	const struct port_info *pi = netdev_priv(dev);
+
+	if (pi->phy.modtype == phy_modtype_none)
+		printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
+	else
+		printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
+		       mod_str[pi->phy.modtype]);
+}
+
 static void cxgb_set_rxmode(struct net_device *dev)
 {
 	struct t3_rx_mode rm;
@@ -274,10 +299,10 @@ static void name_msix_vecs(struct adapter *adap)
 
 		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
 			snprintf(adap->msix_info[msi_idx].desc, n,
-				 "%s (queue %d)", d->name, i);
+				 "%s-%d", d->name, pi->first_qset + i);
 			adap->msix_info[msi_idx].desc[n] = 0;
 		}
- 	}
+	}
 }
 
@@ -306,6 +331,22 @@ static int request_msix_data_irqs(struct adapter *adap)
 	return 0;
 }
 
+static void free_irq_resources(struct adapter *adapter)
+{
+	if (adapter->flags & USING_MSIX) {
+		int i, n = 0;
+
+		free_irq(adapter->msix_info[0].vec, adapter);
+		for_each_port(adapter, i)
+			n += adap2pinfo(adapter, i)->nqsets;
+
+		for (i = 0; i < n; ++i)
+			free_irq(adapter->msix_info[i + 1].vec,
+				 &adapter->sge.qs[i]);
+	} else
+		free_irq(adapter->pdev->irq, adapter);
+}
+
 static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
 			      unsigned long n)
 {
@@ -473,12 +514,16 @@ static int setup_sge_qsets(struct adapter *adap)
 		struct port_info *pi = netdev_priv(dev);
 
 		pi->qs = &adap->sge.qs[pi->first_qset];
-		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
+		for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
+		     ++j, ++qset_idx) {
+			if (!pi->rx_csum_offload)
+				adap->params.sge.qset[qset_idx].lro = 0;
 			err = t3_sge_alloc_qset(adap, qset_idx, 1,
 				(adap->flags & USING_MSIX) ? qset_idx + 1 :
 				irq_idx,
 				&adap->params.sge.qset[qset_idx], ntxq, dev);
 			if (err) {
+				t3_stop_sge_timers(adap);
 				t3_free_sge_resources(adap);
 				return err;
 			}
@@ -739,11 +784,12 @@ static void init_port_mtus(struct adapter *adapter)
 	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
 }
 
-static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
+static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
 			      int hi, int port)
 {
 	struct sk_buff *skb;
 	struct mngt_pktsched_wr *req;
+	int ret;
 
 	skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
 	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
@@ -754,20 +800,28 @@ static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
 	req->min = lo;
 	req->max = hi;
 	req->binding = port;
-	t3_mgmt_tx(adap, skb);
+	ret = t3_mgmt_tx(adap, skb);
+
+	return ret;
 }
 
-static void bind_qsets(struct adapter *adap)
+static int bind_qsets(struct adapter *adap)
 {
-	int i, j;
+	int i, j, err = 0;
 
 	for_each_port(adap, i) {
 		const struct port_info *pi = adap2pinfo(adap, i);
 
-		for (j = 0; j < pi->nqsets; ++j)
-			send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
-					  -1, i);
+		for (j = 0; j < pi->nqsets; ++j) {
+			int ret = send_pktsched_cmd(adap, 1,
+						    pi->first_qset + j, -1,
+						    -1, i);
+			if (ret)
+				err = ret;
+		}
 	}
+
+	return err;
 }
 
 #define FW_FNAME "t3fw-%d.%d.%d.bin"
@@ -891,6 +945,13 @@ static int cxgb_up(struct adapter *adap)
 			goto out;
 		}
 
+		/*
+		 * Clear interrupts now to catch errors if t3_init_hw fails.
+		 * We clear them again later as initialization may trigger
+		 * conditions that can interrupt.
+		 */
+		t3_intr_clear(adap);
+
 		err = t3_init_hw(adap, 0);
 		if (err)
 			goto out;
@@ -946,9 +1007,16 @@ static int cxgb_up(struct adapter *adap)
 		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
 	}
 
-	if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
-		bind_qsets(adap);
-	adap->flags |= QUEUES_BOUND;
+	if (!(adap->flags & QUEUES_BOUND)) {
+		err = bind_qsets(adap);
+		if (err) {
+			CH_ERR(adap, "failed to bind qsets, err %d\n", err);
+			t3_intr_disable(adap);
+			free_irq_resources(adap);
+			goto out;
+		}
+		adap->flags |= QUEUES_BOUND;
+	}
 
 out:
 	return err;
@@ -967,19 +1035,7 @@ static void cxgb_down(struct adapter *adapter)
 	t3_intr_disable(adapter);
 	spin_unlock_irq(&adapter->work_lock);
 
-	if (adapter->flags & USING_MSIX) {
-		int i, n = 0;
-
-		free_irq(adapter->msix_info[0].vec, adapter);
-		for_each_port(adapter, i)
-			n += adap2pinfo(adapter, i)->nqsets;
-
-		for (i = 0; i < n; ++i)
-			free_irq(adapter->msix_info[i + 1].vec,
-				 &adapter->sge.qs[i]);
-	} else
-		free_irq(adapter->pdev->irq, adapter);
-
+	free_irq_resources(adapter);
 	flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
 	quiesce_rx(adapter);
 }
@@ -1100,9 +1156,9 @@ static int cxgb_close(struct net_device *dev)
 	netif_carrier_off(dev);
 	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
 
-	spin_lock(&adapter->work_lock);	/* sync with update task */
+	spin_lock_irq(&adapter->work_lock);	/* sync with update task */
 	clear_bit(pi->port_id, &adapter->open_device_map);
-	spin_unlock(&adapter->work_lock);
+	spin_unlock_irq(&adapter->work_lock);
 
 	if (!(adapter->open_device_map & PORT_MASK))
 		cancel_rearming_delayed_workqueue(cxgb3_wq,
@@ -1284,8 +1340,8 @@ static unsigned long collect_sge_port_stats(struct adapter *adapter,
 	int i;
 	unsigned long tot = 0;
 
-	for (i = 0; i < p->nqsets; ++i)
-		tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
+	for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
+		tot += adapter->sge.qs[i].port_stats[idx];
 
 	return tot;
 }
@@ -1485,11 +1541,22 @@ static int speed_duplex_to_caps(int speed, int duplex)
 
 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
+	int cap;
 	struct port_info *p = netdev_priv(dev);
 	struct link_config *lc = &p->link_config;
 
-	if (!(lc->supported & SUPPORTED_Autoneg))
-		return -EOPNOTSUPP;	/* can't change speed/duplex */
+	if (!(lc->supported & SUPPORTED_Autoneg)) {
+		/*
+		 * PHY offers a single speed/duplex.  See if that's what's
+		 * being requested.
+		 */
+		if (cmd->autoneg == AUTONEG_DISABLE) {
+			cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
+			if (lc->supported & cap)
+				return 0;
+		}
+		return -EINVAL;
+	}
 
 	if (cmd->autoneg == AUTONEG_DISABLE) {
 		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
@@ -1568,8 +1635,10 @@ static int set_rx_csum(struct net_device *dev, u32 data)
 		struct adapter *adap = p->adapter;
 		int i;
 
-		for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
+		for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
+			adap->params.sge.qset[i].lro = 0;
 			adap->sge.qs[i].lro_enabled = 0;
+		}
 	}
 	return 0;
 }
@@ -1775,6 +1844,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 		int i;
 		struct qset_params *q;
 		struct ch_qset_params t;
+		int q1 = pi->first_qset;
+		int nqsets = pi->nqsets;
 
 		if (!capable(CAP_NET_ADMIN))
 			return -EPERM;
@@ -1797,6 +1868,16 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 		    || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
 				 MAX_RSPQ_ENTRIES))
 			return -EINVAL;
+
+		if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
+			for_each_port(adapter, i) {
+				pi = adap2pinfo(adapter, i);
+				if (t.qset_idx >= pi->first_qset &&
+				    t.qset_idx < pi->first_qset + pi->nqsets &&
+				    !pi->rx_csum_offload)
+					return -EINVAL;
+			}
+
 		if ((adapter->flags & FULL_INIT_DONE) &&
 		    (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
 		     t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
@@ -1804,6 +1885,20 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 		     t.polling >= 0 || t.cong_thres >= 0))
 			return -EBUSY;
 
+		/* Allow setting of any available qset when offload enabled */
+		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
+			q1 = 0;
+			for_each_port(adapter, i) {
+				pi = adap2pinfo(adapter, i);
+				nqsets += pi->first_qset + pi->nqsets;
+			}
+		}
+
+		if (t.qset_idx < q1)
+			return -EINVAL;
+		if (t.qset_idx > q1 + nqsets - 1)
+			return -EINVAL;
+
 		q = &adapter->params.sge.qset[t.qset_idx];
 
 		if (t.rspq_size >= 0)
@@ -1853,13 +1948,26 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 	case CHELSIO_GET_QSET_PARAMS:{
 		struct qset_params *q;
 		struct ch_qset_params t;
+		int q1 = pi->first_qset;
+		int nqsets = pi->nqsets;
+		int i;
 
 		if (copy_from_user(&t, useraddr, sizeof(t)))
 			return -EFAULT;
-		if (t.qset_idx >= SGE_QSETS)
+
+		/* Display qsets for all ports when offload enabled */
+		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
+			q1 = 0;
+			for_each_port(adapter, i) {
+				pi = adap2pinfo(adapter, i);
+				nqsets = pi->first_qset + pi->nqsets;
+			}
+		}
+
+		if (t.qset_idx >= nqsets)
 			return -EINVAL;
-		q = &adapter->params.sge.qset[t.qset_idx];
+
+		q = &adapter->params.sge.qset[q1 + t.qset_idx];
 		t.rspq_size = q->rspq_size;
 		t.txq_size[0] = q->txq_size[0];
 		t.txq_size[1] = q->txq_size[1];
@@ -1870,6 +1978,12 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 		t.lro = q->lro;
 		t.intr_lat = q->coalesce_usecs;
 		t.cong_thres = q->cong_thres;
+		t.qnum = q1;
+
+		if (adapter->flags & USING_MSIX)
+			t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
+		else
+			t.vector = adapter->pdev->irq;
 
 		if (copy_to_user(useraddr, &t, sizeof(t)))
 			return -EFAULT;
@@ -2117,7 +2231,7 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 			mmd = data->phy_id >> 8;
 			if (!mmd)
 				mmd = MDIO_DEV_PCS;
-			else if (mmd > MDIO_DEV_XGXS)
+			else if (mmd > MDIO_DEV_VEND2)
 				return -EINVAL;
 
 			ret =
@@ -2143,7 +2257,7 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 			mmd = data->phy_id >> 8;
 			if (!mmd)
 				mmd = MDIO_DEV_PCS;
-			else if (mmd > MDIO_DEV_XGXS)
+			else if (mmd > MDIO_DEV_VEND2)
 				return -EINVAL;
 
 			ret =
@@ -2215,8 +2329,8 @@ static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
 {
 	int i;
 
-	for (i = 0; i < p->nqsets; i++) {
-		struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
+	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
+		struct sge_rspq *q = &adap->sge.qs[i].rspq;
 
 		spin_lock_irq(&q->lock);
 		spin_unlock_irq(&q->lock);
@@ -2290,7 +2404,7 @@ static void check_link_status(struct adapter *adapter)
 		struct net_device *dev = adapter->port[i];
 		struct port_info *p = netdev_priv(dev);
 
-		if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
+		if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev))
 			t3_link_changed(adapter, i);
 	}
 }
@@ -2355,10 +2469,10 @@ static void t3_adap_check_task(struct work_struct *work)
 		check_t3b2_mac(adapter);
 
 	/* Schedule the next check update if any port is active. */
-	spin_lock(&adapter->work_lock);
+	spin_lock_irq(&adapter->work_lock);
 	if (adapter->open_device_map & PORT_MASK)
 		schedule_chk_task(adapter);
-	spin_unlock(&adapter->work_lock);
+	spin_unlock_irq(&adapter->work_lock);
 }
 
 /*
@@ -2403,6 +2517,96 @@ void t3_os_ext_intr_handler(struct adapter *adapter)
 	spin_unlock(&adapter->work_lock);
 }
 
+static int t3_adapter_error(struct adapter *adapter, int reset)
+{
+	int i, ret = 0;
+
+	/* Stop all ports */
+	for_each_port(adapter, i) {
+		struct net_device *netdev = adapter->port[i];
+
+		if (netif_running(netdev))
+			cxgb_close(netdev);
+	}
+
+	if (is_offload(adapter) &&
+	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
+		offload_close(&adapter->tdev);
+
+	/* Stop SGE timers */
+	t3_stop_sge_timers(adapter);
+
+	adapter->flags &= ~FULL_INIT_DONE;
+
+	if (reset)
+		ret = t3_reset_adapter(adapter);
+
+	pci_disable_device(adapter->pdev);
+
+	return ret;
+}
+
+static int t3_reenable_adapter(struct adapter *adapter)
+{
+	if (pci_enable_device(adapter->pdev)) {
+		dev_err(&adapter->pdev->dev,
+			"Cannot re-enable PCI device after reset.\n");
+		goto err;
+	}
+	pci_set_master(adapter->pdev);
+	pci_restore_state(adapter->pdev);
+
+	/* Free sge resources */
+	t3_free_sge_resources(adapter);
+
+	if (t3_replay_prep_adapter(adapter))
+		goto err;
+
+	return 0;
+err:
+	return -1;
+}
+
+static void t3_resume_ports(struct adapter *adapter)
+{
+	int i;
+
+	/* Restart the ports */
+	for_each_port(adapter, i) {
+		struct net_device *netdev = adapter->port[i];
+
+		if (netif_running(netdev)) {
+			if (cxgb_open(netdev)) {
+				dev_err(&adapter->pdev->dev,
+					"can't bring device back up"
+					" after reset\n");
+				continue;
+			}
+		}
+	}
+}
+
+/*
+ * processes a fatal error.
+ * Bring the ports down, reset the chip, bring the ports back up.
+ */
+static void fatal_error_task(struct work_struct *work)
+{
+	struct adapter *adapter = container_of(work, struct adapter,
+					       fatal_error_handler_task);
+	int err = 0;
+
+	rtnl_lock();
+	err = t3_adapter_error(adapter, 1);
+	if (!err)
+		err = t3_reenable_adapter(adapter);
+	if (!err)
+		t3_resume_ports(adapter);
+
+	CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
+	rtnl_unlock();
+}
+
 void t3_fatal_err(struct adapter *adapter)
 {
 	unsigned int fw_status[4];
@@ -2413,7 +2617,11 @@ void t3_fatal_err(struct adapter *adapter)
 		t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
 		t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
 		t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
+
+		spin_lock(&adapter->work_lock);
 		t3_intr_disable(adapter);
+		queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
+		spin_unlock(&adapter->work_lock);
 	}
 	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
 	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
@@ -2435,23 +2643,9 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
 					     pci_channel_state_t state)
 {
 	struct adapter *adapter = pci_get_drvdata(pdev);
-	int i;
-
-	/* Stop all ports */
-	for_each_port(adapter, i) {
-		struct net_device *netdev = adapter->port[i];
-
-		if (netif_running(netdev))
-			cxgb_close(netdev);
-	}
-
-	if (is_offload(adapter) &&
-	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
-		offload_close(&adapter->tdev);
-
-	adapter->flags &= ~FULL_INIT_DONE;
+	int ret;
 
-	pci_disable_device(pdev);
+	ret = t3_adapter_error(adapter, 0);
 
 	/* Request a slot reset. */
 	return PCI_ERS_RESULT_NEED_RESET;
@@ -2467,22 +2661,9 @@ static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
 {
 	struct adapter *adapter = pci_get_drvdata(pdev);
 
-	if (pci_enable_device(pdev)) {
-		dev_err(&pdev->dev,
-			"Cannot re-enable PCI device after reset.\n");
-		goto err;
-	}
-	pci_set_master(pdev);
-	pci_restore_state(pdev);
-
-	/* Free sge resources */
-	t3_free_sge_resources(adapter);
-
-	if (t3_replay_prep_adapter(adapter))
-		goto err;
+	if (!t3_reenable_adapter(adapter))
+		return PCI_ERS_RESULT_RECOVERED;
 
-	return PCI_ERS_RESULT_RECOVERED;
-err:
 	return PCI_ERS_RESULT_DISCONNECT;
 }
@@ -2496,22 +2677,8 @@ err:
 static void t3_io_resume(struct pci_dev *pdev)
 {
 	struct adapter *adapter = pci_get_drvdata(pdev);
-	int i;
 
-	/* Restart the ports */
-	for_each_port(adapter, i) {
-		struct net_device *netdev = adapter->port[i];
-
-		if (netif_running(netdev)) {
-			if (cxgb_open(netdev)) {
-				dev_err(&pdev->dev,
-					"can't bring device back up"
-					" after reset\n");
-				continue;
-			}
-			netif_device_attach(netdev);
-		}
-	}
+	t3_resume_ports(adapter);
 }
 
 static struct pci_error_handlers t3_err_handler = {
@@ -2520,6 +2687,42 @@ static struct pci_error_handlers t3_err_handler = {
 	.resume = t3_io_resume,
 };
 
+/*
+ * Set the number of qsets based on the number of CPUs and the number of ports,
+ * not to exceed the number of available qsets, assuming there are enough qsets
+ * per port in HW.
+ */
+static void set_nqsets(struct adapter *adap)
+{
+	int i, j = 0;
+	int num_cpus = num_online_cpus();
+	int hwports = adap->params.nports;
+	int nqsets = SGE_QSETS;
+
+	if (adap->params.rev > 0) {
+		if (hwports == 2 &&
+		    (hwports * nqsets > SGE_QSETS ||
+		     num_cpus >= nqsets / hwports))
+			nqsets /= hwports;
+		if (nqsets > num_cpus)
+			nqsets = num_cpus;
+		if (nqsets < 1 || hwports == 4)
+			nqsets = 1;
+	} else
+		nqsets = 1;
+
+	for_each_port(adap, i) {
+		struct port_info *pi = adap2pinfo(adap, i);
+
+		pi->first_qset = j;
+		pi->nqsets = nqsets;
+		j = pi->first_qset + nqsets;
+
+		dev_info(&adap->pdev->dev,
+			 "Port %d using %d queue sets.\n", i, nqsets);
+	}
+}
+
 static int __devinit cxgb_enable_msix(struct adapter *adap)
 {
 	struct msix_entry entries[SGE_QSETS + 1];
@@ -2564,7 +2767,7 @@ static void __devinit print_port_info(struct adapter *adap,
 		if (!test_bit(i, &adap->registered_device_map))
 			continue;
 		printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
-		       dev->name, ai->desc, pi->port_type->desc,
+		       dev->name, ai->desc, pi->phy.desc,
 		       is_offload(adap) ? "R" : "", adap->params.rev, buf,
 		       (adap->flags & USING_MSIX) ? " MSI-X" :
 		       (adap->flags & USING_MSI) ? " MSI" : "");
@@ -2660,6 +2863,7 @@ static int __devinit init_one(struct pci_dev *pdev,
 
 	INIT_LIST_HEAD(&adapter->adapter_list);
 	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
+	INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
 	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
 
 	for (i = 0; i < ai->nports; ++i) {
@@ -2677,9 +2881,6 @@ static int __devinit init_one(struct pci_dev *pdev,
 		pi = netdev_priv(netdev);
 		pi->adapter = adapter;
 		pi->rx_csum_offload = 1;
-		pi->nqsets = 1;
-		pi->first_qset = i;
-		pi->activity = 0;
 		pi->port_id = i;
 		netif_carrier_off(netdev);
 		netdev->irq = pdev->irq;
@@ -2756,6 +2957,8 @@ static int __devinit init_one(struct pci_dev *pdev,
 	else if (msi > 0 && pci_enable_msi(pdev) == 0)
 		adapter->flags |= USING_MSI;
 
+	set_nqsets(adapter);
+
 	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
 				 &cxgb3_attr_group);
 
@@ -2801,6 +3004,7 @@ static void __devexit remove_one(struct pci_dev *pdev)
 			if (test_bit(i, &adapter->registered_device_map))
 				unregister_netdev(adapter->port[i]);
 
+		t3_stop_sge_timers(adapter);
 		t3_free_sge_resources(adapter);
 		cxgb_disable_msi(adapter);
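The queue-set distribution introduced by set_nqsets() above is easier to follow outside diff markers. Below is a minimal standalone sketch of the same heuristic in plain user-space C; SGE_QSETS, struct fake_port, and the port/CPU counts in main() are assumptions for illustration, not the driver's own definitions.

```c
/* Standalone sketch of the set_nqsets() heuristic (illustrative values). */
#include <stdio.h>

#define SGE_QSETS 8	/* total queue sets offered by the hardware (assumed) */

struct fake_port {
	int first_qset;		/* index of the port's first queue set */
	int nqsets;		/* number of queue sets owned by the port */
};

static void distribute_qsets(struct fake_port *ports, int hwports,
			     int num_cpus, int rev)
{
	int i, j = 0;
	int nqsets = SGE_QSETS;

	if (rev > 0) {				/* newer revs: multiple qsets */
		if (hwports == 2 &&
		    (hwports * nqsets > SGE_QSETS ||
		     num_cpus >= nqsets / hwports))
			nqsets /= hwports;	/* split the pool between ports */
		if (nqsets > num_cpus)
			nqsets = num_cpus;	/* no benefit beyond CPU count */
		if (nqsets < 1 || hwports == 4)
			nqsets = 1;
	} else
		nqsets = 1;			/* rev 0: one qset per port */

	for (i = 0; i < hwports; i++) {
		ports[i].first_qset = j;	/* contiguous range per port */
		ports[i].nqsets = nqsets;
		j += nqsets;
	}
}

int main(void)
{
	struct fake_port ports[2];
	int i;

	distribute_qsets(ports, 2, 4, 1);	/* 2 ports, 4 CPUs, rev > 0 */
	for (i = 0; i < 2; i++)
		printf("port %d: first_qset=%d nqsets=%d\n",
		       i, ports[i].first_qset, ports[i].nqsets);
	return 0;
}
```

With two ports, four CPUs and a rev > 0 adapter, the sketch assigns contiguous ranges (port 0 gets qsets 0-3, port 1 gets qsets 4-7), which is the layout the rest of the patch relies on wherever loops now run from first_qset to first_qset + nqsets.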
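The fatal-error handling added above chains three helpers from a workqueue and stops at the first failure. The following is a compressed sketch of that control flow only; the stub functions and their messages are invented stand-ins for t3_adapter_error(), t3_reenable_adapter() and t3_resume_ports(), not driver code.

```c
/* Control-flow sketch of fatal_error_task(): stop, reset, re-enable, resume. */
#include <stdio.h>

static int stop_and_reset(void)   { puts("ports down, chip reset"); return 0; }
static int reenable_device(void)  { puts("PCI re-enabled, SGE freed"); return 0; }
static void resume_ports(void)    { puts("ports back up"); }

static void recover_from_fatal_error(void)
{
	int err;

	/* mirrors: err = t3_adapter_error(adapter, 1); */
	err = stop_and_reset();
	if (!err)
		/* mirrors: err = t3_reenable_adapter(adapter); */
		err = reenable_device();
	if (!err)
		/* mirrors: t3_resume_ports(adapter); */
		resume_ports();

	printf("adapter reset %s\n", err ? "failed" : "succeeded");
}

int main(void)
{
	recover_from_fatal_error();
	return 0;
}
```

Running the sketch prints the three stages in order and then "adapter reset succeeded", mirroring the CH_ALERT at the end of fatal_error_task(); any non-zero return short-circuits the remaining stages, just as in the patch.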