Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c   |   3
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c                |  71
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.h                |   8
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c           |   2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c         |   2
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c          |   2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c      |   4
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c  |   1
-rw-r--r--  drivers/net/ethernet/octeon/octeon_mgmt.c          | 312
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c       |   1
-rw-r--r--  drivers/net/ethernet/seeq/seeq8005.c               |   4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h       |  42
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c  |  33
13 files changed, 305 insertions, 180 deletions
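
Editor's note: several hunks below (chelsio/cxgb4, cxgb4vf, intel/igb and intel/ixgbe) convert bare alloc_page()/alloc_pages() calls in their RX refill paths to the __skb_alloc_page()/__skb_alloc_pages() helpers, which take the skb the page is allocated for (or NULL) so that pages drawn from the emergency reserves can be accounted against it. A minimal sketch of that refill pattern follows; the demo_* names are illustrative and not taken from any one driver, and the flag choices simply mirror the calls visible in the diff.

#include <linux/gfp.h>
#include <linux/skbuff.h>

struct demo_rx_buffer {
	struct sk_buff *skb;
	struct page *page;
};

/* Refill one RX buffer, mirroring the converted drivers' pattern. */
static bool demo_alloc_mapped_page(struct demo_rx_buffer *bi, unsigned int order)
{
	struct page *page = bi->page;

	/* Nothing to do if the buffer already owns a page. */
	if (likely(page))
		return true;

	if (order)
		/* Multi-page buffers (ixgbe-style) use the _pages variant. */
		page = __skb_alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP,
					 bi->skb, order);
	else
		/* Single-page buffers (igb/cxgb4-style). */
		page = __skb_alloc_page(GFP_ATOMIC, bi->skb);

	if (unlikely(!page))
		return false;	/* caller bumps its alloc_failed counter */

	bi->page = page;
	return true;
}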
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 9aaf863b423..dd451c3dd83 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -9360,8 +9360,7 @@ static int __devinit bnx2x_prev_mark_path(struct bnx2x *bp) struct bnx2x_prev_path_list *tmp_list; int rc; - tmp_list = (struct bnx2x_prev_path_list *) - kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL); + tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL); if (!tmp_list) { BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n"); return -ENOMEM; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 9a009fd6ea1..bf906c51d82 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -92,7 +92,7 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits) #define DRV_MODULE_NAME "tg3" #define TG3_MAJ_NUM 3 -#define TG3_MIN_NUM 123 +#define TG3_MIN_NUM 124 #define DRV_MODULE_VERSION \ __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) #define DRV_MODULE_RELDATE "March 21, 2012" @@ -672,6 +672,12 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum) else bit = 1 << tp->pci_fn; break; + case TG3_APE_LOCK_PHY0: + case TG3_APE_LOCK_PHY1: + case TG3_APE_LOCK_PHY2: + case TG3_APE_LOCK_PHY3: + bit = APE_LOCK_REQ_DRIVER; + break; default: return -EINVAL; } @@ -723,6 +729,12 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum) else bit = 1 << tp->pci_fn; break; + case TG3_APE_LOCK_PHY0: + case TG3_APE_LOCK_PHY1: + case TG3_APE_LOCK_PHY2: + case TG3_APE_LOCK_PHY3: + bit = APE_LOCK_GRANT_DRIVER; + break; default: return; } @@ -1052,6 +1064,8 @@ static int tg3_readphy(struct tg3 *tp, int reg, u32 *val) udelay(80); } + tg3_ape_lock(tp, tp->phy_ape_lock); + *val = 0x0; frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) & @@ -1086,6 +1100,8 @@ static int tg3_readphy(struct tg3 *tp, int reg, u32 *val) udelay(80); } + tg3_ape_unlock(tp, tp->phy_ape_lock); + return ret; } @@ -1105,6 +1121,8 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val) udelay(80); } + tg3_ape_lock(tp, tp->phy_ape_lock); + frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) & MI_COM_PHY_ADDR_MASK); frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) & @@ -1135,6 +1153,8 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val) udelay(80); } + tg3_ape_unlock(tp, tp->phy_ape_lock); + return ret; } @@ -9066,8 +9086,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || tg3_flag(tp, 57765_PLUS)) { val = tr32(TG3_RDMA_RSRVCTRL_REG); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) { val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK | TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK | TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK); @@ -9257,6 +9276,19 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32_f(RDMAC_MODE, rdmac_mode); udelay(40); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) { + for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) { + if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp)) + break; + } + if (i < TG3_NUM_RDMA_CHANNELS) { + val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); + val |= TG3_LSO_RD_DMA_TX_LENGTH_WA; + tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); + tg3_flag_set(tp, 5719_RDMA_BUG); + } + } + tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE); if 
(!tg3_flag(tp, 5705_PLUS)) tw32(MBFREE_MODE, MBFREE_MODE_ENABLE); @@ -9616,6 +9648,16 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp) TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST); TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST); TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST); + if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) && + (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low + + sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) { + u32 val; + + val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); + val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA; + tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); + tg3_flag_clear(tp, 5719_RDMA_BUG); + } TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS); TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS); @@ -12482,10 +12524,12 @@ static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev, { struct tg3 *tp = netdev_priv(dev); - if (!tp->hw_stats) + spin_lock_bh(&tp->lock); + if (!tp->hw_stats) { + spin_unlock_bh(&tp->lock); return &tp->net_stats_prev; + } - spin_lock_bh(&tp->lock); tg3_get_nstats(tp, stats); spin_unlock_bh(&tp->lock); @@ -13648,6 +13692,23 @@ static int __devinit tg3_phy_probe(struct tg3 *tp) tg3_flag_set(tp, PAUSE_AUTONEG); tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; + if (tg3_flag(tp, ENABLE_APE)) { + switch (tp->pci_fn) { + case 0: + tp->phy_ape_lock = TG3_APE_LOCK_PHY0; + break; + case 1: + tp->phy_ape_lock = TG3_APE_LOCK_PHY1; + break; + case 2: + tp->phy_ape_lock = TG3_APE_LOCK_PHY2; + break; + case 3: + tp->phy_ape_lock = TG3_APE_LOCK_PHY3; + break; + } + } + if (tg3_flag(tp, USE_PHYLIB)) return tg3_phy_init(tp); diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index a1b75cd67b9..6d52cb28682 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -1376,7 +1376,11 @@ #define TG3_LSO_RD_DMA_CRPTEN_CTRL 0x00004910 #define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K 0x00030000 #define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K 0x000c0000 -/* 0x4914 --> 0x4c00 unused */ +#define TG3_LSO_RD_DMA_TX_LENGTH_WA 0x02000000 +/* 0x4914 --> 0x4be0 unused */ + +#define TG3_NUM_RDMA_CHANNELS 4 +#define TG3_RDMA_LENGTH 0x00004be0 /* Write DMA control registers */ #define WDMAC_MODE 0x00004c00 @@ -2959,6 +2963,7 @@ enum TG3_FLAGS { TG3_FLAG_L1PLLPD_EN, TG3_FLAG_APE_HAS_NCSI, TG3_FLAG_4K_FIFO_LIMIT, + TG3_FLAG_5719_RDMA_BUG, TG3_FLAG_RESET_TASK_PENDING, TG3_FLAG_5705_PLUS, TG3_FLAG_IS_5788, @@ -3107,6 +3112,7 @@ struct tg3 { int old_link; u8 phy_addr; + u8 phy_ape_lock; /* PHY info */ u32 phy_id; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 8596acaa402..d49933ed551 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -528,7 +528,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, #endif while (n--) { - pg = alloc_page(gfp); + pg = __skb_alloc_page(gfp, NULL); if (unlikely(!pg)) { q->alloc_failed++; break; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index f2d1ecdcaf9..8877fbfefb6 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -653,7 +653,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, alloc_small_pages: while (n--) { - page = alloc_page(gfp | __GFP_NOWARN | __GFP_COLD); + page = __skb_alloc_page(gfp | __GFP_NOWARN, NULL); if (unlikely(!page)) { fl->alloc_failed++; 
break; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 1050411e7ca..b7c2d505057 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -6235,7 +6235,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, return true; if (!page) { - page = alloc_page(GFP_ATOMIC | __GFP_COLD); + page = __skb_alloc_page(GFP_ATOMIC, bi->skb); bi->page = page; if (unlikely(!page)) { rx_ring->rx_stats.alloc_failed++; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index c709eae58c6..4326f74f713 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1141,8 +1141,8 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, /* alloc new page for storage */ if (likely(!page)) { - page = alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP, - ixgbe_rx_pg_order(rx_ring)); + page = __skb_alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP, + bi->skb, ixgbe_rx_pg_order(rx_ring)); if (unlikely(!page)) { rx_ring->rx_stats.alloc_rx_page_failed++; return false; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 3f9841d619a..60ef6458741 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -352,7 +352,6 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter, adapter->alloc_rx_buff_failed++; goto no_buffers; } - bi->skb = skb; } if (!bi->dma) { diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c index cd827ff4a02..c42bbb16cda 100644 --- a/drivers/net/ethernet/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/octeon/octeon_mgmt.c @@ -6,19 +6,21 @@ * Copyright (C) 2009 Cavium Networks */ -#include <linux/capability.h> +#include <linux/platform_device.h> #include <linux/dma-mapping.h> -#include <linux/init.h> -#include <linux/module.h> +#include <linux/etherdevice.h> +#include <linux/capability.h> #include <linux/interrupt.h> -#include <linux/platform_device.h> #include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/if.h> +#include <linux/spinlock.h> #include <linux/if_vlan.h> +#include <linux/of_mdio.h> +#include <linux/module.h> +#include <linux/of_net.h> +#include <linux/init.h> #include <linux/slab.h> #include <linux/phy.h> -#include <linux/spinlock.h> +#include <linux/io.h> #include <asm/octeon/octeon.h> #include <asm/octeon/cvmx-mixx-defs.h> @@ -58,8 +60,56 @@ union mgmt_port_ring_entry { } s; }; +#define MIX_ORING1 0x0 +#define MIX_ORING2 0x8 +#define MIX_IRING1 0x10 +#define MIX_IRING2 0x18 +#define MIX_CTL 0x20 +#define MIX_IRHWM 0x28 +#define MIX_IRCNT 0x30 +#define MIX_ORHWM 0x38 +#define MIX_ORCNT 0x40 +#define MIX_ISR 0x48 +#define MIX_INTENA 0x50 +#define MIX_REMCNT 0x58 +#define MIX_BIST 0x78 + +#define AGL_GMX_PRT_CFG 0x10 +#define AGL_GMX_RX_FRM_CTL 0x18 +#define AGL_GMX_RX_FRM_MAX 0x30 +#define AGL_GMX_RX_JABBER 0x38 +#define AGL_GMX_RX_STATS_CTL 0x50 + +#define AGL_GMX_RX_STATS_PKTS_DRP 0xb0 +#define AGL_GMX_RX_STATS_OCTS_DRP 0xb8 +#define AGL_GMX_RX_STATS_PKTS_BAD 0xc0 + +#define AGL_GMX_RX_ADR_CTL 0x100 +#define AGL_GMX_RX_ADR_CAM_EN 0x108 +#define AGL_GMX_RX_ADR_CAM0 0x180 +#define AGL_GMX_RX_ADR_CAM1 0x188 +#define AGL_GMX_RX_ADR_CAM2 0x190 +#define AGL_GMX_RX_ADR_CAM3 0x198 +#define AGL_GMX_RX_ADR_CAM4 0x1a0 +#define AGL_GMX_RX_ADR_CAM5 0x1a8 + +#define 
AGL_GMX_TX_STATS_CTL 0x268 +#define AGL_GMX_TX_CTL 0x270 +#define AGL_GMX_TX_STAT0 0x280 +#define AGL_GMX_TX_STAT1 0x288 +#define AGL_GMX_TX_STAT2 0x290 +#define AGL_GMX_TX_STAT3 0x298 +#define AGL_GMX_TX_STAT4 0x2a0 +#define AGL_GMX_TX_STAT5 0x2a8 +#define AGL_GMX_TX_STAT6 0x2b0 +#define AGL_GMX_TX_STAT7 0x2b8 +#define AGL_GMX_TX_STAT8 0x2c0 +#define AGL_GMX_TX_STAT9 0x2c8 + struct octeon_mgmt { struct net_device *netdev; + u64 mix; + u64 agl; int port; int irq; u64 *tx_ring; @@ -85,31 +135,34 @@ struct octeon_mgmt { struct napi_struct napi; struct tasklet_struct tx_clean_tasklet; struct phy_device *phydev; + struct device_node *phy_np; + resource_size_t mix_phys; + resource_size_t mix_size; + resource_size_t agl_phys; + resource_size_t agl_size; }; static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable) { - int port = p->port; union cvmx_mixx_intena mix_intena; unsigned long flags; spin_lock_irqsave(&p->lock, flags); - mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port)); + mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA); mix_intena.s.ithena = enable ? 1 : 0; - cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64); + cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64); spin_unlock_irqrestore(&p->lock, flags); } static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable) { - int port = p->port; union cvmx_mixx_intena mix_intena; unsigned long flags; spin_lock_irqsave(&p->lock, flags); - mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port)); + mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA); mix_intena.s.othena = enable ? 1 : 0; - cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64); + cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64); spin_unlock_irqrestore(&p->lock, flags); } @@ -146,7 +199,6 @@ static unsigned int ring_size_to_bytes(unsigned int ring_size) static void octeon_mgmt_rx_fill_ring(struct net_device *netdev) { struct octeon_mgmt *p = netdev_priv(netdev); - int port = p->port; while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) { unsigned int size; @@ -177,24 +229,23 @@ static void octeon_mgmt_rx_fill_ring(struct net_device *netdev) (p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE; p->rx_current_fill++; /* Ring the bell. */ - cvmx_write_csr(CVMX_MIXX_IRING2(port), 1); + cvmx_write_csr(p->mix + MIX_IRING2, 1); } } static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p) { - int port = p->port; union cvmx_mixx_orcnt mix_orcnt; union mgmt_port_ring_entry re; struct sk_buff *skb; int cleaned = 0; unsigned long flags; - mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port)); + mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT); while (mix_orcnt.s.orcnt) { spin_lock_irqsave(&p->tx_list.lock, flags); - mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port)); + mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT); if (mix_orcnt.s.orcnt == 0) { spin_unlock_irqrestore(&p->tx_list.lock, flags); @@ -214,7 +265,7 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p) mix_orcnt.s.orcnt = 1; /* Acknowledge to hardware that we have the buffer. 
*/ - cvmx_write_csr(CVMX_MIXX_ORCNT(port), mix_orcnt.u64); + cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64); p->tx_current_fill--; spin_unlock_irqrestore(&p->tx_list.lock, flags); @@ -224,7 +275,7 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p) dev_kfree_skb_any(skb); cleaned++; - mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port)); + mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT); } if (cleaned && netif_queue_stopped(p->netdev)) @@ -241,13 +292,12 @@ static void octeon_mgmt_clean_tx_tasklet(unsigned long arg) static void octeon_mgmt_update_rx_stats(struct net_device *netdev) { struct octeon_mgmt *p = netdev_priv(netdev); - int port = p->port; unsigned long flags; u64 drop, bad; /* These reads also clear the count registers. */ - drop = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port)); - bad = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port)); + drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP); + bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD); if (drop || bad) { /* Do an atomic update. */ @@ -261,15 +311,14 @@ static void octeon_mgmt_update_rx_stats(struct net_device *netdev) static void octeon_mgmt_update_tx_stats(struct net_device *netdev) { struct octeon_mgmt *p = netdev_priv(netdev); - int port = p->port; unsigned long flags; union cvmx_agl_gmx_txx_stat0 s0; union cvmx_agl_gmx_txx_stat1 s1; /* These reads also clear the count registers. */ - s0.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT0(port)); - s1.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT1(port)); + s0.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT0); + s1.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT1); if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) { /* Do an atomic update. */ @@ -308,7 +357,6 @@ static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p, static int octeon_mgmt_receive_one(struct octeon_mgmt *p) { - int port = p->port; struct net_device *netdev = p->netdev; union cvmx_mixx_ircnt mix_ircnt; union mgmt_port_ring_entry re; @@ -381,18 +429,17 @@ done: /* Tell the hardware we processed a packet. */ mix_ircnt.u64 = 0; mix_ircnt.s.ircnt = 1; - cvmx_write_csr(CVMX_MIXX_IRCNT(port), mix_ircnt.u64); + cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64); return rc; } static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget) { - int port = p->port; unsigned int work_done = 0; union cvmx_mixx_ircnt mix_ircnt; int rc; - mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port)); + mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT); while (work_done < budget && mix_ircnt.s.ircnt) { rc = octeon_mgmt_receive_one(p); @@ -400,7 +447,7 @@ static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget) work_done++; /* Check for more packets. 
*/ - mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port)); + mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT); } octeon_mgmt_rx_fill_ring(p->netdev); @@ -434,16 +481,16 @@ static void octeon_mgmt_reset_hw(struct octeon_mgmt *p) union cvmx_agl_gmx_bist agl_gmx_bist; mix_ctl.u64 = 0; - cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64); + cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); do { - mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(p->port)); + mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL); } while (mix_ctl.s.busy); mix_ctl.s.reset = 1; - cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64); - cvmx_read_csr(CVMX_MIXX_CTL(p->port)); + cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); + cvmx_read_csr(p->mix + MIX_CTL); cvmx_wait(64); - mix_bist.u64 = cvmx_read_csr(CVMX_MIXX_BIST(p->port)); + mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST); if (mix_bist.u64) dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n", (unsigned long long)mix_bist.u64); @@ -474,7 +521,6 @@ static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs, static void octeon_mgmt_set_rx_filtering(struct net_device *netdev) { struct octeon_mgmt *p = netdev_priv(netdev); - int port = p->port; union cvmx_agl_gmx_rxx_adr_ctl adr_ctl; union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx; unsigned long flags; @@ -520,29 +566,29 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev) spin_lock_irqsave(&p->lock, flags); /* Disable packet I/O. */ - agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port)); + agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); prev_packet_enable = agl_gmx_prtx.s.en; agl_gmx_prtx.s.en = 0; - cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64); + cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64); adr_ctl.u64 = 0; adr_ctl.s.cam_mode = cam_mode; adr_ctl.s.mcst = multicast_mode; adr_ctl.s.bcst = 1; /* Allow broadcast */ - cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port), adr_ctl.u64); + cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CTL, adr_ctl.u64); - cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM0(port), cam_state.cam[0]); - cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM1(port), cam_state.cam[1]); - cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM2(port), cam_state.cam[2]); - cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM3(port), cam_state.cam[3]); - cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM4(port), cam_state.cam[4]); - cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM5(port), cam_state.cam[5]); - cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), cam_state.cam_mask); + cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM0, cam_state.cam[0]); + cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM1, cam_state.cam[1]); + cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM2, cam_state.cam[2]); + cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM3, cam_state.cam[3]); + cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM4, cam_state.cam[4]); + cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM5, cam_state.cam[5]); + cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM_EN, cam_state.cam_mask); /* Restore packet I/O. 
*/ agl_gmx_prtx.s.en = prev_packet_enable; - cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64); + cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64); spin_unlock_irqrestore(&p->lock, flags); } @@ -564,7 +610,6 @@ static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr) static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu) { struct octeon_mgmt *p = netdev_priv(netdev); - int port = p->port; int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM; /* @@ -580,8 +625,8 @@ static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu) netdev->mtu = new_mtu; - cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_MAX(port), size_without_fcs); - cvmx_write_csr(CVMX_AGL_GMX_RXX_JABBER(port), + cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs); + cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER, (size_without_fcs + 7) & 0xfff8); return 0; @@ -591,14 +636,13 @@ static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id) { struct net_device *netdev = dev_id; struct octeon_mgmt *p = netdev_priv(netdev); - int port = p->port; union cvmx_mixx_isr mixx_isr; - mixx_isr.u64 = cvmx_read_csr(CVMX_MIXX_ISR(port)); + mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR); /* Clear any pending interrupts */ - cvmx_write_csr(CVMX_MIXX_ISR(port), mixx_isr.u64); - cvmx_read_csr(CVMX_MIXX_ISR(port)); + cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64); + cvmx_read_csr(p->mix + MIX_ISR); if (mixx_isr.s.irthresh) { octeon_mgmt_disable_rx_irq(p); @@ -629,7 +673,6 @@ static int octeon_mgmt_ioctl(struct net_device *netdev, static void octeon_mgmt_adjust_link(struct net_device *netdev) { struct octeon_mgmt *p = netdev_priv(netdev); - int port = p->port; union cvmx_agl_gmx_prtx_cfg prtx_cfg; unsigned long flags; int link_changed = 0; @@ -640,11 +683,9 @@ static void octeon_mgmt_adjust_link(struct net_device *netdev) link_changed = 1; if (p->last_duplex != p->phydev->duplex) { p->last_duplex = p->phydev->duplex; - prtx_cfg.u64 = - cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port)); + prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); prtx_cfg.s.duplex = p->phydev->duplex; - cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), - prtx_cfg.u64); + cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); } } else { if (p->last_link) @@ -670,18 +711,16 @@ static void octeon_mgmt_adjust_link(struct net_device *netdev) static int octeon_mgmt_init_phy(struct net_device *netdev) { struct octeon_mgmt *p = netdev_priv(netdev); - char phy_id[MII_BUS_ID_SIZE + 3]; - if (octeon_is_simulation()) { + if (octeon_is_simulation() || p->phy_np == NULL) { /* No PHYs in the simulator. */ netif_carrier_on(netdev); return 0; } - snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "mdio-octeon-0", p->port); - - p->phydev = phy_connect(netdev, phy_id, octeon_mgmt_adjust_link, 0, - PHY_INTERFACE_MODE_MII); + p->phydev = of_phy_connect(netdev, p->phy_np, + octeon_mgmt_adjust_link, 0, + PHY_INTERFACE_MODE_MII); if (IS_ERR(p->phydev)) { p->phydev = NULL; @@ -737,14 +776,14 @@ static int octeon_mgmt_open(struct net_device *netdev) octeon_mgmt_reset_hw(p); - mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port)); + mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL); /* Bring it out of reset if needed. 
*/ if (mix_ctl.s.reset) { mix_ctl.s.reset = 0; - cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64); + cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); do { - mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port)); + mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL); } while (mix_ctl.s.reset); } @@ -755,17 +794,17 @@ static int octeon_mgmt_open(struct net_device *netdev) oring1.u64 = 0; oring1.s.obase = p->tx_ring_handle >> 3; oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE; - cvmx_write_csr(CVMX_MIXX_ORING1(port), oring1.u64); + cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64); iring1.u64 = 0; iring1.s.ibase = p->rx_ring_handle >> 3; iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE; - cvmx_write_csr(CVMX_MIXX_IRING1(port), iring1.u64); + cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64); /* Disable packet I/O. */ - prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port)); + prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); prtx_cfg.s.en = 0; - cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64); + cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN); octeon_mgmt_set_mac_address(netdev, &sa); @@ -782,7 +821,7 @@ static int octeon_mgmt_open(struct net_device *netdev) mix_ctl.s.nbtarb = 0; /* Arbitration mode */ /* MII CB-request FIFO programmable high watermark */ mix_ctl.s.mrq_hwm = 1; - cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64); + cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) { @@ -809,16 +848,16 @@ static int octeon_mgmt_open(struct net_device *netdev) /* Clear statistics. */ /* Clear on read. */ - cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_CTL(port), 1); - cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port), 0); - cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port), 0); + cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1); + cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0); + cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0); - cvmx_write_csr(CVMX_AGL_GMX_TXX_STATS_CTL(port), 1); - cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT0(port), 0); - cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT1(port), 0); + cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1); + cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0); + cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0); /* Clear any pending interrupts */ - cvmx_write_csr(CVMX_MIXX_ISR(port), cvmx_read_csr(CVMX_MIXX_ISR(port))); + cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR)); if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name, netdev)) { @@ -829,18 +868,18 @@ static int octeon_mgmt_open(struct net_device *netdev) /* Interrupt every single RX packet */ mix_irhwm.u64 = 0; mix_irhwm.s.irhwm = 0; - cvmx_write_csr(CVMX_MIXX_IRHWM(port), mix_irhwm.u64); + cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64); /* Interrupt when we have 1 or more packets to clean. */ mix_orhwm.u64 = 0; mix_orhwm.s.orhwm = 1; - cvmx_write_csr(CVMX_MIXX_ORHWM(port), mix_orhwm.u64); + cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64); /* Enable receive and transmit interrupts */ mix_intena.u64 = 0; mix_intena.s.ithena = 1; mix_intena.s.othena = 1; - cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64); + cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64); /* Enable packet I/O. */ @@ -871,7 +910,7 @@ static int octeon_mgmt_open(struct net_device *netdev) * frame. GMX checks that the PREAMBLE is sent correctly. 
*/ rxx_frm_ctl.s.pre_chk = 1; - cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port), rxx_frm_ctl.u64); + cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); /* Enable the AGL block */ agl_gmx_inf_mode.u64 = 0; @@ -879,13 +918,13 @@ static int octeon_mgmt_open(struct net_device *netdev) cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64); /* Configure the port duplex and enables */ - prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port)); + prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); prtx_cfg.s.tx_en = 1; prtx_cfg.s.rx_en = 1; prtx_cfg.s.en = 1; p->last_duplex = 1; prtx_cfg.s.duplex = p->last_duplex; - cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64); + cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); p->last_link = 0; netif_carrier_off(netdev); @@ -949,7 +988,6 @@ static int octeon_mgmt_stop(struct net_device *netdev) static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev) { struct octeon_mgmt *p = netdev_priv(netdev); - int port = p->port; union mgmt_port_ring_entry re; unsigned long flags; int rv = NETDEV_TX_BUSY; @@ -993,7 +1031,7 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev) netdev->stats.tx_bytes += skb->len; /* Ring the bell. */ - cvmx_write_csr(CVMX_MIXX_ORING2(port), 1); + cvmx_write_csr(p->mix + MIX_ORING2, 1); rv = NETDEV_TX_OK; out: @@ -1071,10 +1109,14 @@ static const struct net_device_ops octeon_mgmt_ops = { static int __devinit octeon_mgmt_probe(struct platform_device *pdev) { - struct resource *res_irq; struct net_device *netdev; struct octeon_mgmt *p; - int i; + const __be32 *data; + const u8 *mac; + struct resource *res_mix; + struct resource *res_agl; + int len; + int result; netdev = alloc_etherdev(sizeof(struct octeon_mgmt)); if (netdev == NULL) @@ -1088,14 +1130,63 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev) p->netdev = netdev; p->dev = &pdev->dev; - p->port = pdev->id; + data = of_get_property(pdev->dev.of_node, "cell-index", &len); + if (data && len == sizeof(*data)) { + p->port = be32_to_cpup(data); + } else { + dev_err(&pdev->dev, "no 'cell-index' property\n"); + result = -ENXIO; + goto err; + } + snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port); - res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!res_irq) + result = platform_get_irq(pdev, 0); + if (result < 0) + goto err; + + p->irq = result; + + res_mix = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res_mix == NULL) { + dev_err(&pdev->dev, "no 'reg' resource\n"); + result = -ENXIO; + goto err; + } + + res_agl = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (res_agl == NULL) { + dev_err(&pdev->dev, "no 'reg' resource\n"); + result = -ENXIO; + goto err; + } + + p->mix_phys = res_mix->start; + p->mix_size = resource_size(res_mix); + p->agl_phys = res_agl->start; + p->agl_size = resource_size(res_agl); + + + if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size, + res_mix->name)) { + dev_err(&pdev->dev, "request_mem_region (%s) failed\n", + res_mix->name); + result = -ENXIO; + goto err; + } + + if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size, + res_agl->name)) { + result = -ENXIO; + dev_err(&pdev->dev, "request_mem_region (%s) failed\n", + res_agl->name); goto err; + } + + + p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size); + p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size); - p->irq = res_irq->start; spin_lock_init(&p->lock); skb_queue_head_init(&p->tx_list); @@ -1108,24 +1199,26 @@ static int 
__devinit octeon_mgmt_probe(struct platform_device *pdev) netdev->netdev_ops = &octeon_mgmt_ops; netdev->ethtool_ops = &octeon_mgmt_ethtool_ops; - /* The mgmt ports get the first N MACs. */ - for (i = 0; i < 6; i++) - netdev->dev_addr[i] = octeon_bootinfo->mac_addr_base[i]; - netdev->dev_addr[5] += p->port; + mac = of_get_mac_address(pdev->dev.of_node); + + if (mac) + memcpy(netdev->dev_addr, mac, 6); - if (p->port >= octeon_bootinfo->mac_addr_count) - dev_err(&pdev->dev, - "Error %s: Using MAC outside of the assigned range: %pM\n", - netdev->name, netdev->dev_addr); + p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); - if (register_netdev(netdev)) + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64); + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + + result = register_netdev(netdev); + if (result) goto err; dev_info(&pdev->dev, "Version " DRV_VERSION "\n"); return 0; + err: free_netdev(netdev); - return -ENOENT; + return result; } static int __devexit octeon_mgmt_remove(struct platform_device *pdev) @@ -1137,10 +1230,19 @@ static int __devexit octeon_mgmt_remove(struct platform_device *pdev) return 0; } +static struct of_device_id octeon_mgmt_match[] = { + { + .compatible = "cavium,octeon-5750-mix", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, octeon_mgmt_match); + static struct platform_driver octeon_mgmt_driver = { .driver = { .name = "octeon_mgmt", .owner = THIS_MODULE, + .of_match_table = octeon_mgmt_match, }, .probe = octeon_mgmt_probe, .remove = __devexit_p(octeon_mgmt_remove), diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 3769f5711cc..b53a3b60b64 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -4682,6 +4682,7 @@ static int __devinit qlge_probe(struct pci_dev *pdev, NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM; ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; + ndev->vlan_features = ndev->hw_features; if (test_bit(QL_DMA64, &qdev->flags)) ndev->features |= NETIF_F_HIGHDMA; diff --git a/drivers/net/ethernet/seeq/seeq8005.c b/drivers/net/ethernet/seeq/seeq8005.c index 698edbbfc14..d6e50de7118 100644 --- a/drivers/net/ethernet/seeq/seeq8005.c +++ b/drivers/net/ethernet/seeq/seeq8005.c @@ -736,9 +736,7 @@ MODULE_PARM_DESC(irq, "SEEQ 8005 IRQ number"); int __init init_module(void) { dev_seeq = seeq8005_probe(-1); - if (IS_ERR(dev_seeq)) - return PTR_ERR(dev_seeq); - return 0; + return PTR_RET(dev_seeq); } void __exit cleanup_module(void) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index ab4c376cb27..f2d3665430a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -82,9 +82,7 @@ struct stmmac_priv { struct stmmac_counters mmc; struct dma_features dma_cap; int hw_cap_support; -#ifdef CONFIG_HAVE_CLK struct clk *stmmac_clk; -#endif int clk_csr; int synopsys_id; struct timer_list eee_ctrl_timer; @@ -113,46 +111,6 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, void stmmac_disable_eee_mode(struct stmmac_priv *priv); bool stmmac_eee_init(struct stmmac_priv *priv); -#ifdef CONFIG_HAVE_CLK -static inline int stmmac_clk_enable(struct stmmac_priv *priv) -{ - if (!IS_ERR(priv->stmmac_clk)) - return clk_prepare_enable(priv->stmmac_clk); - - return 0; -} - -static inline void stmmac_clk_disable(struct stmmac_priv *priv) -{ - if (IS_ERR(priv->stmmac_clk)) - return; - - 
clk_disable_unprepare(priv->stmmac_clk); -} -static inline int stmmac_clk_get(struct stmmac_priv *priv) -{ - priv->stmmac_clk = clk_get(priv->device, NULL); - - if (IS_ERR(priv->stmmac_clk)) - return PTR_ERR(priv->stmmac_clk); - - return 0; -} -#else -static inline int stmmac_clk_enable(struct stmmac_priv *priv) -{ - return 0; -} -static inline void stmmac_clk_disable(struct stmmac_priv *priv) -{ -} -static inline int stmmac_clk_get(struct stmmac_priv *priv) -{ - return 0; -} -#endif /* CONFIG_HAVE_CLK */ - - #ifdef CONFIG_STMMAC_PLATFORM extern struct platform_driver stmmac_pltfr_driver; static inline int stmmac_register_platform(void) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index f6b04c1a367..fd8882f9602 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -28,6 +28,7 @@ https://bugzilla.stlinux.com/ *******************************************************************************/ +#include <linux/clk.h> #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/ip.h> @@ -173,12 +174,8 @@ static void stmmac_verify_args(void) static void stmmac_clk_csr_set(struct stmmac_priv *priv) { -#ifdef CONFIG_HAVE_CLK u32 clk_rate; - if (IS_ERR(priv->stmmac_clk)) - return; - clk_rate = clk_get_rate(priv->stmmac_clk); /* Platform provided default clk_csr would be assumed valid @@ -200,7 +197,6 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv) * we can not estimate the proper divider as it is not known * the frequency of clk_csr_i. So we do not change the default * divider. */ -#endif } #if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG) @@ -1070,7 +1066,7 @@ static int stmmac_open(struct net_device *dev) } else priv->tm->enable = 1; #endif - stmmac_clk_enable(priv); + clk_enable(priv->stmmac_clk); stmmac_check_ether_addr(priv); @@ -1192,7 +1188,7 @@ open_error: if (priv->phydev) phy_disconnect(priv->phydev); - stmmac_clk_disable(priv); + clk_disable(priv->stmmac_clk); return ret; } @@ -1250,7 +1246,7 @@ static int stmmac_release(struct net_device *dev) #ifdef CONFIG_STMMAC_DEBUG_FS stmmac_exit_fs(); #endif - stmmac_clk_disable(priv); + clk_disable(priv->stmmac_clk); return 0; } @@ -2078,11 +2074,14 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, ret = register_netdev(ndev); if (ret) { pr_err("%s: ERROR %i registering the device\n", __func__, ret); - goto error; + goto error_netdev_register; } - if (stmmac_clk_get(priv)) + priv->stmmac_clk = clk_get(priv->device, NULL); + if (IS_ERR(priv->stmmac_clk)) { pr_warning("%s: warning: cannot get CSR clock\n", __func__); + goto error_clk_get; + } /* If a specific clk_csr value is passed from the platform * this means that the CSR Clock Range selection cannot be @@ -2100,15 +2099,17 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, if (ret < 0) { pr_debug("%s: MDIO bus (id: %d) registration failed", __func__, priv->plat->bus_id); - goto error; + goto error_mdio_register; } return priv; -error: - netif_napi_del(&priv->napi); - +error_mdio_register: + clk_put(priv->stmmac_clk); +error_clk_get: unregister_netdev(ndev); +error_netdev_register: + netif_napi_del(&priv->napi); free_netdev(ndev); return NULL; @@ -2177,7 +2178,7 @@ int stmmac_suspend(struct net_device *ndev) else { stmmac_set_mac(priv->ioaddr, false); /* Disable clock in case of PWM is off */ - stmmac_clk_disable(priv); + clk_disable(priv->stmmac_clk); } spin_unlock_irqrestore(&priv->lock, flags); return 
0; @@ -2202,7 +2203,7 @@ int stmmac_resume(struct net_device *ndev) priv->hw->mac->pmt(priv->ioaddr, 0); else /* enable the clk prevously disabled */ - stmmac_clk_enable(priv); + clk_enable(priv->stmmac_clk); netif_device_attach(ndev); |
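
Editor's note: the stmmac_dvr_probe() hunk above replaces the single error: label with per-step labels (error_netdev_register / error_clk_get / error_mdio_register), so a failure unwinds only what was actually acquired, in reverse order. A stripped-down sketch of that unwind pattern is below, with hypothetical demo_* names and an assumed demo_mdio_register() helper; it is not the driver's code verbatim.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/netdevice.h>

struct demo_priv {
	struct device *dev;
	struct clk *clk;
};

int demo_mdio_register(struct demo_priv *priv);	/* assumed helper */

static int demo_probe(struct net_device *ndev, struct demo_priv *priv)
{
	int ret;

	ret = register_netdev(ndev);
	if (ret)
		goto err_register;	/* nothing acquired yet */

	priv->clk = clk_get(priv->dev, NULL);
	if (IS_ERR(priv->clk)) {
		ret = PTR_ERR(priv->clk);
		goto err_clk;		/* netdev already registered */
	}

	ret = demo_mdio_register(priv);
	if (ret)
		goto err_mdio;		/* clk already acquired */

	return 0;

err_mdio:
	clk_put(priv->clk);
err_clk:
	unregister_netdev(ndev);
err_register:
	return ret;
}

The point of the pattern is that each label releases exactly the resources acquired before the jump, so adding a new setup step only requires one new label at the top of the chain.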