Diffstat (limited to 'drivers/net/can')
-rw-r--r--  drivers/net/can/Kconfig              |   32
-rw-r--r--  drivers/net/can/Makefile             |    3
-rw-r--r--  drivers/net/can/at91_can.c           |   34
-rw-r--r--  drivers/net/can/dev.c                |   76
-rw-r--r--  drivers/net/can/mcp251x.c            | 1164
-rw-r--r--  drivers/net/can/mscan/Makefile       |    5
-rw-r--r--  drivers/net/can/mscan/mpc52xx_can.c  |  279
-rw-r--r--  drivers/net/can/mscan/mscan.c        |  699
-rw-r--r--  drivers/net/can/mscan/mscan.h        |  262
-rw-r--r--  drivers/net/can/sja1000/sja1000.c    |   17
-rw-r--r--  drivers/net/can/sja1000/sja1000.h    |    2
-rw-r--r--  drivers/net/can/ti_hecc.c            |  993
-rw-r--r--  drivers/net/can/usb/ems_usb.c        |   20
13 files changed, 3510 insertions, 76 deletions
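
Before the patch body, a quick illustration of the dev.c API rework it contains: alloc_candev() now takes the number of TX echo skbs, priv->echo_skb[] is sized from that count, and alloc_can_skb()/alloc_can_err_skb() move out of at91_can.c into dev.c as exported helpers. The fragment below is a minimal sketch of how a driver is expected to use these helpers after this patch; it is not part of the patch itself, and the names demo_priv, DEMO_TX_ECHO_SKB_MAX, demo_alloc, demo_xmit and demo_tx_done are hypothetical.

/*
 * Minimal sketch (not part of the patch) of the reworked echo-skb API.
 * All identifiers prefixed with "demo" are made up for illustration.
 */
#include <linux/netdevice.h>
#include <linux/can.h>
#include <linux/can/dev.h>

#define DEMO_TX_ECHO_SKB_MAX	1

struct demo_priv {
	struct can_priv can;	/* must be the first member for the CAN dev core */
};

static struct net_device *demo_alloc(void)
{
	/* second argument sizes priv->echo_skb[] inside the private area */
	return alloc_candev(sizeof(struct demo_priv), DEMO_TX_ECHO_SKB_MAX);
}

static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* queue the skb for local echo; index must be < echo_skb_max */
	can_put_echo_skb(skb, dev, 0);
	/* ... write the frame to the controller here ... */
	return NETDEV_TX_OK;
}

static void demo_tx_done(struct net_device *dev)
{
	/* loop the echoed frame back to the stack once TX has completed */
	can_get_echo_skb(dev, 0);
	netif_wake_queue(dev);
}
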
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index 772f6d2489c..732b093e081 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -41,6 +41,38 @@ config CAN_AT91 ---help--- This is a driver for the SoC CAN controller in Atmel's AT91SAM9263. +config CAN_TI_HECC + depends on CAN_DEV && ARCH_OMAP3 + tristate "TI High End CAN Controller" + ---help--- + Driver for TI HECC (High End CAN Controller) module found on many + TI devices. The device specifications are available from www.ti.com + +config CAN_MCP251X + tristate "Microchip MCP251x SPI CAN controllers" + depends on CAN_DEV && SPI + ---help--- + Driver for the Microchip MCP251x SPI CAN controllers. + +config CAN_MSCAN + depends on CAN_DEV && (PPC || M68K || M68KNOMMU) + tristate "Support for Freescale MSCAN based chips" + ---help--- + The Motorola Scalable Controller Area Network (MSCAN) definition + is based on the MSCAN12 definition which is the specific + implementation of the Motorola Scalable CAN concept targeted for + the Motorola MC68HC12 Microcontroller Family. + +config CAN_MPC52XX + tristate "Freescale MPC5xxx onboard CAN controller" + depends on CAN_MSCAN && PPC_MPC52xx + ---help--- + If you say yes here you get support for Freescale's MPC52xx + onboard dualCAN controller. + + This driver can also be built as a module. If so, the module + will be called mpc5xxx_can. + source "drivers/net/can/sja1000/Kconfig" source "drivers/net/can/usb/Kconfig" diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile index 0dea62721f2..56899fef1c6 100644 --- a/drivers/net/can/Makefile +++ b/drivers/net/can/Makefile @@ -10,6 +10,9 @@ can-dev-y := dev.o obj-y += usb/ obj-$(CONFIG_CAN_SJA1000) += sja1000/ +obj-$(CONFIG_CAN_MSCAN) += mscan/ obj-$(CONFIG_CAN_AT91) += at91_can.o +obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o +obj-$(CONFIG_CAN_MCP251X) += mcp251x.o ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index f67ae285a35..cbe3fce53e3 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -221,38 +221,6 @@ static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb, set_mb_mode_prio(priv, mb, mode, 0); } -static struct sk_buff *alloc_can_skb(struct net_device *dev, - struct can_frame **cf) -{ - struct sk_buff *skb; - - skb = netdev_alloc_skb(dev, sizeof(struct can_frame)); - if (unlikely(!skb)) - return NULL; - - skb->protocol = htons(ETH_P_CAN); - skb->ip_summed = CHECKSUM_UNNECESSARY; - *cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame)); - - return skb; -} - -static struct sk_buff *alloc_can_err_skb(struct net_device *dev, - struct can_frame **cf) -{ - struct sk_buff *skb; - - skb = alloc_can_skb(dev, cf); - if (unlikely(!skb)) - return NULL; - - memset(*cf, 0, sizeof(struct can_frame)); - (*cf)->can_id = CAN_ERR_FLAG; - (*cf)->can_dlc = CAN_ERR_DLC; - - return skb; -} - /* * Swtich transceiver on or off */ @@ -1087,7 +1055,7 @@ static int __init at91_can_probe(struct platform_device *pdev) goto exit_release; } - dev = alloc_candev(sizeof(struct at91_priv)); + dev = alloc_candev(sizeof(struct at91_priv), AT91_MB_TX_NUM); if (!dev) { err = -ENOMEM; goto exit_iounmap; diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 2868fe842a4..c1bb29f0322 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -245,7 +245,7 @@ static void can_flush_echo_skb(struct net_device *dev) struct net_device_stats *stats = &dev->stats; int i; - for (i = 0; i < CAN_ECHO_SKB_MAX; i++) { + for 
(i = 0; i < priv->echo_skb_max; i++) { if (priv->echo_skb[i]) { kfree_skb(priv->echo_skb[i]); priv->echo_skb[i] = NULL; @@ -262,10 +262,13 @@ static void can_flush_echo_skb(struct net_device *dev) * of the device driver. The driver must protect access to * priv->echo_skb, if necessary. */ -void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, int idx) +void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, + unsigned int idx) { struct can_priv *priv = netdev_priv(dev); + BUG_ON(idx >= priv->echo_skb_max); + /* check flag whether this packet has to be looped back */ if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK) { kfree_skb(skb); @@ -311,10 +314,12 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb); * is handled in the device driver. The driver must protect * access to priv->echo_skb, if necessary. */ -void can_get_echo_skb(struct net_device *dev, int idx) +void can_get_echo_skb(struct net_device *dev, unsigned int idx) { struct can_priv *priv = netdev_priv(dev); + BUG_ON(idx >= priv->echo_skb_max); + if (priv->echo_skb[idx]) { netif_rx(priv->echo_skb[idx]); priv->echo_skb[idx] = NULL; @@ -327,10 +332,12 @@ EXPORT_SYMBOL_GPL(can_get_echo_skb); * * The function is typically called when TX failed. */ -void can_free_echo_skb(struct net_device *dev, int idx) +void can_free_echo_skb(struct net_device *dev, unsigned int idx) { struct can_priv *priv = netdev_priv(dev); + BUG_ON(idx >= priv->echo_skb_max); + if (priv->echo_skb[idx]) { kfree_skb(priv->echo_skb[idx]); priv->echo_skb[idx] = NULL; @@ -359,17 +366,12 @@ void can_restart(unsigned long data) can_flush_echo_skb(dev); /* send restart message upstream */ - skb = dev_alloc_skb(sizeof(struct can_frame)); + skb = alloc_can_err_skb(dev, &cf); if (skb == NULL) { err = -ENOMEM; goto restart; } - skb->dev = dev; - skb->protocol = htons(ETH_P_CAN); - cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame)); - memset(cf, 0, sizeof(struct can_frame)); - cf->can_id = CAN_ERR_FLAG | CAN_ERR_RESTARTED; - cf->can_dlc = CAN_ERR_DLC; + cf->can_id |= CAN_ERR_RESTARTED; netif_rx(skb); @@ -442,20 +444,66 @@ static void can_setup(struct net_device *dev) dev->features = NETIF_F_NO_CSUM; } +struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf) +{ + struct sk_buff *skb; + + skb = netdev_alloc_skb(dev, sizeof(struct can_frame)); + if (unlikely(!skb)) + return NULL; + + skb->protocol = htons(ETH_P_CAN); + skb->pkt_type = PACKET_BROADCAST; + skb->ip_summed = CHECKSUM_UNNECESSARY; + *cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame)); + memset(*cf, 0, sizeof(struct can_frame)); + + return skb; +} +EXPORT_SYMBOL_GPL(alloc_can_skb); + +struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf) +{ + struct sk_buff *skb; + + skb = alloc_can_skb(dev, cf); + if (unlikely(!skb)) + return NULL; + + (*cf)->can_id = CAN_ERR_FLAG; + (*cf)->can_dlc = CAN_ERR_DLC; + + return skb; +} +EXPORT_SYMBOL_GPL(alloc_can_err_skb); + /* * Allocate and setup space for the CAN network device */ -struct net_device *alloc_candev(int sizeof_priv) +struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max) { struct net_device *dev; struct can_priv *priv; + int size; - dev = alloc_netdev(sizeof_priv, "can%d", can_setup); + if (echo_skb_max) + size = ALIGN(sizeof_priv, sizeof(struct sk_buff *)) + + echo_skb_max * sizeof(struct sk_buff *); + else + size = sizeof_priv; + + dev = alloc_netdev(size, "can%d", can_setup); if (!dev) return NULL; priv = netdev_priv(dev); + if (echo_skb_max) { + 
priv->echo_skb_max = echo_skb_max; + priv->echo_skb = (void *)priv + + ALIGN(sizeof_priv, sizeof(struct sk_buff *)); + } + priv->state = CAN_STATE_STOPPED; init_timer(&priv->restart_timer); @@ -647,7 +695,7 @@ nla_put_failure: return -EMSGSIZE; } -static int can_newlink(struct net_device *dev, +static int can_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { return -EOPNOTSUPP; diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c new file mode 100644 index 00000000000..8f48f4b50b7 --- /dev/null +++ b/drivers/net/can/mcp251x.c @@ -0,0 +1,1164 @@ +/* + * CAN bus driver for Microchip 251x CAN Controller with SPI Interface + * + * MCP2510 support and bug fixes by Christian Pellegrin + * <chripell@evolware.org> + * + * Copyright 2009 Christian Pellegrin EVOL S.r.l. + * + * Copyright 2007 Raymarine UK, Ltd. All Rights Reserved. + * Written under contract by: + * Chris Elston, Katalix Systems, Ltd. + * + * Based on Microchip MCP251x CAN controller driver written by + * David Vrabel, Copyright 2006 Arcom Control Systems Ltd. + * + * Based on CAN bus driver for the CCAN controller written by + * - Sascha Hauer, Marc Kleine-Budde, Pengutronix + * - Simon Kallweit, intefo AG + * Copyright 2007 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the version 2 of the GNU General Public License + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * + * + * Your platform definition file should specify something like: + * + * static struct mcp251x_platform_data mcp251x_info = { + * .oscillator_frequency = 8000000, + * .board_specific_setup = &mcp251x_setup, + * .model = CAN_MCP251X_MCP2510, + * .power_enable = mcp251x_power_enable, + * .transceiver_enable = NULL, + * }; + * + * static struct spi_board_info spi_board_info[] = { + * { + * .modalias = "mcp251x", + * .platform_data = &mcp251x_info, + * .irq = IRQ_EINT13, + * .max_speed_hz = 2*1000*1000, + * .chip_select = 2, + * }, + * }; + * + * Please see mcp251x.h for a description of the fields in + * struct mcp251x_platform_data. + * + */ + +#include <linux/can.h> +#include <linux/can/core.h> +#include <linux/can/dev.h> +#include <linux/can/platform/mcp251x.h> +#include <linux/completion.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/freezer.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/platform_device.h> +#include <linux/spi/spi.h> +#include <linux/uaccess.h> + +/* SPI interface instruction set */ +#define INSTRUCTION_WRITE 0x02 +#define INSTRUCTION_READ 0x03 +#define INSTRUCTION_BIT_MODIFY 0x05 +#define INSTRUCTION_LOAD_TXB(n) (0x40 + 2 * (n)) +#define INSTRUCTION_READ_RXB(n) (((n) == 0) ? 
0x90 : 0x94) +#define INSTRUCTION_RESET 0xC0 + +/* MPC251x registers */ +#define CANSTAT 0x0e +#define CANCTRL 0x0f +# define CANCTRL_REQOP_MASK 0xe0 +# define CANCTRL_REQOP_CONF 0x80 +# define CANCTRL_REQOP_LISTEN_ONLY 0x60 +# define CANCTRL_REQOP_LOOPBACK 0x40 +# define CANCTRL_REQOP_SLEEP 0x20 +# define CANCTRL_REQOP_NORMAL 0x00 +# define CANCTRL_OSM 0x08 +# define CANCTRL_ABAT 0x10 +#define TEC 0x1c +#define REC 0x1d +#define CNF1 0x2a +# define CNF1_SJW_SHIFT 6 +#define CNF2 0x29 +# define CNF2_BTLMODE 0x80 +# define CNF2_SAM 0x40 +# define CNF2_PS1_SHIFT 3 +#define CNF3 0x28 +# define CNF3_SOF 0x08 +# define CNF3_WAKFIL 0x04 +# define CNF3_PHSEG2_MASK 0x07 +#define CANINTE 0x2b +# define CANINTE_MERRE 0x80 +# define CANINTE_WAKIE 0x40 +# define CANINTE_ERRIE 0x20 +# define CANINTE_TX2IE 0x10 +# define CANINTE_TX1IE 0x08 +# define CANINTE_TX0IE 0x04 +# define CANINTE_RX1IE 0x02 +# define CANINTE_RX0IE 0x01 +#define CANINTF 0x2c +# define CANINTF_MERRF 0x80 +# define CANINTF_WAKIF 0x40 +# define CANINTF_ERRIF 0x20 +# define CANINTF_TX2IF 0x10 +# define CANINTF_TX1IF 0x08 +# define CANINTF_TX0IF 0x04 +# define CANINTF_RX1IF 0x02 +# define CANINTF_RX0IF 0x01 +#define EFLG 0x2d +# define EFLG_EWARN 0x01 +# define EFLG_RXWAR 0x02 +# define EFLG_TXWAR 0x04 +# define EFLG_RXEP 0x08 +# define EFLG_TXEP 0x10 +# define EFLG_TXBO 0x20 +# define EFLG_RX0OVR 0x40 +# define EFLG_RX1OVR 0x80 +#define TXBCTRL(n) (((n) * 0x10) + 0x30 + TXBCTRL_OFF) +# define TXBCTRL_ABTF 0x40 +# define TXBCTRL_MLOA 0x20 +# define TXBCTRL_TXERR 0x10 +# define TXBCTRL_TXREQ 0x08 +#define TXBSIDH(n) (((n) * 0x10) + 0x30 + TXBSIDH_OFF) +# define SIDH_SHIFT 3 +#define TXBSIDL(n) (((n) * 0x10) + 0x30 + TXBSIDL_OFF) +# define SIDL_SID_MASK 7 +# define SIDL_SID_SHIFT 5 +# define SIDL_EXIDE_SHIFT 3 +# define SIDL_EID_SHIFT 16 +# define SIDL_EID_MASK 3 +#define TXBEID8(n) (((n) * 0x10) + 0x30 + TXBEID8_OFF) +#define TXBEID0(n) (((n) * 0x10) + 0x30 + TXBEID0_OFF) +#define TXBDLC(n) (((n) * 0x10) + 0x30 + TXBDLC_OFF) +# define DLC_RTR_SHIFT 6 +#define TXBCTRL_OFF 0 +#define TXBSIDH_OFF 1 +#define TXBSIDL_OFF 2 +#define TXBEID8_OFF 3 +#define TXBEID0_OFF 4 +#define TXBDLC_OFF 5 +#define TXBDAT_OFF 6 +#define RXBCTRL(n) (((n) * 0x10) + 0x60 + RXBCTRL_OFF) +# define RXBCTRL_BUKT 0x04 +# define RXBCTRL_RXM0 0x20 +# define RXBCTRL_RXM1 0x40 +#define RXBSIDH(n) (((n) * 0x10) + 0x60 + RXBSIDH_OFF) +# define RXBSIDH_SHIFT 3 +#define RXBSIDL(n) (((n) * 0x10) + 0x60 + RXBSIDL_OFF) +# define RXBSIDL_IDE 0x08 +# define RXBSIDL_EID 3 +# define RXBSIDL_SHIFT 5 +#define RXBEID8(n) (((n) * 0x10) + 0x60 + RXBEID8_OFF) +#define RXBEID0(n) (((n) * 0x10) + 0x60 + RXBEID0_OFF) +#define RXBDLC(n) (((n) * 0x10) + 0x60 + RXBDLC_OFF) +# define RXBDLC_LEN_MASK 0x0f +# define RXBDLC_RTR 0x40 +#define RXBCTRL_OFF 0 +#define RXBSIDH_OFF 1 +#define RXBSIDL_OFF 2 +#define RXBEID8_OFF 3 +#define RXBEID0_OFF 4 +#define RXBDLC_OFF 5 +#define RXBDAT_OFF 6 + +#define GET_BYTE(val, byte) \ + (((val) >> ((byte) * 8)) & 0xff) +#define SET_BYTE(val, byte) \ + (((val) & 0xff) << ((byte) * 8)) + +/* + * Buffer size required for the largest SPI transfer (i.e., reading a + * frame) + */ +#define CAN_FRAME_MAX_DATA_LEN 8 +#define SPI_TRANSFER_BUF_LEN (6 + CAN_FRAME_MAX_DATA_LEN) +#define CAN_FRAME_MAX_BITS 128 + +#define TX_ECHO_SKB_MAX 1 + +#define DEVICE_NAME "mcp251x" + +static int mcp251x_enable_dma; /* Enable SPI DMA. Default: 0 (Off) */ +module_param(mcp251x_enable_dma, int, S_IRUGO); +MODULE_PARM_DESC(mcp251x_enable_dma, "Enable SPI DMA. 
Default: 0 (Off)"); + +static struct can_bittiming_const mcp251x_bittiming_const = { + .name = DEVICE_NAME, + .tseg1_min = 3, + .tseg1_max = 16, + .tseg2_min = 2, + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 64, + .brp_inc = 1, +}; + +struct mcp251x_priv { + struct can_priv can; + struct net_device *net; + struct spi_device *spi; + + struct mutex spi_lock; /* SPI buffer lock */ + u8 *spi_tx_buf; + u8 *spi_rx_buf; + dma_addr_t spi_tx_dma; + dma_addr_t spi_rx_dma; + + struct sk_buff *tx_skb; + int tx_len; + struct workqueue_struct *wq; + struct work_struct tx_work; + struct work_struct irq_work; + struct completion awake; + int wake; + int force_quit; + int after_suspend; +#define AFTER_SUSPEND_UP 1 +#define AFTER_SUSPEND_DOWN 2 +#define AFTER_SUSPEND_POWER 4 +#define AFTER_SUSPEND_RESTART 8 + int restart_tx; +}; + +static void mcp251x_clean(struct net_device *net) +{ + struct mcp251x_priv *priv = netdev_priv(net); + + net->stats.tx_errors++; + if (priv->tx_skb) + dev_kfree_skb(priv->tx_skb); + if (priv->tx_len) + can_free_echo_skb(priv->net, 0); + priv->tx_skb = NULL; + priv->tx_len = 0; +} + +/* + * Note about handling of error return of mcp251x_spi_trans: accessing + * registers via SPI is not really different conceptually than using + * normal I/O assembler instructions, although it's much more + * complicated from a practical POV. So it's not advisable to always + * check the return value of this function. Imagine that every + * read{b,l}, write{b,l} and friends would be bracketed in "if ( < 0) + * error();", it would be a great mess (well there are some situation + * when exception handling C++ like could be useful after all). So we + * just check that transfers are OK at the beginning of our + * conversation with the chip and to avoid doing really nasty things + * (like injecting bogus packets in the network stack). 
+ */ +static int mcp251x_spi_trans(struct spi_device *spi, int len) +{ + struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); + struct spi_transfer t = { + .tx_buf = priv->spi_tx_buf, + .rx_buf = priv->spi_rx_buf, + .len = len, + .cs_change = 0, + }; + struct spi_message m; + int ret; + + spi_message_init(&m); + + if (mcp251x_enable_dma) { + t.tx_dma = priv->spi_tx_dma; + t.rx_dma = priv->spi_rx_dma; + m.is_dma_mapped = 1; + } + + spi_message_add_tail(&t, &m); + + ret = spi_sync(spi, &m); + if (ret) + dev_err(&spi->dev, "spi transfer failed: ret = %d\n", ret); + return ret; +} + +static u8 mcp251x_read_reg(struct spi_device *spi, uint8_t reg) +{ + struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); + u8 val = 0; + + mutex_lock(&priv->spi_lock); + + priv->spi_tx_buf[0] = INSTRUCTION_READ; + priv->spi_tx_buf[1] = reg; + + mcp251x_spi_trans(spi, 3); + val = priv->spi_rx_buf[2]; + + mutex_unlock(&priv->spi_lock); + + return val; +} + +static void mcp251x_write_reg(struct spi_device *spi, u8 reg, uint8_t val) +{ + struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); + + mutex_lock(&priv->spi_lock); + + priv->spi_tx_buf[0] = INSTRUCTION_WRITE; + priv->spi_tx_buf[1] = reg; + priv->spi_tx_buf[2] = val; + + mcp251x_spi_trans(spi, 3); + + mutex_unlock(&priv->spi_lock); +} + +static void mcp251x_write_bits(struct spi_device *spi, u8 reg, + u8 mask, uint8_t val) +{ + struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); + + mutex_lock(&priv->spi_lock); + + priv->spi_tx_buf[0] = INSTRUCTION_BIT_MODIFY; + priv->spi_tx_buf[1] = reg; + priv->spi_tx_buf[2] = mask; + priv->spi_tx_buf[3] = val; + + mcp251x_spi_trans(spi, 4); + + mutex_unlock(&priv->spi_lock); +} + +static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf, + int len, int tx_buf_idx) +{ + struct mcp251x_platform_data *pdata = spi->dev.platform_data; + struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); + + if (pdata->model == CAN_MCP251X_MCP2510) { + int i; + + for (i = 1; i < TXBDAT_OFF + len; i++) + mcp251x_write_reg(spi, TXBCTRL(tx_buf_idx) + i, + buf[i]); + } else { + mutex_lock(&priv->spi_lock); + memcpy(priv->spi_tx_buf, buf, TXBDAT_OFF + len); + mcp251x_spi_trans(spi, TXBDAT_OFF + len); + mutex_unlock(&priv->spi_lock); + } +} + +static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame, + int tx_buf_idx) +{ + u32 sid, eid, exide, rtr; + u8 buf[SPI_TRANSFER_BUF_LEN]; + + exide = (frame->can_id & CAN_EFF_FLAG) ? 1 : 0; /* Extended ID Enable */ + if (exide) + sid = (frame->can_id & CAN_EFF_MASK) >> 18; + else + sid = frame->can_id & CAN_SFF_MASK; /* Standard ID */ + eid = frame->can_id & CAN_EFF_MASK; /* Extended ID */ + rtr = (frame->can_id & CAN_RTR_FLAG) ? 
1 : 0; /* Remote transmission */ + + buf[TXBCTRL_OFF] = INSTRUCTION_LOAD_TXB(tx_buf_idx); + buf[TXBSIDH_OFF] = sid >> SIDH_SHIFT; + buf[TXBSIDL_OFF] = ((sid & SIDL_SID_MASK) << SIDL_SID_SHIFT) | + (exide << SIDL_EXIDE_SHIFT) | + ((eid >> SIDL_EID_SHIFT) & SIDL_EID_MASK); + buf[TXBEID8_OFF] = GET_BYTE(eid, 1); + buf[TXBEID0_OFF] = GET_BYTE(eid, 0); + buf[TXBDLC_OFF] = (rtr << DLC_RTR_SHIFT) | frame->can_dlc; + memcpy(buf + TXBDAT_OFF, frame->data, frame->can_dlc); + mcp251x_hw_tx_frame(spi, buf, frame->can_dlc, tx_buf_idx); + mcp251x_write_reg(spi, TXBCTRL(tx_buf_idx), TXBCTRL_TXREQ); +} + +static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf, + int buf_idx) +{ + struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); + struct mcp251x_platform_data *pdata = spi->dev.platform_data; + + if (pdata->model == CAN_MCP251X_MCP2510) { + int i, len; + + for (i = 1; i < RXBDAT_OFF; i++) + buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i); + len = buf[RXBDLC_OFF] & RXBDLC_LEN_MASK; + if (len > 8) + len = 8; + for (; i < (RXBDAT_OFF + len); i++) + buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i); + } else { + mutex_lock(&priv->spi_lock); + + priv->spi_tx_buf[RXBCTRL_OFF] = INSTRUCTION_READ_RXB(buf_idx); + mcp251x_spi_trans(spi, SPI_TRANSFER_BUF_LEN); + memcpy(buf, priv->spi_rx_buf, SPI_TRANSFER_BUF_LEN); + + mutex_unlock(&priv->spi_lock); + } +} + +static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx) +{ + struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); + struct sk_buff *skb; + struct can_frame *frame; + u8 buf[SPI_TRANSFER_BUF_LEN]; + + skb = alloc_can_skb(priv->net, &frame); + if (!skb) { + dev_err(&spi->dev, "cannot allocate RX skb\n"); + priv->net->stats.rx_dropped++; + return; + } + + mcp251x_hw_rx_frame(spi, buf, buf_idx); + if (buf[RXBSIDL_OFF] & RXBSIDL_IDE) { + /* Extended ID format */ + frame->can_id = CAN_EFF_FLAG; + frame->can_id |= + /* Extended ID part */ + SET_BYTE(buf[RXBSIDL_OFF] & RXBSIDL_EID, 2) | + SET_BYTE(buf[RXBEID8_OFF], 1) | + SET_BYTE(buf[RXBEID0_OFF], 0) | + /* Standard ID part */ + (((buf[RXBSIDH_OFF] << RXBSIDH_SHIFT) | + (buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT)) << 18); + /* Remote transmission request */ + if (buf[RXBDLC_OFF] & RXBDLC_RTR) + frame->can_id |= CAN_RTR_FLAG; + } else { + /* Standard ID format */ + frame->can_id = + (buf[RXBSIDH_OFF] << RXBSIDH_SHIFT) | + (buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT); + } + /* Data length */ + frame->can_dlc = buf[RXBDLC_OFF] & RXBDLC_LEN_MASK; + if (frame->can_dlc > 8) { + dev_warn(&spi->dev, "invalid frame recevied\n"); + priv->net->stats.rx_errors++; + dev_kfree_skb(skb); + return; + } + memcpy(frame->data, buf + RXBDAT_OFF, frame->can_dlc); + + priv->net->stats.rx_packets++; + priv->net->stats.rx_bytes += frame->can_dlc; + netif_rx(skb); +} + +static void mcp251x_hw_sleep(struct spi_device *spi) +{ + mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_SLEEP); +} + +static void mcp251x_hw_wakeup(struct spi_device *spi) +{ + struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); + + priv->wake = 1; + + /* Can only wake up by generating a wake-up interrupt. 
*/ + mcp251x_write_bits(spi, CANINTE, CANINTE_WAKIE, CANINTE_WAKIE); + mcp251x_write_bits(spi, CANINTF, CANINTF_WAKIF, CANINTF_WAKIF); + + /* Wait until the device is awake */ + if (!wait_for_completion_timeout(&priv->awake, HZ)) + dev_err(&spi->dev, "MCP251x didn't wake-up\n"); +} + +static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb, + struct net_device *net) +{ + struct mcp251x_priv *priv = netdev_priv(net); + struct spi_device *spi = priv->spi; + + if (priv->tx_skb || priv->tx_len) { + dev_warn(&spi->dev, "hard_xmit called while tx busy\n"); + netif_stop_queue(net); + return NETDEV_TX_BUSY; + } + + if (skb->len != sizeof(struct can_frame)) { + dev_err(&spi->dev, "dropping packet - bad length\n"); + dev_kfree_skb(skb); + net->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + netif_stop_queue(net); + priv->tx_skb = skb; + net->trans_start = jiffies; + queue_work(priv->wq, &priv->tx_work); + + return NETDEV_TX_OK; +} + +static int mcp251x_do_set_mode(struct net_device *net, enum can_mode mode) +{ + struct mcp251x_priv *priv = netdev_priv(net); + + switch (mode) { + case CAN_MODE_START: + /* We have to delay work since SPI I/O may sleep */ + priv->can.state = CAN_STATE_ERROR_ACTIVE; + priv->restart_tx = 1; + if (priv->can.restart_ms == 0) + priv->after_suspend = AFTER_SUSPEND_RESTART; + queue_work(priv->wq, &priv->irq_work); + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static void mcp251x_set_normal_mode(struct spi_device *spi) +{ + struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); + unsigned long timeout; + + /* Enable interrupts */ + mcp251x_write_reg(spi, CANINTE, + CANINTE_ERRIE | CANINTE_TX2IE | CANINTE_TX1IE | + CANINTE_TX0IE | CANINTE_RX1IE | CANINTE_RX0IE | + CANINTF_MERRF); + + if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { + /* Put device into loopback mode */ + mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LOOPBACK); + } else { + /* Put device into normal mode */ + mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_NORMAL); + + /* Wait for the device to enter normal mode */ + timeout = jiffies + HZ; + while (mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) { + schedule(); + if (time_after(jiffies, timeout)) { + dev_err(&spi->dev, "MCP251x didn't" + " enter in normal mode\n"); + return; + } + } + } + priv->can.state = CAN_STATE_ERROR_ACTIVE; +} + +static int mcp251x_do_set_bittiming(struct net_device *net) +{ + struct mcp251x_priv *priv = netdev_priv(net); + struct can_bittiming *bt = &priv->can.bittiming; + struct spi_device *spi = priv->spi; + + mcp251x_write_reg(spi, CNF1, ((bt->sjw - 1) << CNF1_SJW_SHIFT) | + (bt->brp - 1)); + mcp251x_write_reg(spi, CNF2, CNF2_BTLMODE | + (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES ? 
+ CNF2_SAM : 0) | + ((bt->phase_seg1 - 1) << CNF2_PS1_SHIFT) | + (bt->prop_seg - 1)); + mcp251x_write_bits(spi, CNF3, CNF3_PHSEG2_MASK, + (bt->phase_seg2 - 1)); + dev_info(&spi->dev, "CNF: 0x%02x 0x%02x 0x%02x\n", + mcp251x_read_reg(spi, CNF1), + mcp251x_read_reg(spi, CNF2), + mcp251x_read_reg(spi, CNF3)); + + return 0; +} + +static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv, + struct spi_device *spi) +{ + int ret; + + ret = open_candev(net); + if (ret) { + dev_err(&spi->dev, "unable to set initial baudrate!\n"); + return ret; + } + + /* Enable RX0->RX1 buffer roll over and disable filters */ + mcp251x_write_bits(spi, RXBCTRL(0), + RXBCTRL_BUKT | RXBCTRL_RXM0 | RXBCTRL_RXM1, + RXBCTRL_BUKT | RXBCTRL_RXM0 | RXBCTRL_RXM1); + mcp251x_write_bits(spi, RXBCTRL(1), + RXBCTRL_RXM0 | RXBCTRL_RXM1, + RXBCTRL_RXM0 | RXBCTRL_RXM1); + return 0; +} + +static void mcp251x_hw_reset(struct spi_device *spi) +{ + struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); + int ret; + + mutex_lock(&priv->spi_lock); + + priv->spi_tx_buf[0] = INSTRUCTION_RESET; + + ret = spi_write(spi, priv->spi_tx_buf, 1); + + mutex_unlock(&priv->spi_lock); + + if (ret) + dev_err(&spi->dev, "reset failed: ret = %d\n", ret); + /* Wait for reset to finish */ + mdelay(10); +} + +static int mcp251x_hw_probe(struct spi_device *spi) +{ + int st1, st2; + + mcp251x_hw_reset(spi); + + /* + * Please note that these are "magic values" based on after + * reset defaults taken from data sheet which allows us to see + * if we really have a chip on the bus (we avoid common all + * zeroes or all ones situations) + */ + st1 = mcp251x_read_reg(spi, CANSTAT) & 0xEE; + st2 = mcp251x_read_reg(spi, CANCTRL) & 0x17; + + dev_dbg(&spi->dev, "CANSTAT 0x%02x CANCTRL 0x%02x\n", st1, st2); + + /* Check for power up default values */ + return (st1 == 0x80 && st2 == 0x07) ? 
1 : 0; +} + +static irqreturn_t mcp251x_can_isr(int irq, void *dev_id) +{ + struct net_device *net = (struct net_device *)dev_id; + struct mcp251x_priv *priv = netdev_priv(net); + + /* Schedule bottom half */ + if (!work_pending(&priv->irq_work)) + queue_work(priv->wq, &priv->irq_work); + + return IRQ_HANDLED; +} + +static int mcp251x_open(struct net_device *net) +{ + struct mcp251x_priv *priv = netdev_priv(net); + struct spi_device *spi = priv->spi; + struct mcp251x_platform_data *pdata = spi->dev.platform_data; + int ret; + + if (pdata->transceiver_enable) + pdata->transceiver_enable(1); + + priv->force_quit = 0; + priv->tx_skb = NULL; + priv->tx_len = 0; + + ret = request_irq(spi->irq, mcp251x_can_isr, + IRQF_TRIGGER_FALLING, DEVICE_NAME, net); + if (ret) { + dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq); + if (pdata->transceiver_enable) + pdata->transceiver_enable(0); + return ret; + } + + mcp251x_hw_wakeup(spi); + mcp251x_hw_reset(spi); + ret = mcp251x_setup(net, priv, spi); + if (ret) { + free_irq(spi->irq, net); + if (pdata->transceiver_enable) + pdata->transceiver_enable(0); + return ret; + } + mcp251x_set_normal_mode(spi); + netif_wake_queue(net); + + return 0; +} + +static int mcp251x_stop(struct net_device *net) +{ + struct mcp251x_priv *priv = netdev_priv(net); + struct spi_device *spi = priv->spi; + struct mcp251x_platform_data *pdata = spi->dev.platform_data; + + close_candev(net); + + /* Disable and clear pending interrupts */ + mcp251x_write_reg(spi, CANINTE, 0x00); + mcp251x_write_reg(spi, CANINTF, 0x00); + + priv->force_quit = 1; + free_irq(spi->irq, net); + flush_workqueue(priv->wq); + + mcp251x_write_reg(spi, TXBCTRL(0), 0); + if (priv->tx_skb || priv->tx_len) + mcp251x_clean(net); + + mcp251x_hw_sleep(spi); + + if (pdata->transceiver_enable) + pdata->transceiver_enable(0); + + priv->can.state = CAN_STATE_STOPPED; + + return 0; +} + +static void mcp251x_tx_work_handler(struct work_struct *ws) +{ + struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv, + tx_work); + struct spi_device *spi = priv->spi; + struct net_device *net = priv->net; + struct can_frame *frame; + + if (priv->tx_skb) { + frame = (struct can_frame *)priv->tx_skb->data; + + if (priv->can.state == CAN_STATE_BUS_OFF) { + mcp251x_clean(net); + netif_wake_queue(net); + return; + } + if (frame->can_dlc > CAN_FRAME_MAX_DATA_LEN) + frame->can_dlc = CAN_FRAME_MAX_DATA_LEN; + mcp251x_hw_tx(spi, frame, 0); + priv->tx_len = 1 + frame->can_dlc; + can_put_echo_skb(priv->tx_skb, net, 0); + priv->tx_skb = NULL; + } +} + +static void mcp251x_irq_work_handler(struct work_struct *ws) +{ + struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv, + irq_work); + struct spi_device *spi = priv->spi; + struct net_device *net = priv->net; + u8 txbnctrl; + u8 intf; + enum can_state new_state; + + if (priv->after_suspend) { + mdelay(10); + mcp251x_hw_reset(spi); + mcp251x_setup(net, priv, spi); + if (priv->after_suspend & AFTER_SUSPEND_RESTART) { + mcp251x_set_normal_mode(spi); + } else if (priv->after_suspend & AFTER_SUSPEND_UP) { + netif_device_attach(net); + /* Clean since we lost tx buffer */ + if (priv->tx_skb || priv->tx_len) { + mcp251x_clean(net); + netif_wake_queue(net); + } + mcp251x_set_normal_mode(spi); + } else { + mcp251x_hw_sleep(spi); + } + priv->after_suspend = 0; + } + + if (priv->can.restart_ms == 0 && priv->can.state == CAN_STATE_BUS_OFF) + return; + + while (!priv->force_quit && !freezing(current)) { + u8 eflag = mcp251x_read_reg(spi, EFLG); + int can_id = 0, data1 = 0; + + 
mcp251x_write_reg(spi, EFLG, 0x00); + + if (priv->restart_tx) { + priv->restart_tx = 0; + mcp251x_write_reg(spi, TXBCTRL(0), 0); + if (priv->tx_skb || priv->tx_len) + mcp251x_clean(net); + netif_wake_queue(net); + can_id |= CAN_ERR_RESTARTED; + } + + if (priv->wake) { + /* Wait whilst the device wakes up */ + mdelay(10); + priv->wake = 0; + } + + intf = mcp251x_read_reg(spi, CANINTF); + mcp251x_write_bits(spi, CANINTF, intf, 0x00); + + /* Update can state */ + if (eflag & EFLG_TXBO) { + new_state = CAN_STATE_BUS_OFF; + can_id |= CAN_ERR_BUSOFF; + } else if (eflag & EFLG_TXEP) { + new_state = CAN_STATE_ERROR_PASSIVE; + can_id |= CAN_ERR_CRTL; + data1 |= CAN_ERR_CRTL_TX_PASSIVE; + } else if (eflag & EFLG_RXEP) { + new_state = CAN_STATE_ERROR_PASSIVE; + can_id |= CAN_ERR_CRTL; + data1 |= CAN_ERR_CRTL_RX_PASSIVE; + } else if (eflag & EFLG_TXWAR) { + new_state = CAN_STATE_ERROR_WARNING; + can_id |= CAN_ERR_CRTL; + data1 |= CAN_ERR_CRTL_TX_WARNING; + } else if (eflag & EFLG_RXWAR) { + new_state = CAN_STATE_ERROR_WARNING; + can_id |= CAN_ERR_CRTL; + data1 |= CAN_ERR_CRTL_RX_WARNING; + } else { + new_state = CAN_STATE_ERROR_ACTIVE; + } + + /* Update can state statistics */ + switch (priv->can.state) { + case CAN_STATE_ERROR_ACTIVE: + if (new_state >= CAN_STATE_ERROR_WARNING && + new_state <= CAN_STATE_BUS_OFF) + priv->can.can_stats.error_warning++; + case CAN_STATE_ERROR_WARNING: /* fallthrough */ + if (new_state >= CAN_STATE_ERROR_PASSIVE && + new_state <= CAN_STATE_BUS_OFF) + priv->can.can_stats.error_passive++; + break; + default: + break; + } + priv->can.state = new_state; + + if ((intf & CANINTF_ERRIF) || (can_id & CAN_ERR_RESTARTED)) { + struct sk_buff *skb; + struct can_frame *frame; + + /* Create error frame */ + skb = alloc_can_err_skb(net, &frame); + if (skb) { + /* Set error frame flags based on bus state */ + frame->can_id = can_id; + frame->data[1] = data1; + + /* Update net stats for overflows */ + if (eflag & (EFLG_RX0OVR | EFLG_RX1OVR)) { + if (eflag & EFLG_RX0OVR) + net->stats.rx_over_errors++; + if (eflag & EFLG_RX1OVR) + net->stats.rx_over_errors++; + frame->can_id |= CAN_ERR_CRTL; + frame->data[1] |= + CAN_ERR_CRTL_RX_OVERFLOW; + } + + netif_rx(skb); + } else { + dev_info(&spi->dev, + "cannot allocate error skb\n"); + } + } + + if (priv->can.state == CAN_STATE_BUS_OFF) { + if (priv->can.restart_ms == 0) { + can_bus_off(net); + mcp251x_hw_sleep(spi); + return; + } + } + + if (intf == 0) + break; + + if (intf & CANINTF_WAKIF) + complete(&priv->awake); + + if (intf & CANINTF_MERRF) { + /* If there are pending Tx buffers, restart queue */ + txbnctrl = mcp251x_read_reg(spi, TXBCTRL(0)); + if (!(txbnctrl & TXBCTRL_TXREQ)) { + if (priv->tx_skb || priv->tx_len) + mcp251x_clean(net); + netif_wake_queue(net); + } + } + + if (intf & (CANINTF_TX2IF | CANINTF_TX1IF | CANINTF_TX0IF)) { + net->stats.tx_packets++; + net->stats.tx_bytes += priv->tx_len - 1; + if (priv->tx_len) { + can_get_echo_skb(net, 0); + priv->tx_len = 0; + } + netif_wake_queue(net); + } + + if (intf & CANINTF_RX0IF) + mcp251x_hw_rx(spi, 0); + + if (intf & CANINTF_RX1IF) + mcp251x_hw_rx(spi, 1); + } +} + +static const struct net_device_ops mcp251x_netdev_ops = { + .ndo_open = mcp251x_open, + .ndo_stop = mcp251x_stop, + .ndo_start_xmit = mcp251x_hard_start_xmit, +}; + +static int __devinit mcp251x_can_probe(struct spi_device *spi) +{ + struct net_device *net; + struct mcp251x_priv *priv; + struct mcp251x_platform_data *pdata = spi->dev.platform_data; + int ret = -ENODEV; + + if (!pdata) + /* Platform data is required for osc 
freq */ + goto error_out; + + /* Allocate can/net device */ + net = alloc_candev(sizeof(struct mcp251x_priv), TX_ECHO_SKB_MAX); + if (!net) { + ret = -ENOMEM; + goto error_alloc; + } + + net->netdev_ops = &mcp251x_netdev_ops; + net->flags |= IFF_ECHO; + + priv = netdev_priv(net); + priv->can.bittiming_const = &mcp251x_bittiming_const; + priv->can.do_set_mode = mcp251x_do_set_mode; + priv->can.clock.freq = pdata->oscillator_frequency / 2; + priv->can.do_set_bittiming = mcp251x_do_set_bittiming; + priv->net = net; + dev_set_drvdata(&spi->dev, priv); + + priv->spi = spi; + mutex_init(&priv->spi_lock); + + /* If requested, allocate DMA buffers */ + if (mcp251x_enable_dma) { + spi->dev.coherent_dma_mask = ~0; + + /* + * Minimum coherent DMA allocation is PAGE_SIZE, so allocate + * that much and share it between Tx and Rx DMA buffers. + */ + priv->spi_tx_buf = dma_alloc_coherent(&spi->dev, + PAGE_SIZE, + &priv->spi_tx_dma, + GFP_DMA); + + if (priv->spi_tx_buf) { + priv->spi_rx_buf = (u8 *)(priv->spi_tx_buf + + (PAGE_SIZE / 2)); + priv->spi_rx_dma = (dma_addr_t)(priv->spi_tx_dma + + (PAGE_SIZE / 2)); + } else { + /* Fall back to non-DMA */ + mcp251x_enable_dma = 0; + } + } + + /* Allocate non-DMA buffers */ + if (!mcp251x_enable_dma) { + priv->spi_tx_buf = kmalloc(SPI_TRANSFER_BUF_LEN, GFP_KERNEL); + if (!priv->spi_tx_buf) { + ret = -ENOMEM; + goto error_tx_buf; + } + priv->spi_rx_buf = kmalloc(SPI_TRANSFER_BUF_LEN, GFP_KERNEL); + if (!priv->spi_tx_buf) { + ret = -ENOMEM; + goto error_rx_buf; + } + } + + if (pdata->power_enable) + pdata->power_enable(1); + + /* Call out to platform specific setup */ + if (pdata->board_specific_setup) + pdata->board_specific_setup(spi); + + SET_NETDEV_DEV(net, &spi->dev); + + priv->wq = create_freezeable_workqueue("mcp251x_wq"); + + INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler); + INIT_WORK(&priv->irq_work, mcp251x_irq_work_handler); + + init_completion(&priv->awake); + + /* Configure the SPI bus */ + spi->mode = SPI_MODE_0; + spi->bits_per_word = 8; + spi_setup(spi); + + if (!mcp251x_hw_probe(spi)) { + dev_info(&spi->dev, "Probe failed\n"); + goto error_probe; + } + mcp251x_hw_sleep(spi); + + if (pdata->transceiver_enable) + pdata->transceiver_enable(0); + + ret = register_candev(net); + if (!ret) { + dev_info(&spi->dev, "probed\n"); + return ret; + } +error_probe: + if (!mcp251x_enable_dma) + kfree(priv->spi_rx_buf); +error_rx_buf: + if (!mcp251x_enable_dma) + kfree(priv->spi_tx_buf); +error_tx_buf: + free_candev(net); + if (mcp251x_enable_dma) + dma_free_coherent(&spi->dev, PAGE_SIZE, + priv->spi_tx_buf, priv->spi_tx_dma); +error_alloc: + if (pdata->power_enable) + pdata->power_enable(0); + dev_err(&spi->dev, "probe failed\n"); +error_out: + return ret; +} + +static int __devexit mcp251x_can_remove(struct spi_device *spi) +{ + struct mcp251x_platform_data *pdata = spi->dev.platform_data; + struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); + struct net_device *net = priv->net; + + unregister_candev(net); + free_candev(net); + + priv->force_quit = 1; + flush_workqueue(priv->wq); + destroy_workqueue(priv->wq); + + if (mcp251x_enable_dma) { + dma_free_coherent(&spi->dev, PAGE_SIZE, + priv->spi_tx_buf, priv->spi_tx_dma); + } else { + kfree(priv->spi_tx_buf); + kfree(priv->spi_rx_buf); + } + + if (pdata->power_enable) + pdata->power_enable(0); + + return 0; +} + +#ifdef CONFIG_PM +static int mcp251x_can_suspend(struct spi_device *spi, pm_message_t state) +{ + struct mcp251x_platform_data *pdata = spi->dev.platform_data; + struct mcp251x_priv *priv = 
dev_get_drvdata(&spi->dev); + struct net_device *net = priv->net; + + if (netif_running(net)) { + netif_device_detach(net); + + mcp251x_hw_sleep(spi); + if (pdata->transceiver_enable) + pdata->transceiver_enable(0); + priv->after_suspend = AFTER_SUSPEND_UP; + } else { + priv->after_suspend = AFTER_SUSPEND_DOWN; + } + + if (pdata->power_enable) { + pdata->power_enable(0); + priv->after_suspend |= AFTER_SUSPEND_POWER; + } + + return 0; +} + +static int mcp251x_can_resume(struct spi_device *spi) +{ + struct mcp251x_platform_data *pdata = spi->dev.platform_data; + struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); + + if (priv->after_suspend & AFTER_SUSPEND_POWER) { + pdata->power_enable(1); + queue_work(priv->wq, &priv->irq_work); + } else { + if (priv->after_suspend & AFTER_SUSPEND_UP) { + if (pdata->transceiver_enable) + pdata->transceiver_enable(1); + queue_work(priv->wq, &priv->irq_work); + } else { + priv->after_suspend = 0; + } + } + return 0; +} +#else +#define mcp251x_can_suspend NULL +#define mcp251x_can_resume NULL +#endif + +static struct spi_driver mcp251x_can_driver = { + .driver = { + .name = DEVICE_NAME, + .bus = &spi_bus_type, + .owner = THIS_MODULE, + }, + + .probe = mcp251x_can_probe, + .remove = __devexit_p(mcp251x_can_remove), + .suspend = mcp251x_can_suspend, + .resume = mcp251x_can_resume, +}; + +static int __init mcp251x_can_init(void) +{ + return spi_register_driver(&mcp251x_can_driver); +} + +static void __exit mcp251x_can_exit(void) +{ + spi_unregister_driver(&mcp251x_can_driver); +} + +module_init(mcp251x_can_init); +module_exit(mcp251x_can_exit); + +MODULE_AUTHOR("Chris Elston <celston@katalix.com>, " + "Christian Pellegrin <chripell@evolware.org>"); +MODULE_DESCRIPTION("Microchip 251x CAN driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/can/mscan/Makefile b/drivers/net/can/mscan/Makefile new file mode 100644 index 00000000000..2bd9f04c790 --- /dev/null +++ b/drivers/net/can/mscan/Makefile @@ -0,0 +1,5 @@ + +obj-$(CONFIG_CAN_MPC52XX) += mscan-mpc52xx.o +mscan-mpc52xx-objs := mscan.o mpc52xx_can.o + +ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG diff --git a/drivers/net/can/mscan/mpc52xx_can.c b/drivers/net/can/mscan/mpc52xx_can.c new file mode 100644 index 00000000000..4707a82f1ae --- /dev/null +++ b/drivers/net/can/mscan/mpc52xx_can.c @@ -0,0 +1,279 @@ +/* + * CAN bus driver for the Freescale MPC5xxx embedded CPU. + * + * Copyright (C) 2004-2005 Andrey Volkov <avolkov@varma-el.com>, + * Varma Electronics Oy + * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com> + * Copyright (C) 2009 Wolfram Sang, Pengutronix <w.sang@pengutronix.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the version 2 of the GNU General Public License + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/netdevice.h> +#include <linux/can.h> +#include <linux/can/dev.h> +#include <linux/of_platform.h> +#include <sysdev/fsl_soc.h> +#include <linux/io.h> +#include <asm/mpc52xx.h> + +#include "mscan.h" + + +#define DRV_NAME "mpc5xxx_can" + +static struct of_device_id mpc52xx_cdm_ids[] __devinitdata = { + { .compatible = "fsl,mpc5200-cdm", }, + { .compatible = "fsl,mpc5200b-cdm", }, + {} +}; + +/* + * Get the frequency of the external oscillator clock connected + * to the SYS_XTAL_IN pin, or return 0 if it cannot be determined. + */ +static unsigned int __devinit mpc52xx_can_xtal_freq(struct of_device *of) +{ + struct mpc52xx_cdm __iomem *cdm; + struct device_node *np_cdm; + unsigned int freq; + u32 val; + + freq = mpc5xxx_get_bus_frequency(of->node); + if (!freq) + return 0; + + /* + * Determine SYS_XTAL_IN frequency from the clock domain settings + */ + np_cdm = of_find_matching_node(NULL, mpc52xx_cdm_ids); + if (!np_cdm) { + dev_err(&of->dev, "can't get clock node!\n"); + return 0; + } + cdm = of_iomap(np_cdm, 0); + of_node_put(np_cdm); + + if (in_8(&cdm->ipb_clk_sel) & 0x1) + freq *= 2; + val = in_be32(&cdm->rstcfg); + if (val & (1 << 5)) + freq *= 8; + else + freq *= 4; + if (val & (1 << 6)) + freq /= 12; + else + freq /= 16; + + iounmap(cdm); + + return freq; +} + +/* + * Get frequency of the MSCAN clock source + * + * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock (IP_CLK) + * can be selected. According to the MPC5200 user's manual, the oscillator + * clock is the better choice as it has less jitter but due to a hardware + * bug, it can not be selected for the old MPC5200 Rev. A chips. + */ + +static unsigned int __devinit mpc52xx_can_clock_freq(struct of_device *of, + int clock_src) +{ + unsigned int pvr; + + pvr = mfspr(SPRN_PVR); + + if (clock_src == MSCAN_CLKSRC_BUS || pvr == 0x80822011) + return mpc5xxx_get_bus_frequency(of->node); + + return mpc52xx_can_xtal_freq(of); +} + +static int __devinit mpc5xxx_can_probe(struct of_device *ofdev, + const struct of_device_id *id) +{ + struct device_node *np = ofdev->node; + struct net_device *dev; + struct mscan_priv *priv; + void __iomem *base; + const char *clk_src; + int err, irq, clock_src; + + base = of_iomap(ofdev->node, 0); + if (!base) { + dev_err(&ofdev->dev, "couldn't ioremap\n"); + err = -ENOMEM; + goto exit_release_mem; + } + + irq = irq_of_parse_and_map(np, 0); + if (!irq) { + dev_err(&ofdev->dev, "no irq found\n"); + err = -ENODEV; + goto exit_unmap_mem; + } + + dev = alloc_mscandev(); + if (!dev) { + err = -ENOMEM; + goto exit_dispose_irq; + } + + priv = netdev_priv(dev); + priv->reg_base = base; + dev->irq = irq; + + /* + * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock + * (IP_CLK) can be selected as MSCAN clock source. According to + * the MPC5200 user's manual, the oscillator clock is the better + * choice as it has less jitter. For this reason, it is selected + * by default. 
+ */ + clk_src = of_get_property(np, "fsl,mscan-clk-src", NULL); + if (clk_src && strcmp(clk_src, "ip") == 0) + clock_src = MSCAN_CLKSRC_BUS; + else + clock_src = MSCAN_CLKSRC_XTAL; + priv->can.clock.freq = mpc52xx_can_clock_freq(ofdev, clock_src); + if (!priv->can.clock.freq) { + dev_err(&ofdev->dev, "couldn't get MSCAN clock frequency\n"); + err = -ENODEV; + goto exit_free_mscan; + } + + SET_NETDEV_DEV(dev, &ofdev->dev); + + err = register_mscandev(dev, clock_src); + if (err) { + dev_err(&ofdev->dev, "registering %s failed (err=%d)\n", + DRV_NAME, err); + goto exit_free_mscan; + } + + dev_set_drvdata(&ofdev->dev, dev); + + dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n", + priv->reg_base, dev->irq, priv->can.clock.freq); + + return 0; + +exit_free_mscan: + free_candev(dev); +exit_dispose_irq: + irq_dispose_mapping(irq); +exit_unmap_mem: + iounmap(base); +exit_release_mem: + return err; +} + +static int __devexit mpc5xxx_can_remove(struct of_device *ofdev) +{ + struct net_device *dev = dev_get_drvdata(&ofdev->dev); + struct mscan_priv *priv = netdev_priv(dev); + + dev_set_drvdata(&ofdev->dev, NULL); + + unregister_mscandev(dev); + iounmap(priv->reg_base); + irq_dispose_mapping(dev->irq); + free_candev(dev); + + return 0; +} + +#ifdef CONFIG_PM +static struct mscan_regs saved_regs; +static int mpc5xxx_can_suspend(struct of_device *ofdev, pm_message_t state) +{ + struct net_device *dev = dev_get_drvdata(&ofdev->dev); + struct mscan_priv *priv = netdev_priv(dev); + struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + + _memcpy_fromio(&saved_regs, regs, sizeof(*regs)); + + return 0; +} + +static int mpc5xxx_can_resume(struct of_device *ofdev) +{ + struct net_device *dev = dev_get_drvdata(&ofdev->dev); + struct mscan_priv *priv = netdev_priv(dev); + struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + + regs->canctl0 |= MSCAN_INITRQ; + while ((regs->canctl1 & MSCAN_INITAK) == 0) + udelay(10); + + regs->canctl1 = saved_regs.canctl1; + regs->canbtr0 = saved_regs.canbtr0; + regs->canbtr1 = saved_regs.canbtr1; + regs->canidac = saved_regs.canidac; + + /* restore masks, buffers etc. 
*/ + _memcpy_toio(®s->canidar1_0, (void *)&saved_regs.canidar1_0, + sizeof(*regs) - offsetof(struct mscan_regs, canidar1_0)); + + regs->canctl0 &= ~MSCAN_INITRQ; + regs->cantbsel = saved_regs.cantbsel; + regs->canrier = saved_regs.canrier; + regs->cantier = saved_regs.cantier; + regs->canctl0 = saved_regs.canctl0; + + return 0; +} +#endif + +static struct of_device_id __devinitdata mpc5xxx_can_table[] = { + {.compatible = "fsl,mpc5200-mscan"}, + {.compatible = "fsl,mpc5200b-mscan"}, + {}, +}; + +static struct of_platform_driver mpc5xxx_can_driver = { + .owner = THIS_MODULE, + .name = "mpc5xxx_can", + .probe = mpc5xxx_can_probe, + .remove = __devexit_p(mpc5xxx_can_remove), +#ifdef CONFIG_PM + .suspend = mpc5xxx_can_suspend, + .resume = mpc5xxx_can_resume, +#endif + .match_table = mpc5xxx_can_table, +}; + +static int __init mpc5xxx_can_init(void) +{ + return of_register_platform_driver(&mpc5xxx_can_driver); +} +module_init(mpc5xxx_can_init); + +static void __exit mpc5xxx_can_exit(void) +{ + return of_unregister_platform_driver(&mpc5xxx_can_driver); +}; +module_exit(mpc5xxx_can_exit); + +MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>"); +MODULE_DESCRIPTION("Freescale MPC5200 CAN driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c new file mode 100644 index 00000000000..49542cab9df --- /dev/null +++ b/drivers/net/can/mscan/mscan.c @@ -0,0 +1,699 @@ +/* + * CAN bus driver for the alone generic (as possible as) MSCAN controller. + * + * Copyright (C) 2005-2006 Andrey Volkov <avolkov@varma-el.com>, + * Varma Electronics Oy + * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com> + * Copytight (C) 2008-2009 Pengutronix <kernel@pengutronix.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the version 2 of the GNU General Public License + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/netdevice.h> +#include <linux/if_arp.h> +#include <linux/if_ether.h> +#include <linux/list.h> +#include <linux/can.h> +#include <linux/can/dev.h> +#include <linux/can/error.h> +#include <linux/io.h> + +#include "mscan.h" + +#define MSCAN_NORMAL_MODE 0 +#define MSCAN_SLEEP_MODE MSCAN_SLPRQ +#define MSCAN_INIT_MODE (MSCAN_INITRQ | MSCAN_SLPRQ) +#define MSCAN_POWEROFF_MODE (MSCAN_CSWAI | MSCAN_SLPRQ) +#define MSCAN_SET_MODE_RETRIES 255 +#define MSCAN_ECHO_SKB_MAX 3 + +#define BTR0_BRP_MASK 0x3f +#define BTR0_SJW_SHIFT 6 +#define BTR0_SJW_MASK (0x3 << BTR0_SJW_SHIFT) + +#define BTR1_TSEG1_MASK 0xf +#define BTR1_TSEG2_SHIFT 4 +#define BTR1_TSEG2_MASK (0x7 << BTR1_TSEG2_SHIFT) +#define BTR1_SAM_SHIFT 7 + +#define BTR0_SET_BRP(brp) (((brp) - 1) & BTR0_BRP_MASK) +#define BTR0_SET_SJW(sjw) ((((sjw) - 1) << BTR0_SJW_SHIFT) & \ + BTR0_SJW_MASK) + +#define BTR1_SET_TSEG1(tseg1) (((tseg1) - 1) & BTR1_TSEG1_MASK) +#define BTR1_SET_TSEG2(tseg2) ((((tseg2) - 1) << BTR1_TSEG2_SHIFT) & \ + BTR1_TSEG2_MASK) +#define BTR1_SET_SAM(sam) ((sam) ? 1 << BTR1_SAM_SHIFT : 0) + +static struct can_bittiming_const mscan_bittiming_const = { + .name = "mscan", + .tseg1_min = 4, + .tseg1_max = 16, + .tseg2_min = 2, + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 64, + .brp_inc = 1, +}; + +struct mscan_state { + u8 mode; + u8 canrier; + u8 cantier; +}; + +#define F_RX_PROGRESS 0 +#define F_TX_PROGRESS 1 +#define F_TX_WAIT_ALL 2 + +static enum can_state state_map[] = { + CAN_STATE_ERROR_ACTIVE, + CAN_STATE_ERROR_WARNING, + CAN_STATE_ERROR_PASSIVE, + CAN_STATE_BUS_OFF +}; + +static int mscan_set_mode(struct net_device *dev, u8 mode) +{ + struct mscan_priv *priv = netdev_priv(dev); + struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + int ret = 0; + int i; + u8 canctl1; + + if (mode != MSCAN_NORMAL_MODE) { + + if (priv->tx_active) { + /* Abort transfers before going to sleep */# + out_8(®s->cantarq, priv->tx_active); + /* Suppress TX done interrupts */ + out_8(®s->cantier, 0); + } + + canctl1 = in_8(®s->canctl1); + if ((mode & MSCAN_SLPRQ) && (canctl1 & MSCAN_SLPAK) == 0) { + out_8(®s->canctl0, + in_8(®s->canctl0) | MSCAN_SLPRQ); + for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) { + if (in_8(®s->canctl1) & MSCAN_SLPAK) + break; + udelay(100); + } + /* + * The mscan controller will fail to enter sleep mode, + * while there are irregular activities on bus, like + * somebody keeps retransmitting. This behavior is + * undocumented and seems to differ between mscan built + * in mpc5200b and mpc5200. We proceed in that case, + * since otherwise the slprq will be kept set and the + * controller will get stuck. NOTE: INITRQ or CSWAI + * will abort all active transmit actions, if still + * any, at once. + */ + if (i >= MSCAN_SET_MODE_RETRIES) + dev_dbg(dev->dev.parent, + "device failed to enter sleep mode. 
" + "We proceed anyhow.\n"); + else + priv->can.state = CAN_STATE_SLEEPING; + } + + if ((mode & MSCAN_INITRQ) && (canctl1 & MSCAN_INITAK) == 0) { + out_8(®s->canctl0, + in_8(®s->canctl0) | MSCAN_INITRQ); + for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) { + if (in_8(®s->canctl1) & MSCAN_INITAK) + break; + } + if (i >= MSCAN_SET_MODE_RETRIES) + ret = -ENODEV; + } + if (!ret) + priv->can.state = CAN_STATE_STOPPED; + + if (mode & MSCAN_CSWAI) + out_8(®s->canctl0, + in_8(®s->canctl0) | MSCAN_CSWAI); + + } else { + canctl1 = in_8(®s->canctl1); + if (canctl1 & (MSCAN_SLPAK | MSCAN_INITAK)) { + out_8(®s->canctl0, in_8(®s->canctl0) & + ~(MSCAN_SLPRQ | MSCAN_INITRQ)); + for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) { + canctl1 = in_8(®s->canctl1); + if (!(canctl1 & (MSCAN_INITAK | MSCAN_SLPAK))) + break; + } + if (i >= MSCAN_SET_MODE_RETRIES) + ret = -ENODEV; + else + priv->can.state = CAN_STATE_ERROR_ACTIVE; + } + } + return ret; +} + +static int mscan_start(struct net_device *dev) +{ + struct mscan_priv *priv = netdev_priv(dev); + struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + u8 canrflg; + int err; + + out_8(®s->canrier, 0); + + INIT_LIST_HEAD(&priv->tx_head); + priv->prev_buf_id = 0; + priv->cur_pri = 0; + priv->tx_active = 0; + priv->shadow_canrier = 0; + priv->flags = 0; + + err = mscan_set_mode(dev, MSCAN_NORMAL_MODE); + if (err) + return err; + + canrflg = in_8(®s->canrflg); + priv->shadow_statflg = canrflg & MSCAN_STAT_MSK; + priv->can.state = state_map[max(MSCAN_STATE_RX(canrflg), + MSCAN_STATE_TX(canrflg))]; + out_8(®s->cantier, 0); + + /* Enable receive interrupts. */ + out_8(®s->canrier, MSCAN_OVRIE | MSCAN_RXFIE | MSCAN_CSCIE | + MSCAN_RSTATE1 | MSCAN_RSTATE0 | MSCAN_TSTATE1 | MSCAN_TSTATE0); + + return 0; +} + +static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct can_frame *frame = (struct can_frame *)skb->data; + struct mscan_priv *priv = netdev_priv(dev); + struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + int i, rtr, buf_id; + u32 can_id; + + if (frame->can_dlc > 8) + return -EINVAL; + + out_8(®s->cantier, 0); + + i = ~priv->tx_active & MSCAN_TXE; + buf_id = ffs(i) - 1; + switch (hweight8(i)) { + case 0: + netif_stop_queue(dev); + dev_err(dev->dev.parent, "Tx Ring full when queue awake!\n"); + return NETDEV_TX_BUSY; + case 1: + /* + * if buf_id < 3, then current frame will be send out of order, + * since buffer with lower id have higher priority (hell..) + */ + netif_stop_queue(dev); + case 2: + if (buf_id < priv->prev_buf_id) { + priv->cur_pri++; + if (priv->cur_pri == 0xff) { + set_bit(F_TX_WAIT_ALL, &priv->flags); + netif_stop_queue(dev); + } + } + set_bit(F_TX_PROGRESS, &priv->flags); + break; + } + priv->prev_buf_id = buf_id; + out_8(®s->cantbsel, i); + + rtr = frame->can_id & CAN_RTR_FLAG; + + if (frame->can_id & CAN_EFF_FLAG) { + can_id = (frame->can_id & CAN_EFF_MASK) << 1; + if (rtr) + can_id |= 1; + out_be16(®s->tx.idr3_2, can_id); + + can_id >>= 16; + can_id = (can_id & 0x7) | ((can_id << 2) & 0xffe0) | (3 << 3); + } else { + can_id = (frame->can_id & CAN_SFF_MASK) << 5; + if (rtr) + can_id |= 1 << 4; + } + out_be16(®s->tx.idr1_0, can_id); + + if (!rtr) { + void __iomem *data = ®s->tx.dsr1_0; + u16 *payload = (u16 *) frame->data; + /* It is safe to write into dsr[dlc+1] */ + for (i = 0; i < (frame->can_dlc + 1) / 2; i++) { + out_be16(data, *payload++); + data += 2 + _MSCAN_RESERVED_DSR_SIZE; + } + } + + out_8(®s->tx.dlr, frame->can_dlc); + out_8(®s->tx.tbpr, priv->cur_pri); + + /* Start transmission. 
*/ + out_8(®s->cantflg, 1 << buf_id); + + if (!test_bit(F_TX_PROGRESS, &priv->flags)) + dev->trans_start = jiffies; + + list_add_tail(&priv->tx_queue[buf_id].list, &priv->tx_head); + + can_put_echo_skb(skb, dev, buf_id); + + /* Enable interrupt. */ + priv->tx_active |= 1 << buf_id; + out_8(®s->cantier, priv->tx_active); + + return NETDEV_TX_OK; +} + +/* This function returns the old state to see where we came from */ +static enum can_state check_set_state(struct net_device *dev, u8 canrflg) +{ + struct mscan_priv *priv = netdev_priv(dev); + enum can_state state, old_state = priv->can.state; + + if (canrflg & MSCAN_CSCIF && old_state <= CAN_STATE_BUS_OFF) { + state = state_map[max(MSCAN_STATE_RX(canrflg), + MSCAN_STATE_TX(canrflg))]; + priv->can.state = state; + } + return old_state; +} + +static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame) +{ + struct mscan_priv *priv = netdev_priv(dev); + struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + u32 can_id; + int i; + + can_id = in_be16(®s->rx.idr1_0); + if (can_id & (1 << 3)) { + frame->can_id = CAN_EFF_FLAG; + can_id = ((can_id << 16) | in_be16(®s->rx.idr3_2)); + can_id = ((can_id & 0xffe00000) | + ((can_id & 0x7ffff) << 2)) >> 2; + } else { + can_id >>= 4; + frame->can_id = 0; + } + + frame->can_id |= can_id >> 1; + if (can_id & 1) + frame->can_id |= CAN_RTR_FLAG; + frame->can_dlc = in_8(®s->rx.dlr) & 0xf; + + if (!(frame->can_id & CAN_RTR_FLAG)) { + void __iomem *data = ®s->rx.dsr1_0; + u16 *payload = (u16 *) frame->data; + for (i = 0; i < (frame->can_dlc + 1) / 2; i++) { + *payload++ = in_be16(data); + data += 2 + _MSCAN_RESERVED_DSR_SIZE; + } + } + + out_8(®s->canrflg, MSCAN_RXF); +} + +static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame, + u8 canrflg) +{ + struct mscan_priv *priv = netdev_priv(dev); + struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + struct net_device_stats *stats = &dev->stats; + enum can_state old_state; + + dev_dbg(dev->dev.parent, "error interrupt (canrflg=%#x)\n", canrflg); + frame->can_id = CAN_ERR_FLAG; + + if (canrflg & MSCAN_OVRIF) { + frame->can_id |= CAN_ERR_CRTL; + frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + stats->rx_over_errors++; + stats->rx_errors++; + } else + frame->data[1] = 0; + + old_state = check_set_state(dev, canrflg); + /* State changed */ + if (old_state != priv->can.state) { + switch (priv->can.state) { + case CAN_STATE_ERROR_WARNING: + frame->can_id |= CAN_ERR_CRTL; + priv->can.can_stats.error_warning++; + if ((priv->shadow_statflg & MSCAN_RSTAT_MSK) < + (canrflg & MSCAN_RSTAT_MSK)) + frame->data[1] |= CAN_ERR_CRTL_RX_WARNING; + + if ((priv->shadow_statflg & MSCAN_TSTAT_MSK) < + (canrflg & MSCAN_TSTAT_MSK)) + frame->data[1] |= CAN_ERR_CRTL_TX_WARNING; + break; + case CAN_STATE_ERROR_PASSIVE: + frame->can_id |= CAN_ERR_CRTL; + priv->can.can_stats.error_passive++; + frame->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; + break; + case CAN_STATE_BUS_OFF: + frame->can_id |= CAN_ERR_BUSOFF; + /* + * The MSCAN on the MPC5200 does recover from bus-off + * automatically. To avoid that we stop the chip doing + * a light-weight stop (we are in irq-context). 
+ */ + out_8(®s->cantier, 0); + out_8(®s->canrier, 0); + out_8(®s->canctl0, in_8(®s->canctl0) | + MSCAN_SLPRQ | MSCAN_INITRQ); + can_bus_off(dev); + break; + default: + break; + } + } + priv->shadow_statflg = canrflg & MSCAN_STAT_MSK; + frame->can_dlc = CAN_ERR_DLC; + out_8(®s->canrflg, MSCAN_ERR_IF); +} + +static int mscan_rx_poll(struct napi_struct *napi, int quota) +{ + struct mscan_priv *priv = container_of(napi, struct mscan_priv, napi); + struct net_device *dev = napi->dev; + struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + struct net_device_stats *stats = &dev->stats; + int npackets = 0; + int ret = 1; + struct sk_buff *skb; + struct can_frame *frame; + u8 canrflg; + + while (npackets < quota && ((canrflg = in_8(®s->canrflg)) & + (MSCAN_RXF | MSCAN_ERR_IF))) { + + skb = alloc_can_skb(dev, &frame); + if (!skb) { + if (printk_ratelimit()) + dev_notice(dev->dev.parent, "packet dropped\n"); + stats->rx_dropped++; + out_8(®s->canrflg, canrflg); + continue; + } + + if (canrflg & MSCAN_RXF) + mscan_get_rx_frame(dev, frame); + else if (canrflg & MSCAN_ERR_IF) + mscan_get_err_frame(dev, frame, canrflg); + + stats->rx_packets++; + stats->rx_bytes += frame->can_dlc; + npackets++; + netif_receive_skb(skb); + } + + if (!(in_8(®s->canrflg) & (MSCAN_RXF | MSCAN_ERR_IF))) { + napi_complete(&priv->napi); + clear_bit(F_RX_PROGRESS, &priv->flags); + if (priv->can.state < CAN_STATE_BUS_OFF) + out_8(®s->canrier, priv->shadow_canrier); + ret = 0; + } + return ret; +} + +static irqreturn_t mscan_isr(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct mscan_priv *priv = netdev_priv(dev); + struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + struct net_device_stats *stats = &dev->stats; + u8 cantier, cantflg, canrflg; + irqreturn_t ret = IRQ_NONE; + + cantier = in_8(®s->cantier) & MSCAN_TXE; + cantflg = in_8(®s->cantflg) & cantier; + + if (cantier && cantflg) { + + struct list_head *tmp, *pos; + + list_for_each_safe(pos, tmp, &priv->tx_head) { + struct tx_queue_entry *entry = + list_entry(pos, struct tx_queue_entry, list); + u8 mask = entry->mask; + + if (!(cantflg & mask)) + continue; + + out_8(®s->cantbsel, mask); + stats->tx_bytes += in_8(®s->tx.dlr); + stats->tx_packets++; + can_get_echo_skb(dev, entry->id); + priv->tx_active &= ~mask; + list_del(pos); + } + + if (list_empty(&priv->tx_head)) { + clear_bit(F_TX_WAIT_ALL, &priv->flags); + clear_bit(F_TX_PROGRESS, &priv->flags); + priv->cur_pri = 0; + } else + dev->trans_start = jiffies; + + if (!test_bit(F_TX_WAIT_ALL, &priv->flags)) + netif_wake_queue(dev); + + out_8(®s->cantier, priv->tx_active); + ret = IRQ_HANDLED; + } + + canrflg = in_8(®s->canrflg); + if ((canrflg & ~MSCAN_STAT_MSK) && + !test_and_set_bit(F_RX_PROGRESS, &priv->flags)) { + if (canrflg & ~MSCAN_STAT_MSK) { + priv->shadow_canrier = in_8(®s->canrier); + out_8(®s->canrier, 0); + napi_schedule(&priv->napi); + ret = IRQ_HANDLED; + } else + clear_bit(F_RX_PROGRESS, &priv->flags); + } + return ret; +} + +static int mscan_do_set_mode(struct net_device *dev, enum can_mode mode) +{ + + struct mscan_priv *priv = netdev_priv(dev); + int ret = 0; + + if (!priv->open_time) + return -EINVAL; + + switch (mode) { + case CAN_MODE_SLEEP: + case CAN_MODE_STOP: + netif_stop_queue(dev); + mscan_set_mode(dev, + (mode == + CAN_MODE_STOP) ? 
MSCAN_INIT_MODE : + MSCAN_SLEEP_MODE); + break; + case CAN_MODE_START: + if (priv->can.state <= CAN_STATE_BUS_OFF) + mscan_set_mode(dev, MSCAN_INIT_MODE); + ret = mscan_start(dev); + if (ret) + break; + if (netif_queue_stopped(dev)) + netif_wake_queue(dev); + break; + + default: + ret = -EOPNOTSUPP; + break; + } + return ret; +} + +static int mscan_do_set_bittiming(struct net_device *dev) +{ + struct mscan_priv *priv = netdev_priv(dev); + struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + struct can_bittiming *bt = &priv->can.bittiming; + u8 btr0, btr1; + + btr0 = BTR0_SET_BRP(bt->brp) | BTR0_SET_SJW(bt->sjw); + btr1 = (BTR1_SET_TSEG1(bt->prop_seg + bt->phase_seg1) | + BTR1_SET_TSEG2(bt->phase_seg2) | + BTR1_SET_SAM(priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)); + + dev_info(dev->dev.parent, "setting BTR0=0x%02x BTR1=0x%02x\n", + btr0, btr1); + + out_8(®s->canbtr0, btr0); + out_8(®s->canbtr1, btr1); + + return 0; +} + +static int mscan_open(struct net_device *dev) +{ + int ret; + struct mscan_priv *priv = netdev_priv(dev); + struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + + /* common open */ + ret = open_candev(dev); + if (ret) + return ret; + + napi_enable(&priv->napi); + + ret = request_irq(dev->irq, mscan_isr, 0, dev->name, dev); + if (ret < 0) { + napi_disable(&priv->napi); + printk(KERN_ERR "%s - failed to attach interrupt\n", + dev->name); + return ret; + } + + priv->open_time = jiffies; + + out_8(®s->canctl1, in_8(®s->canctl1) & ~MSCAN_LISTEN); + + ret = mscan_start(dev); + if (ret) + return ret; + + netif_start_queue(dev); + + return 0; +} + +static int mscan_close(struct net_device *dev) +{ + struct mscan_priv *priv = netdev_priv(dev); + struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + + netif_stop_queue(dev); + napi_disable(&priv->napi); + + out_8(®s->cantier, 0); + out_8(®s->canrier, 0); + mscan_set_mode(dev, MSCAN_INIT_MODE); + close_candev(dev); + free_irq(dev->irq, dev); + priv->open_time = 0; + + return 0; +} + +static const struct net_device_ops mscan_netdev_ops = { + .ndo_open = mscan_open, + .ndo_stop = mscan_close, + .ndo_start_xmit = mscan_start_xmit, +}; + +int register_mscandev(struct net_device *dev, int clock_src) +{ + struct mscan_priv *priv = netdev_priv(dev); + struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + u8 ctl1; + + ctl1 = in_8(®s->canctl1); + if (clock_src) + ctl1 |= MSCAN_CLKSRC; + else + ctl1 &= ~MSCAN_CLKSRC; + + ctl1 |= MSCAN_CANE; + out_8(®s->canctl1, ctl1); + udelay(100); + + /* acceptance mask/acceptance code (accept everything) */ + out_be16(®s->canidar1_0, 0); + out_be16(®s->canidar3_2, 0); + out_be16(®s->canidar5_4, 0); + out_be16(®s->canidar7_6, 0); + + out_be16(®s->canidmr1_0, 0xffff); + out_be16(®s->canidmr3_2, 0xffff); + out_be16(®s->canidmr5_4, 0xffff); + out_be16(®s->canidmr7_6, 0xffff); + /* Two 32 bit Acceptance Filters */ + out_8(®s->canidac, MSCAN_AF_32BIT); + + mscan_set_mode(dev, MSCAN_INIT_MODE); + + return register_candev(dev); +} +EXPORT_SYMBOL_GPL(register_mscandev); + +void unregister_mscandev(struct net_device *dev) +{ + struct mscan_priv *priv = netdev_priv(dev); + struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; + mscan_set_mode(dev, MSCAN_INIT_MODE); + out_8(®s->canctl1, in_8(®s->canctl1) & ~MSCAN_CANE); + unregister_candev(dev); +} +EXPORT_SYMBOL_GPL(unregister_mscandev); + +struct net_device *alloc_mscandev(void) +{ + struct net_device *dev; + struct mscan_priv *priv; + int i; + + dev = alloc_candev(sizeof(struct mscan_priv), MSCAN_ECHO_SKB_MAX); + 
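register_mscandev() below programs all acceptance code registers to 0 and all mask registers to 0xffff, i.e. every identifier bit is marked "don't care", so every frame is accepted. A tiny sketch of the matching rule this relies on (based on the usual MSCAN filter semantics, where a set mask bit excludes that bit from the comparison; this is my reading of the hardware behaviour, not code from this patch):

/* Illustrative user-space sketch, not part of the driver. */
#include <stdint.h>
#include <stdio.h>

/*
 * MSCAN-style acceptance test (assumed semantics): a mask bit of 1 means
 * "ignore this bit". With code = 0 and mask = all-ones every frame matches.
 */
static int filter_matches(uint32_t rx_id, uint32_t code, uint32_t mask)
{
	return ((rx_id ^ code) & ~mask) == 0;
}

int main(void)
{
	printf("%d\n", filter_matches(0x123, 0x000, 0xffffffff)); /* 1: accept all */
	printf("%d\n", filter_matches(0x123, 0x100, 0x0ff));      /* 1: upper bits match */
	printf("%d\n", filter_matches(0x223, 0x100, 0x0ff));      /* 0: upper bits differ */
	return 0;
}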
if (!dev) + return NULL; + priv = netdev_priv(dev); + + dev->netdev_ops = &mscan_netdev_ops; + + dev->flags |= IFF_ECHO; /* we support local echo */ + + netif_napi_add(dev, &priv->napi, mscan_rx_poll, 8); + + priv->can.bittiming_const = &mscan_bittiming_const; + priv->can.do_set_bittiming = mscan_do_set_bittiming; + priv->can.do_set_mode = mscan_do_set_mode; + + for (i = 0; i < TX_QUEUE_SIZE; i++) { + priv->tx_queue[i].id = i; + priv->tx_queue[i].mask = 1 << i; + } + + return dev; +} +EXPORT_SYMBOL_GPL(alloc_mscandev); + +MODULE_AUTHOR("Andrey Volkov <avolkov@varma-el.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("CAN port driver for a MSCAN based chips"); diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h new file mode 100644 index 00000000000..57820f5fb81 --- /dev/null +++ b/drivers/net/can/mscan/mscan.h @@ -0,0 +1,262 @@ +/* + * Definitions of consts/structs to drive the Freescale MSCAN. + * + * Copyright (C) 2005-2006 Andrey Volkov <avolkov@varma-el.com>, + * Varma Electronics Oy + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the version 2 of the GNU General Public License + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __MSCAN_H__ +#define __MSCAN_H__ + +#include <linux/types.h> + +/* MSCAN control register 0 (CANCTL0) bits */ +#define MSCAN_RXFRM 0x80 +#define MSCAN_RXACT 0x40 +#define MSCAN_CSWAI 0x20 +#define MSCAN_SYNCH 0x10 +#define MSCAN_TIME 0x08 +#define MSCAN_WUPE 0x04 +#define MSCAN_SLPRQ 0x02 +#define MSCAN_INITRQ 0x01 + +/* MSCAN control register 1 (CANCTL1) bits */ +#define MSCAN_CANE 0x80 +#define MSCAN_CLKSRC 0x40 +#define MSCAN_LOOPB 0x20 +#define MSCAN_LISTEN 0x10 +#define MSCAN_WUPM 0x04 +#define MSCAN_SLPAK 0x02 +#define MSCAN_INITAK 0x01 + +/* Use the MPC5200 MSCAN variant? 
*/ +#ifdef CONFIG_PPC +#define MSCAN_FOR_MPC5200 +#endif + +#ifdef MSCAN_FOR_MPC5200 +#define MSCAN_CLKSRC_BUS 0 +#define MSCAN_CLKSRC_XTAL MSCAN_CLKSRC +#else +#define MSCAN_CLKSRC_BUS MSCAN_CLKSRC +#define MSCAN_CLKSRC_XTAL 0 +#endif + +/* MSCAN receiver flag register (CANRFLG) bits */ +#define MSCAN_WUPIF 0x80 +#define MSCAN_CSCIF 0x40 +#define MSCAN_RSTAT1 0x20 +#define MSCAN_RSTAT0 0x10 +#define MSCAN_TSTAT1 0x08 +#define MSCAN_TSTAT0 0x04 +#define MSCAN_OVRIF 0x02 +#define MSCAN_RXF 0x01 +#define MSCAN_ERR_IF (MSCAN_OVRIF | MSCAN_CSCIF) +#define MSCAN_RSTAT_MSK (MSCAN_RSTAT1 | MSCAN_RSTAT0) +#define MSCAN_TSTAT_MSK (MSCAN_TSTAT1 | MSCAN_TSTAT0) +#define MSCAN_STAT_MSK (MSCAN_RSTAT_MSK | MSCAN_TSTAT_MSK) + +#define MSCAN_STATE_BUS_OFF (MSCAN_RSTAT1 | MSCAN_RSTAT0 | \ + MSCAN_TSTAT1 | MSCAN_TSTAT0) +#define MSCAN_STATE_TX(canrflg) (((canrflg)&MSCAN_TSTAT_MSK)>>2) +#define MSCAN_STATE_RX(canrflg) (((canrflg)&MSCAN_RSTAT_MSK)>>4) +#define MSCAN_STATE_ACTIVE 0 +#define MSCAN_STATE_WARNING 1 +#define MSCAN_STATE_PASSIVE 2 +#define MSCAN_STATE_BUSOFF 3 + +/* MSCAN receiver interrupt enable register (CANRIER) bits */ +#define MSCAN_WUPIE 0x80 +#define MSCAN_CSCIE 0x40 +#define MSCAN_RSTATE1 0x20 +#define MSCAN_RSTATE0 0x10 +#define MSCAN_TSTATE1 0x08 +#define MSCAN_TSTATE0 0x04 +#define MSCAN_OVRIE 0x02 +#define MSCAN_RXFIE 0x01 + +/* MSCAN transmitter flag register (CANTFLG) bits */ +#define MSCAN_TXE2 0x04 +#define MSCAN_TXE1 0x02 +#define MSCAN_TXE0 0x01 +#define MSCAN_TXE (MSCAN_TXE2 | MSCAN_TXE1 | MSCAN_TXE0) + +/* MSCAN transmitter interrupt enable register (CANTIER) bits */ +#define MSCAN_TXIE2 0x04 +#define MSCAN_TXIE1 0x02 +#define MSCAN_TXIE0 0x01 +#define MSCAN_TXIE (MSCAN_TXIE2 | MSCAN_TXIE1 | MSCAN_TXIE0) + +/* MSCAN transmitter message abort request (CANTARQ) bits */ +#define MSCAN_ABTRQ2 0x04 +#define MSCAN_ABTRQ1 0x02 +#define MSCAN_ABTRQ0 0x01 + +/* MSCAN transmitter message abort ack (CANTAAK) bits */ +#define MSCAN_ABTAK2 0x04 +#define MSCAN_ABTAK1 0x02 +#define MSCAN_ABTAK0 0x01 + +/* MSCAN transmit buffer selection (CANTBSEL) bits */ +#define MSCAN_TX2 0x04 +#define MSCAN_TX1 0x02 +#define MSCAN_TX0 0x01 + +/* MSCAN ID acceptance control register (CANIDAC) bits */ +#define MSCAN_IDAM1 0x20 +#define MSCAN_IDAM0 0x10 +#define MSCAN_IDHIT2 0x04 +#define MSCAN_IDHIT1 0x02 +#define MSCAN_IDHIT0 0x01 + +#define MSCAN_AF_32BIT 0x00 +#define MSCAN_AF_16BIT MSCAN_IDAM0 +#define MSCAN_AF_8BIT MSCAN_IDAM1 +#define MSCAN_AF_CLOSED (MSCAN_IDAM0|MSCAN_IDAM1) +#define MSCAN_AF_MASK (~(MSCAN_IDAM0|MSCAN_IDAM1)) + +/* MSCAN Miscellaneous Register (CANMISC) bits */ +#define MSCAN_BOHOLD 0x01 + +#ifdef MSCAN_FOR_MPC5200 +#define _MSCAN_RESERVED_(n, num) u8 _res##n[num] +#define _MSCAN_RESERVED_DSR_SIZE 2 +#else +#define _MSCAN_RESERVED_(n, num) +#define _MSCAN_RESERVED_DSR_SIZE 0 +#endif + +/* Structure of the hardware registers */ +struct mscan_regs { + /* (see doc S12MSCANV3/D) MPC5200 MSCAN */ + u8 canctl0; /* + 0x00 0x00 */ + u8 canctl1; /* + 0x01 0x01 */ + _MSCAN_RESERVED_(1, 2); /* + 0x02 */ + u8 canbtr0; /* + 0x04 0x02 */ + u8 canbtr1; /* + 0x05 0x03 */ + _MSCAN_RESERVED_(2, 2); /* + 0x06 */ + u8 canrflg; /* + 0x08 0x04 */ + u8 canrier; /* + 0x09 0x05 */ + _MSCAN_RESERVED_(3, 2); /* + 0x0a */ + u8 cantflg; /* + 0x0c 0x06 */ + u8 cantier; /* + 0x0d 0x07 */ + _MSCAN_RESERVED_(4, 2); /* + 0x0e */ + u8 cantarq; /* + 0x10 0x08 */ + u8 cantaak; /* + 0x11 0x09 */ + _MSCAN_RESERVED_(5, 2); /* + 0x12 */ + u8 cantbsel; /* + 0x14 0x0a */ + u8 canidac; /* + 0x15 0x0b */ + u8 reserved; /* + 0x16 
0x0c */ + _MSCAN_RESERVED_(6, 5); /* + 0x17 */ +#ifndef MSCAN_FOR_MPC5200 + u8 canmisc; /* 0x0d */ +#endif + u8 canrxerr; /* + 0x1c 0x0e */ + u8 cantxerr; /* + 0x1d 0x0f */ + _MSCAN_RESERVED_(7, 2); /* + 0x1e */ + u16 canidar1_0; /* + 0x20 0x10 */ + _MSCAN_RESERVED_(8, 2); /* + 0x22 */ + u16 canidar3_2; /* + 0x24 0x12 */ + _MSCAN_RESERVED_(9, 2); /* + 0x26 */ + u16 canidmr1_0; /* + 0x28 0x14 */ + _MSCAN_RESERVED_(10, 2); /* + 0x2a */ + u16 canidmr3_2; /* + 0x2c 0x16 */ + _MSCAN_RESERVED_(11, 2); /* + 0x2e */ + u16 canidar5_4; /* + 0x30 0x18 */ + _MSCAN_RESERVED_(12, 2); /* + 0x32 */ + u16 canidar7_6; /* + 0x34 0x1a */ + _MSCAN_RESERVED_(13, 2); /* + 0x36 */ + u16 canidmr5_4; /* + 0x38 0x1c */ + _MSCAN_RESERVED_(14, 2); /* + 0x3a */ + u16 canidmr7_6; /* + 0x3c 0x1e */ + _MSCAN_RESERVED_(15, 2); /* + 0x3e */ + struct { + u16 idr1_0; /* + 0x40 0x20 */ + _MSCAN_RESERVED_(16, 2); /* + 0x42 */ + u16 idr3_2; /* + 0x44 0x22 */ + _MSCAN_RESERVED_(17, 2); /* + 0x46 */ + u16 dsr1_0; /* + 0x48 0x24 */ + _MSCAN_RESERVED_(18, 2); /* + 0x4a */ + u16 dsr3_2; /* + 0x4c 0x26 */ + _MSCAN_RESERVED_(19, 2); /* + 0x4e */ + u16 dsr5_4; /* + 0x50 0x28 */ + _MSCAN_RESERVED_(20, 2); /* + 0x52 */ + u16 dsr7_6; /* + 0x54 0x2a */ + _MSCAN_RESERVED_(21, 2); /* + 0x56 */ + u8 dlr; /* + 0x58 0x2c */ + u8:8; /* + 0x59 0x2d */ + _MSCAN_RESERVED_(22, 2); /* + 0x5a */ + u16 time; /* + 0x5c 0x2e */ + } rx; + _MSCAN_RESERVED_(23, 2); /* + 0x5e */ + struct { + u16 idr1_0; /* + 0x60 0x30 */ + _MSCAN_RESERVED_(24, 2); /* + 0x62 */ + u16 idr3_2; /* + 0x64 0x32 */ + _MSCAN_RESERVED_(25, 2); /* + 0x66 */ + u16 dsr1_0; /* + 0x68 0x34 */ + _MSCAN_RESERVED_(26, 2); /* + 0x6a */ + u16 dsr3_2; /* + 0x6c 0x36 */ + _MSCAN_RESERVED_(27, 2); /* + 0x6e */ + u16 dsr5_4; /* + 0x70 0x38 */ + _MSCAN_RESERVED_(28, 2); /* + 0x72 */ + u16 dsr7_6; /* + 0x74 0x3a */ + _MSCAN_RESERVED_(29, 2); /* + 0x76 */ + u8 dlr; /* + 0x78 0x3c */ + u8 tbpr; /* + 0x79 0x3d */ + _MSCAN_RESERVED_(30, 2); /* + 0x7a */ + u16 time; /* + 0x7c 0x3e */ + } tx; + _MSCAN_RESERVED_(31, 2); /* + 0x7e */ +} __attribute__ ((packed)); + +#undef _MSCAN_RESERVED_ +#define MSCAN_REGION sizeof(struct mscan) + +#define TX_QUEUE_SIZE 3 + +struct tx_queue_entry { + struct list_head list; + u8 mask; + u8 id; +}; + +struct mscan_priv { + struct can_priv can; /* must be the first member */ + long open_time; + unsigned long flags; + void __iomem *reg_base; /* ioremap'ed address to registers */ + u8 shadow_statflg; + u8 shadow_canrier; + u8 cur_pri; + u8 prev_buf_id; + u8 tx_active; + + struct list_head tx_head; + struct tx_queue_entry tx_queue[TX_QUEUE_SIZE]; + struct napi_struct napi; +}; + +struct net_device *alloc_mscandev(void); +/* + * clock_src: + * 1 = The MSCAN clock source is the onchip Bus Clock. + * 0 = The MSCAN clock source is the chip Oscillator Clock. 
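The _MSCAN_RESERVED_ padding in the register layout above is what makes the payload copy loops in mscan_start_xmit() and mscan_get_rx_frame() step by 2 + _MSCAN_RESERVED_DSR_SIZE bytes. A short sketch of the resulting data-segment register offsets on the MPC5200 layout versus the generic one (offsets relative to dsr1_0; printing code is illustrative):

/* Illustrative user-space sketch, not part of the driver. */
#include <stdio.h>

#define MPC5200_RESERVED_DSR_SIZE	2	/* two pad bytes after each DSR word */
#define GENERIC_RESERVED_DSR_SIZE	0

static void print_offsets(const char *name, int reserved)
{
	int i, off = 0;

	printf("%s:", name);
	for (i = 0; i < 4; i++) {	/* dsr1_0 .. dsr7_6 */
		printf(" dsr%d_%d@+%d", 2 * i + 1, 2 * i, off);
		off += 2 + reserved;
	}
	printf("\n");
}

int main(void)
{
	print_offsets("mpc5200", MPC5200_RESERVED_DSR_SIZE);	/* +0 +4 +8 +12 */
	print_offsets("generic", GENERIC_RESERVED_DSR_SIZE);	/* +0 +2 +4 +6  */
	return 0;
}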
+ */ +extern int register_mscandev(struct net_device *dev, int clock_src); +extern void unregister_mscandev(struct net_device *dev); + +#endif /* __MSCAN_H__ */ diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index 16d2ecd2a3b..782a47fabf2 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -296,11 +296,9 @@ static void sja1000_rx(struct net_device *dev) uint8_t dlc; int i; - skb = dev_alloc_skb(sizeof(struct can_frame)); + skb = alloc_can_skb(dev, &cf); if (skb == NULL) return; - skb->dev = dev; - skb->protocol = htons(ETH_P_CAN); fi = priv->read_reg(priv, REG_FI); dlc = fi & 0x0F; @@ -323,8 +321,6 @@ static void sja1000_rx(struct net_device *dev) if (fi & FI_RTR) id |= CAN_RTR_FLAG; - cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame)); - memset(cf, 0, sizeof(struct can_frame)); cf->can_id = id; cf->can_dlc = dlc; for (i = 0; i < dlc; i++) @@ -351,15 +347,9 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) enum can_state state = priv->can.state; uint8_t ecc, alc; - skb = dev_alloc_skb(sizeof(struct can_frame)); + skb = alloc_can_err_skb(dev, &cf); if (skb == NULL) return -ENOMEM; - skb->dev = dev; - skb->protocol = htons(ETH_P_CAN); - cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame)); - memset(cf, 0, sizeof(struct can_frame)); - cf->can_id = CAN_ERR_FLAG; - cf->can_dlc = CAN_ERR_DLC; if (isrc & IRQ_DOI) { /* data overrun interrupt */ @@ -565,7 +555,8 @@ struct net_device *alloc_sja1000dev(int sizeof_priv) struct net_device *dev; struct sja1000_priv *priv; - dev = alloc_candev(sizeof(struct sja1000_priv) + sizeof_priv); + dev = alloc_candev(sizeof(struct sja1000_priv) + sizeof_priv, + SJA1000_ECHO_SKB_MAX); if (!dev) return NULL; diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h index 302d2c763ad..97a622b9302 100644 --- a/drivers/net/can/sja1000/sja1000.h +++ b/drivers/net/can/sja1000/sja1000.h @@ -50,6 +50,8 @@ #include <linux/can/dev.h> #include <linux/can/platform/sja1000.h> +#define SJA1000_ECHO_SKB_MAX 1 /* the SJA1000 has one TX buffer object */ + #define SJA1000_MAX_IRQ 20 /* max. number of interrupts handled in ISR */ /* SJA1000 registers - manual section 6.4 (Pelican Mode) */ diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c new file mode 100644 index 00000000000..07e8016b17e --- /dev/null +++ b/drivers/net/can/ti_hecc.c @@ -0,0 +1,993 @@ +/* + * TI HECC (CAN) device driver + * + * This driver supports TI's HECC (High End CAN Controller module) and the + * specs for the same is available at <http://www.ti.com> + * + * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/ + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed as is WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +/* + * Your platform definitions should specify module ram offsets and interrupt + * number to use as follows: + * + * static struct ti_hecc_platform_data am3517_evm_hecc_pdata = { + * .scc_hecc_offset = 0, + * .scc_ram_offset = 0x3000, + * .hecc_ram_offset = 0x3000, + * .mbx_offset = 0x2000, + * .int_line = 0, + * .revision = 1, + * }; + * + * Please see include/can/platform/ti_hecc.h for description of above fields + * + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/errno.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/platform_device.h> +#include <linux/clk.h> + +#include <linux/can.h> +#include <linux/can/dev.h> +#include <linux/can/error.h> +#include <linux/can/platform/ti_hecc.h> + +#define DRV_NAME "ti_hecc" +#define HECC_MODULE_VERSION "0.7" +MODULE_VERSION(HECC_MODULE_VERSION); +#define DRV_DESC "TI High End CAN Controller Driver " HECC_MODULE_VERSION + +/* TX / RX Mailbox Configuration */ +#define HECC_MAX_MAILBOXES 32 /* hardware mailboxes - do not change */ +#define MAX_TX_PRIO 0x3F /* hardware value - do not change */ + +/* + * Important Note: TX mailbox configuration + * TX mailboxes should be restricted to the number of SKB buffers to avoid + * maintaining SKB buffers separately. TX mailboxes should be a power of 2 + * for the mailbox logic to work. Top mailbox numbers are reserved for RX + * and lower mailboxes for TX. + * + * HECC_MAX_TX_MBOX HECC_MB_TX_SHIFT + * 4 (default) 2 + * 8 3 + * 16 4 + */ +#define HECC_MB_TX_SHIFT 2 /* as per table above */ +#define HECC_MAX_TX_MBOX BIT(HECC_MB_TX_SHIFT) + +#define HECC_TX_PRIO_SHIFT (HECC_MB_TX_SHIFT) +#define HECC_TX_PRIO_MASK (MAX_TX_PRIO << HECC_MB_TX_SHIFT) +#define HECC_TX_MB_MASK (HECC_MAX_TX_MBOX - 1) +#define HECC_TX_MASK ((HECC_MAX_TX_MBOX - 1) | HECC_TX_PRIO_MASK) +#define HECC_TX_MBOX_MASK (~(BIT(HECC_MAX_TX_MBOX) - 1)) +#define HECC_DEF_NAPI_WEIGHT HECC_MAX_RX_MBOX + +/* + * Important Note: RX mailbox configuration + * RX mailboxes are further logically split into two - main and buffer + * mailboxes. The goal is to get all packets into main mailboxes as + * driven by mailbox number and receive priority (higher to lower) and + * buffer mailboxes are used to receive pkts while main mailboxes are being + * processed. This ensures in-order packet reception. + * + * Here are the recommended values for buffer mailbox. 
Note that RX mailboxes + * start after TX mailboxes: + * + * HECC_MAX_RX_MBOX HECC_RX_BUFFER_MBOX No of buffer mailboxes + * 28 12 8 + * 16 20 4 + */ + +#define HECC_MAX_RX_MBOX (HECC_MAX_MAILBOXES - HECC_MAX_TX_MBOX) +#define HECC_RX_BUFFER_MBOX 12 /* as per table above */ +#define HECC_RX_FIRST_MBOX (HECC_MAX_MAILBOXES - 1) +#define HECC_RX_HIGH_MBOX_MASK (~(BIT(HECC_RX_BUFFER_MBOX) - 1)) + +/* TI HECC module registers */ +#define HECC_CANME 0x0 /* Mailbox enable */ +#define HECC_CANMD 0x4 /* Mailbox direction */ +#define HECC_CANTRS 0x8 /* Transmit request set */ +#define HECC_CANTRR 0xC /* Transmit request */ +#define HECC_CANTA 0x10 /* Transmission acknowledge */ +#define HECC_CANAA 0x14 /* Abort acknowledge */ +#define HECC_CANRMP 0x18 /* Receive message pending */ +#define HECC_CANRML 0x1C /* Remote message lost */ +#define HECC_CANRFP 0x20 /* Remote frame pending */ +#define HECC_CANGAM 0x24 /* SECC only:Global acceptance mask */ +#define HECC_CANMC 0x28 /* Master control */ +#define HECC_CANBTC 0x2C /* Bit timing configuration */ +#define HECC_CANES 0x30 /* Error and status */ +#define HECC_CANTEC 0x34 /* Transmit error counter */ +#define HECC_CANREC 0x38 /* Receive error counter */ +#define HECC_CANGIF0 0x3C /* Global interrupt flag 0 */ +#define HECC_CANGIM 0x40 /* Global interrupt mask */ +#define HECC_CANGIF1 0x44 /* Global interrupt flag 1 */ +#define HECC_CANMIM 0x48 /* Mailbox interrupt mask */ +#define HECC_CANMIL 0x4C /* Mailbox interrupt level */ +#define HECC_CANOPC 0x50 /* Overwrite protection control */ +#define HECC_CANTIOC 0x54 /* Transmit I/O control */ +#define HECC_CANRIOC 0x58 /* Receive I/O control */ +#define HECC_CANLNT 0x5C /* HECC only: Local network time */ +#define HECC_CANTOC 0x60 /* HECC only: Time-out control */ +#define HECC_CANTOS 0x64 /* HECC only: Time-out status */ +#define HECC_CANTIOCE 0x68 /* SCC only:Enhanced TX I/O control */ +#define HECC_CANRIOCE 0x6C /* SCC only:Enhanced RX I/O control */ + +/* Mailbox registers */ +#define HECC_CANMID 0x0 +#define HECC_CANMCF 0x4 +#define HECC_CANMDL 0x8 +#define HECC_CANMDH 0xC + +#define HECC_SET_REG 0xFFFFFFFF +#define HECC_CANID_MASK 0x3FF /* 18 bits mask for extended id's */ +#define HECC_CCE_WAIT_COUNT 100 /* Wait for ~1 sec for CCE bit */ + +#define HECC_CANMC_SCM BIT(13) /* SCC compat mode */ +#define HECC_CANMC_CCR BIT(12) /* Change config request */ +#define HECC_CANMC_PDR BIT(11) /* Local Power down - for sleep mode */ +#define HECC_CANMC_ABO BIT(7) /* Auto Bus On */ +#define HECC_CANMC_STM BIT(6) /* Self test mode - loopback */ +#define HECC_CANMC_SRES BIT(5) /* Software reset */ + +#define HECC_CANTIOC_EN BIT(3) /* Enable CAN TX I/O pin */ +#define HECC_CANRIOC_EN BIT(3) /* Enable CAN RX I/O pin */ + +#define HECC_CANMID_IDE BIT(31) /* Extended frame format */ +#define HECC_CANMID_AME BIT(30) /* Acceptance mask enable */ +#define HECC_CANMID_AAM BIT(29) /* Auto answer mode */ + +#define HECC_CANES_FE BIT(24) /* form error */ +#define HECC_CANES_BE BIT(23) /* bit error */ +#define HECC_CANES_SA1 BIT(22) /* stuck at dominant error */ +#define HECC_CANES_CRCE BIT(21) /* CRC error */ +#define HECC_CANES_SE BIT(20) /* stuff bit error */ +#define HECC_CANES_ACKE BIT(19) /* ack error */ +#define HECC_CANES_BO BIT(18) /* Bus off status */ +#define HECC_CANES_EP BIT(17) /* Error passive status */ +#define HECC_CANES_EW BIT(16) /* Error warning status */ +#define HECC_CANES_SMA BIT(5) /* suspend mode ack */ +#define HECC_CANES_CCE BIT(4) /* Change config enabled */ +#define HECC_CANES_PDA BIT(3) /* 
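With the default HECC_MB_TX_SHIFT of 2, the macros above split the 32 hardware mailboxes into 4 TX mailboxes (0..3), 8 buffer RX mailboxes (4..11) and 20 main RX mailboxes (12..31). A small sketch that evaluates the derived masks (the defines are copied from above; the printing code is illustrative):

/* Illustrative user-space sketch, not part of the driver. */
#include <stdio.h>

#define BIT(n)			(1U << (n))

#define HECC_MAX_MAILBOXES	32U
#define MAX_TX_PRIO		0x3FU
#define HECC_MB_TX_SHIFT	2		/* default: 4 TX mailboxes */
#define HECC_MAX_TX_MBOX	BIT(HECC_MB_TX_SHIFT)
#define HECC_TX_PRIO_MASK	(MAX_TX_PRIO << HECC_MB_TX_SHIFT)
#define HECC_TX_MB_MASK		(HECC_MAX_TX_MBOX - 1)
#define HECC_TX_MASK		((HECC_MAX_TX_MBOX - 1) | HECC_TX_PRIO_MASK)
#define HECC_TX_MBOX_MASK	(~(BIT(HECC_MAX_TX_MBOX) - 1))
#define HECC_MAX_RX_MBOX	(HECC_MAX_MAILBOXES - HECC_MAX_TX_MBOX)
#define HECC_RX_BUFFER_MBOX	12U
#define HECC_RX_FIRST_MBOX	(HECC_MAX_MAILBOXES - 1)
#define HECC_RX_HIGH_MBOX_MASK	(~(BIT(HECC_RX_BUFFER_MBOX) - 1))

int main(void)
{
	printf("TX mailboxes        : 0..%u\n", HECC_MAX_TX_MBOX - 1);
	printf("buffer RX mailboxes : %u..%u\n",
	       HECC_MAX_TX_MBOX, HECC_RX_BUFFER_MBOX - 1);
	printf("main RX mailboxes   : %u..%u (%u RX total)\n",
	       HECC_RX_BUFFER_MBOX, HECC_RX_FIRST_MBOX, HECC_MAX_RX_MBOX);
	printf("HECC_TX_MASK        : %#x\n", HECC_TX_MASK);
	printf("HECC_TX_MBOX_MASK   : %#x\n", HECC_TX_MBOX_MASK);
	printf("RX high-bank mask   : %#x\n", HECC_RX_HIGH_MBOX_MASK);
	return 0;
}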
Power down mode ack */ + +#define HECC_CANBTC_SAM BIT(7) /* sample points */ + +#define HECC_BUS_ERROR (HECC_CANES_FE | HECC_CANES_BE |\ + HECC_CANES_CRCE | HECC_CANES_SE |\ + HECC_CANES_ACKE) + +#define HECC_CANMCF_RTR BIT(4) /* Remote transmit request */ + +#define HECC_CANGIF_MAIF BIT(17) /* Message alarm interrupt */ +#define HECC_CANGIF_TCOIF BIT(16) /* Timer counter overflow int */ +#define HECC_CANGIF_GMIF BIT(15) /* Global mailbox interrupt */ +#define HECC_CANGIF_AAIF BIT(14) /* Abort ack interrupt */ +#define HECC_CANGIF_WDIF BIT(13) /* Write denied interrupt */ +#define HECC_CANGIF_WUIF BIT(12) /* Wake up interrupt */ +#define HECC_CANGIF_RMLIF BIT(11) /* Receive message lost interrupt */ +#define HECC_CANGIF_BOIF BIT(10) /* Bus off interrupt */ +#define HECC_CANGIF_EPIF BIT(9) /* Error passive interrupt */ +#define HECC_CANGIF_WLIF BIT(8) /* Warning level interrupt */ +#define HECC_CANGIF_MBOX_MASK 0x1F /* Mailbox number mask */ +#define HECC_CANGIM_I1EN BIT(1) /* Int line 1 enable */ +#define HECC_CANGIM_I0EN BIT(0) /* Int line 0 enable */ +#define HECC_CANGIM_DEF_MASK 0x700 /* only busoff/warning/passive */ +#define HECC_CANGIM_SIL BIT(2) /* system interrupts to int line 1 */ + +/* CAN Bittiming constants as per HECC specs */ +static struct can_bittiming_const ti_hecc_bittiming_const = { + .name = DRV_NAME, + .tseg1_min = 1, + .tseg1_max = 16, + .tseg2_min = 1, + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 256, + .brp_inc = 1, +}; + +struct ti_hecc_priv { + struct can_priv can; /* MUST be first member/field */ + struct napi_struct napi; + struct net_device *ndev; + struct clk *clk; + void __iomem *base; + u32 scc_ram_offset; + u32 hecc_ram_offset; + u32 mbx_offset; + u32 int_line; + spinlock_t mbx_lock; /* CANME register needs protection */ + u32 tx_head; + u32 tx_tail; + u32 rx_next; +}; + +static inline int get_tx_head_mb(struct ti_hecc_priv *priv) +{ + return priv->tx_head & HECC_TX_MB_MASK; +} + +static inline int get_tx_tail_mb(struct ti_hecc_priv *priv) +{ + return priv->tx_tail & HECC_TX_MB_MASK; +} + +static inline int get_tx_head_prio(struct ti_hecc_priv *priv) +{ + return (priv->tx_head >> HECC_TX_PRIO_SHIFT) & MAX_TX_PRIO; +} + +static inline void hecc_write_lam(struct ti_hecc_priv *priv, u32 mbxno, u32 val) +{ + __raw_writel(val, priv->base + priv->hecc_ram_offset + mbxno * 4); +} + +static inline void hecc_write_mbx(struct ti_hecc_priv *priv, u32 mbxno, + u32 reg, u32 val) +{ + __raw_writel(val, priv->base + priv->mbx_offset + mbxno * 0x10 + + reg); +} + +static inline u32 hecc_read_mbx(struct ti_hecc_priv *priv, u32 mbxno, u32 reg) +{ + return __raw_readl(priv->base + priv->mbx_offset + mbxno * 0x10 + + reg); +} + +static inline void hecc_write(struct ti_hecc_priv *priv, u32 reg, u32 val) +{ + __raw_writel(val, priv->base + reg); +} + +static inline u32 hecc_read(struct ti_hecc_priv *priv, int reg) +{ + return __raw_readl(priv->base + reg); +} + +static inline void hecc_set_bit(struct ti_hecc_priv *priv, int reg, + u32 bit_mask) +{ + hecc_write(priv, reg, hecc_read(priv, reg) | bit_mask); +} + +static inline void hecc_clear_bit(struct ti_hecc_priv *priv, int reg, + u32 bit_mask) +{ + hecc_write(priv, reg, hecc_read(priv, reg) & ~bit_mask); +} + +static inline u32 hecc_get_bit(struct ti_hecc_priv *priv, int reg, u32 bit_mask) +{ + return (hecc_read(priv, reg) & bit_mask) ? 
1 : 0; +} + +static int ti_hecc_get_state(const struct net_device *ndev, + enum can_state *state) +{ + struct ti_hecc_priv *priv = netdev_priv(ndev); + + *state = priv->can.state; + return 0; +} + +static int ti_hecc_set_btc(struct ti_hecc_priv *priv) +{ + struct can_bittiming *bit_timing = &priv->can.bittiming; + u32 can_btc; + + can_btc = (bit_timing->phase_seg2 - 1) & 0x7; + can_btc |= ((bit_timing->phase_seg1 + bit_timing->prop_seg - 1) + & 0xF) << 3; + if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) { + if (bit_timing->brp > 4) + can_btc |= HECC_CANBTC_SAM; + else + dev_warn(priv->ndev->dev.parent, "WARN: Triple" \ + "sampling not set due to h/w limitations"); + } + can_btc |= ((bit_timing->sjw - 1) & 0x3) << 8; + can_btc |= ((bit_timing->brp - 1) & 0xFF) << 16; + + /* ERM being set to 0 by default meaning resync at falling edge */ + + hecc_write(priv, HECC_CANBTC, can_btc); + dev_info(priv->ndev->dev.parent, "setting CANBTC=%#x\n", can_btc); + + return 0; +} + +static void ti_hecc_reset(struct net_device *ndev) +{ + u32 cnt; + struct ti_hecc_priv *priv = netdev_priv(ndev); + + dev_dbg(ndev->dev.parent, "resetting hecc ...\n"); + hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_SRES); + + /* Set change control request and wait till enabled */ + hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_CCR); + + /* + * INFO: It has been observed that at times CCE bit may not be + * set and hw seems to be ok even if this bit is not set so + * timing out with a timing of 1ms to respect the specs + */ + cnt = HECC_CCE_WAIT_COUNT; + while (!hecc_get_bit(priv, HECC_CANES, HECC_CANES_CCE) && cnt != 0) { + --cnt; + udelay(10); + } + + /* + * Note: On HECC, BTC can be programmed only in initialization mode, so + * it is expected that the can bittiming parameters are set via ip + * utility before the device is opened + */ + ti_hecc_set_btc(priv); + + /* Clear CCR (and CANMC register) and wait for CCE = 0 enable */ + hecc_write(priv, HECC_CANMC, 0); + + /* + * INFO: CAN net stack handles bus off and hence disabling auto-bus-on + * hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_ABO); + */ + + /* + * INFO: It has been observed that at times CCE bit may not be + * set and hw seems to be ok even if this bit is not set so + */ + cnt = HECC_CCE_WAIT_COUNT; + while (hecc_get_bit(priv, HECC_CANES, HECC_CANES_CCE) && cnt != 0) { + --cnt; + udelay(10); + } + + /* Enable TX and RX I/O Control pins */ + hecc_write(priv, HECC_CANTIOC, HECC_CANTIOC_EN); + hecc_write(priv, HECC_CANRIOC, HECC_CANRIOC_EN); + + /* Clear registers for clean operation */ + hecc_write(priv, HECC_CANTA, HECC_SET_REG); + hecc_write(priv, HECC_CANRMP, HECC_SET_REG); + hecc_write(priv, HECC_CANGIF0, HECC_SET_REG); + hecc_write(priv, HECC_CANGIF1, HECC_SET_REG); + hecc_write(priv, HECC_CANME, 0); + hecc_write(priv, HECC_CANMD, 0); + + /* SCC compat mode NOT supported (and not needed too) */ + hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_SCM); +} + +static void ti_hecc_start(struct net_device *ndev) +{ + struct ti_hecc_priv *priv = netdev_priv(ndev); + u32 cnt, mbxno, mbx_mask; + + /* put HECC in initialization mode and set btc */ + ti_hecc_reset(ndev); + + priv->tx_head = priv->tx_tail = HECC_TX_MASK; + priv->rx_next = HECC_RX_FIRST_MBOX; + + /* Enable local and global acceptance mask registers */ + hecc_write(priv, HECC_CANGAM, HECC_SET_REG); + + /* Prepare configured mailboxes to receive messages */ + for (cnt = 0; cnt < HECC_MAX_RX_MBOX; cnt++) { + mbxno = HECC_MAX_MAILBOXES - 1 - cnt; + mbx_mask = BIT(mbxno); + hecc_clear_bit(priv, HECC_CANME, mbx_mask); + 
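ti_hecc_set_btc() above composes CANBTC from the bit timing parameters: TSEG2-1 in bits 2..0, TSEG1-1 (prop_seg + phase_seg1) in bits 6..3, the optional triple-sampling bit in bit 7, SJW-1 in bits 9..8 and BRP-1 in bits 23..16. A user-space mirror of that composition with an arbitrary example timing (not a recommended configuration):

/* Illustrative user-space sketch, not part of the driver. */
#include <stdint.h>
#include <stdio.h>

#define HECC_CANBTC_SAM	(1U << 7)	/* triple sampling, as in the driver */

/* Mirror of the CANBTC composition in ti_hecc_set_btc(). */
static uint32_t hecc_btc(unsigned int prop_seg, unsigned int phase_seg1,
			 unsigned int phase_seg2, unsigned int sjw,
			 unsigned int brp, int triple_sampling)
{
	uint32_t can_btc;

	can_btc = (phase_seg2 - 1) & 0x7;
	can_btc |= ((phase_seg1 + prop_seg - 1) & 0xF) << 3;
	if (triple_sampling && brp > 4)
		can_btc |= HECC_CANBTC_SAM;
	can_btc |= ((sjw - 1) & 0x3) << 8;
	can_btc |= ((brp - 1) & 0xFF) << 16;
	return can_btc;
}

int main(void)
{
	/* prop_seg=8, phase_seg1=3, phase_seg2=4, sjw=1, brp=3 -> 0x20053 */
	printf("CANBTC = %#x\n", hecc_btc(8, 3, 4, 1, 3, 0));
	return 0;
}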
hecc_write_mbx(priv, mbxno, HECC_CANMID, HECC_CANMID_AME); + hecc_write_lam(priv, mbxno, HECC_SET_REG); + hecc_set_bit(priv, HECC_CANMD, mbx_mask); + hecc_set_bit(priv, HECC_CANME, mbx_mask); + hecc_set_bit(priv, HECC_CANMIM, mbx_mask); + } + + /* Prevent message over-write & Enable interrupts */ + hecc_write(priv, HECC_CANOPC, HECC_SET_REG); + if (priv->int_line) { + hecc_write(priv, HECC_CANMIL, HECC_SET_REG); + hecc_write(priv, HECC_CANGIM, HECC_CANGIM_DEF_MASK | + HECC_CANGIM_I1EN | HECC_CANGIM_SIL); + } else { + hecc_write(priv, HECC_CANMIL, 0); + hecc_write(priv, HECC_CANGIM, + HECC_CANGIM_DEF_MASK | HECC_CANGIM_I0EN); + } + priv->can.state = CAN_STATE_ERROR_ACTIVE; +} + +static void ti_hecc_stop(struct net_device *ndev) +{ + struct ti_hecc_priv *priv = netdev_priv(ndev); + + /* Disable interrupts and disable mailboxes */ + hecc_write(priv, HECC_CANGIM, 0); + hecc_write(priv, HECC_CANMIM, 0); + hecc_write(priv, HECC_CANME, 0); + priv->can.state = CAN_STATE_STOPPED; +} + +static int ti_hecc_do_set_mode(struct net_device *ndev, enum can_mode mode) +{ + int ret = 0; + + switch (mode) { + case CAN_MODE_START: + ti_hecc_start(ndev); + netif_wake_queue(ndev); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +/* + * ti_hecc_xmit: HECC Transmit + * + * The transmit mailboxes start from 0 to HECC_MAX_TX_MBOX. In HECC the + * priority of the mailbox for tranmission is dependent upon priority setting + * field in mailbox registers. The mailbox with highest value in priority field + * is transmitted first. Only when two mailboxes have the same value in + * priority field the highest numbered mailbox is transmitted first. + * + * To utilize the HECC priority feature as described above we start with the + * highest numbered mailbox with highest priority level and move on to the next + * mailbox with the same priority level and so on. Once we loop through all the + * transmit mailboxes we choose the next priority level (lower) and so on + * until we reach the lowest priority level on the lowest numbered mailbox + * when we stop transmission until all mailboxes are transmitted and then + * restart at highest numbered mailbox with highest priority. + * + * Two counters (head and tail) are used to track the next mailbox to transmit + * and to track the echo buffer for already transmitted mailbox. The queue + * is stopped when all the mailboxes are busy or when there is a priority + * value roll-over happens. 
+ */ +static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct ti_hecc_priv *priv = netdev_priv(ndev); + struct can_frame *cf = (struct can_frame *)skb->data; + u32 mbxno, mbx_mask, data; + unsigned long flags; + + mbxno = get_tx_head_mb(priv); + mbx_mask = BIT(mbxno); + spin_lock_irqsave(&priv->mbx_lock, flags); + if (unlikely(hecc_read(priv, HECC_CANME) & mbx_mask)) { + spin_unlock_irqrestore(&priv->mbx_lock, flags); + netif_stop_queue(ndev); + dev_err(priv->ndev->dev.parent, + "BUG: TX mbx not ready tx_head=%08X, tx_tail=%08X\n", + priv->tx_head, priv->tx_tail); + return NETDEV_TX_BUSY; + } + spin_unlock_irqrestore(&priv->mbx_lock, flags); + + /* Prepare mailbox for transmission */ + data = min_t(u8, cf->can_dlc, 8); + if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */ + data |= HECC_CANMCF_RTR; + data |= get_tx_head_prio(priv) << 8; + hecc_write_mbx(priv, mbxno, HECC_CANMCF, data); + + if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */ + data = (cf->can_id & CAN_EFF_MASK) | HECC_CANMID_IDE; + else /* Standard frame format */ + data = (cf->can_id & CAN_SFF_MASK) << 18; + hecc_write_mbx(priv, mbxno, HECC_CANMID, data); + hecc_write_mbx(priv, mbxno, HECC_CANMDL, + be32_to_cpu(*(u32 *)(cf->data))); + if (cf->can_dlc > 4) + hecc_write_mbx(priv, mbxno, HECC_CANMDH, + be32_to_cpu(*(u32 *)(cf->data + 4))); + else + *(u32 *)(cf->data + 4) = 0; + can_put_echo_skb(skb, ndev, mbxno); + + spin_lock_irqsave(&priv->mbx_lock, flags); + --priv->tx_head; + if ((hecc_read(priv, HECC_CANME) & BIT(get_tx_head_mb(priv))) || + (priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK) { + netif_stop_queue(ndev); + } + hecc_set_bit(priv, HECC_CANME, mbx_mask); + spin_unlock_irqrestore(&priv->mbx_lock, flags); + + hecc_clear_bit(priv, HECC_CANMD, mbx_mask); + hecc_set_bit(priv, HECC_CANMIM, mbx_mask); + hecc_write(priv, HECC_CANTRS, mbx_mask); + + return NETDEV_TX_OK; +} + +static int ti_hecc_rx_pkt(struct ti_hecc_priv *priv, int mbxno) +{ + struct net_device_stats *stats = &priv->ndev->stats; + struct can_frame *cf; + struct sk_buff *skb; + u32 data, mbx_mask; + unsigned long flags; + + skb = alloc_can_skb(priv->ndev, &cf); + if (!skb) { + if (printk_ratelimit()) + dev_err(priv->ndev->dev.parent, + "ti_hecc_rx_pkt: alloc_can_skb() failed\n"); + return -ENOMEM; + } + + mbx_mask = BIT(mbxno); + data = hecc_read_mbx(priv, mbxno, HECC_CANMID); + if (data & HECC_CANMID_IDE) + cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG; + else + cf->can_id = (data >> 18) & CAN_SFF_MASK; + data = hecc_read_mbx(priv, mbxno, HECC_CANMCF); + if (data & HECC_CANMCF_RTR) + cf->can_id |= CAN_RTR_FLAG; + cf->can_dlc = data & 0xF; + data = hecc_read_mbx(priv, mbxno, HECC_CANMDL); + *(u32 *)(cf->data) = cpu_to_be32(data); + if (cf->can_dlc > 4) { + data = hecc_read_mbx(priv, mbxno, HECC_CANMDH); + *(u32 *)(cf->data + 4) = cpu_to_be32(data); + } else { + *(u32 *)(cf->data + 4) = 0; + } + spin_lock_irqsave(&priv->mbx_lock, flags); + hecc_clear_bit(priv, HECC_CANME, mbx_mask); + hecc_write(priv, HECC_CANRMP, mbx_mask); + /* enable mailbox only if it is part of rx buffer mailboxes */ + if (priv->rx_next < HECC_RX_BUFFER_MBOX) + hecc_set_bit(priv, HECC_CANME, mbx_mask); + spin_unlock_irqrestore(&priv->mbx_lock, flags); + + stats->rx_bytes += cf->can_dlc; + netif_receive_skb(skb); + stats->rx_packets++; + + return 0; +} + +/* + * ti_hecc_rx_poll - HECC receive pkts + * + * The receive mailboxes start from highest numbered mailbox till last xmit + * mailbox. 
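The tx_head/tx_tail counters used in ti_hecc_xmit() pack both the mailbox number (the low HECC_MB_TX_SHIFT bits) and the transmit priority (the next six bits) into a single decrementing value, which is what keeps hardware transmission in submission order. A small sketch of how mailbox and priority fall out of that counter for the first few frames (defaults from the configuration table above; the loop is illustrative):

/* Illustrative user-space sketch, not part of the driver. */
#include <stdio.h>

#define HECC_MB_TX_SHIFT	2
#define MAX_TX_PRIO		0x3F
#define HECC_TX_MB_MASK		((1U << HECC_MB_TX_SHIFT) - 1)
#define HECC_TX_MASK		(HECC_TX_MB_MASK | (MAX_TX_PRIO << HECC_MB_TX_SHIFT))

int main(void)
{
	unsigned int head = HECC_TX_MASK;	/* initial value from ti_hecc_start() */
	int i;

	/* First ten frames: mailbox number and priority assigned to each. */
	for (i = 0; i < 10; i++, head--)
		printf("head=%#04x -> mailbox %u, priority %u\n",
		       head, head & HECC_TX_MB_MASK,
		       (head >> HECC_MB_TX_SHIFT) & MAX_TX_PRIO);
	return 0;
}

The output starts at mailbox 3 with priority 63 and walks down through mailboxes 2, 1 and 0 before dropping to priority 62, matching the scheme described in the comment above ti_hecc_xmit().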
On CAN frame reception the hardware places the data into highest + * numbered mailbox that matches the CAN ID filter. Since all receive mailboxes + * have same filtering (ALL CAN frames) packets will arrive in the highest + * available RX mailbox and we need to ensure in-order packet reception. + * + * To ensure the packets are received in the right order we logically divide + * the RX mailboxes into main and buffer mailboxes. Packets are received as per + * mailbox priotity (higher to lower) in the main bank and once it is full we + * disable further reception into main mailboxes. While the main mailboxes are + * processed in NAPI, further packets are received in buffer mailboxes. + * + * We maintain a RX next mailbox counter to process packets and once all main + * mailboxe packets are passed to the upper stack we enable all of them but + * continue to process packets received in buffer mailboxes. With each packet + * received from buffer mailbox we enable it immediately so as to handle the + * overflow from higher mailboxes. + */ +static int ti_hecc_rx_poll(struct napi_struct *napi, int quota) +{ + struct net_device *ndev = napi->dev; + struct ti_hecc_priv *priv = netdev_priv(ndev); + u32 num_pkts = 0; + u32 mbx_mask; + unsigned long pending_pkts, flags; + + if (!netif_running(ndev)) + return 0; + + while ((pending_pkts = hecc_read(priv, HECC_CANRMP)) && + num_pkts < quota) { + mbx_mask = BIT(priv->rx_next); /* next rx mailbox to process */ + if (mbx_mask & pending_pkts) { + if (ti_hecc_rx_pkt(priv, priv->rx_next) < 0) + return num_pkts; + ++num_pkts; + } else if (priv->rx_next > HECC_RX_BUFFER_MBOX) { + break; /* pkt not received yet */ + } + --priv->rx_next; + if (priv->rx_next == HECC_RX_BUFFER_MBOX) { + /* enable high bank mailboxes */ + spin_lock_irqsave(&priv->mbx_lock, flags); + mbx_mask = hecc_read(priv, HECC_CANME); + mbx_mask |= HECC_RX_HIGH_MBOX_MASK; + hecc_write(priv, HECC_CANME, mbx_mask); + spin_unlock_irqrestore(&priv->mbx_lock, flags); + } else if (priv->rx_next == HECC_MAX_TX_MBOX - 1) { + priv->rx_next = HECC_RX_FIRST_MBOX; + break; + } + } + + /* Enable packet interrupt if all pkts are handled */ + if (hecc_read(priv, HECC_CANRMP) == 0) { + napi_complete(napi); + /* Re-enable RX mailbox interrupts */ + mbx_mask = hecc_read(priv, HECC_CANMIM); + mbx_mask |= HECC_TX_MBOX_MASK; + hecc_write(priv, HECC_CANMIM, mbx_mask); + } + + return num_pkts; +} + +static int ti_hecc_error(struct net_device *ndev, int int_status, + int err_status) +{ + struct ti_hecc_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + struct can_frame *cf; + struct sk_buff *skb; + + /* propogate the error condition to the can stack */ + skb = alloc_can_err_skb(ndev, &cf); + if (!skb) { + if (printk_ratelimit()) + dev_err(priv->ndev->dev.parent, + "ti_hecc_error: alloc_can_err_skb() failed\n"); + return -ENOMEM; + } + + if (int_status & HECC_CANGIF_WLIF) { /* warning level int */ + if ((int_status & HECC_CANGIF_BOIF) == 0) { + priv->can.state = CAN_STATE_ERROR_WARNING; + ++priv->can.can_stats.error_warning; + cf->can_id |= CAN_ERR_CRTL; + if (hecc_read(priv, HECC_CANTEC) > 96) + cf->data[1] |= CAN_ERR_CRTL_TX_WARNING; + if (hecc_read(priv, HECC_CANREC) > 96) + cf->data[1] |= CAN_ERR_CRTL_RX_WARNING; + } + hecc_set_bit(priv, HECC_CANES, HECC_CANES_EW); + dev_dbg(priv->ndev->dev.parent, "Error Warning interrupt\n"); + hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR); + } + + if (int_status & HECC_CANGIF_EPIF) { /* error passive int */ + if ((int_status & HECC_CANGIF_BOIF) 
== 0) { + priv->can.state = CAN_STATE_ERROR_PASSIVE; + ++priv->can.can_stats.error_passive; + cf->can_id |= CAN_ERR_CRTL; + if (hecc_read(priv, HECC_CANTEC) > 127) + cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE; + if (hecc_read(priv, HECC_CANREC) > 127) + cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; + } + hecc_set_bit(priv, HECC_CANES, HECC_CANES_EP); + dev_dbg(priv->ndev->dev.parent, "Error passive interrupt\n"); + hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR); + } + + /* + * Need to check busoff condition in error status register too to + * ensure warning interrupts don't hog the system + */ + if ((int_status & HECC_CANGIF_BOIF) || (err_status & HECC_CANES_BO)) { + priv->can.state = CAN_STATE_BUS_OFF; + cf->can_id |= CAN_ERR_BUSOFF; + hecc_set_bit(priv, HECC_CANES, HECC_CANES_BO); + hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR); + /* Disable all interrupts in bus-off to avoid int hog */ + hecc_write(priv, HECC_CANGIM, 0); + can_bus_off(ndev); + } + + if (err_status & HECC_BUS_ERROR) { + ++priv->can.can_stats.bus_error; + cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; + cf->data[2] |= CAN_ERR_PROT_UNSPEC; + if (err_status & HECC_CANES_FE) { + hecc_set_bit(priv, HECC_CANES, HECC_CANES_FE); + cf->data[2] |= CAN_ERR_PROT_FORM; + } + if (err_status & HECC_CANES_BE) { + hecc_set_bit(priv, HECC_CANES, HECC_CANES_BE); + cf->data[2] |= CAN_ERR_PROT_BIT; + } + if (err_status & HECC_CANES_SE) { + hecc_set_bit(priv, HECC_CANES, HECC_CANES_SE); + cf->data[2] |= CAN_ERR_PROT_STUFF; + } + if (err_status & HECC_CANES_CRCE) { + hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE); + cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ | + CAN_ERR_PROT_LOC_CRC_DEL; + } + if (err_status & HECC_CANES_ACKE) { + hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE); + cf->data[2] |= CAN_ERR_PROT_LOC_ACK | + CAN_ERR_PROT_LOC_ACK_DEL; + } + } + + netif_receive_skb(skb); + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + return 0; +} + +static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id) +{ + struct net_device *ndev = (struct net_device *)dev_id; + struct ti_hecc_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + u32 mbxno, mbx_mask, int_status, err_status; + unsigned long ack, flags; + + int_status = hecc_read(priv, + (priv->int_line) ? 
HECC_CANGIF1 : HECC_CANGIF0); + + if (!int_status) + return IRQ_NONE; + + err_status = hecc_read(priv, HECC_CANES); + if (err_status & (HECC_BUS_ERROR | HECC_CANES_BO | + HECC_CANES_EP | HECC_CANES_EW)) + ti_hecc_error(ndev, int_status, err_status); + + if (int_status & HECC_CANGIF_GMIF) { + while (priv->tx_tail - priv->tx_head > 0) { + mbxno = get_tx_tail_mb(priv); + mbx_mask = BIT(mbxno); + if (!(mbx_mask & hecc_read(priv, HECC_CANTA))) + break; + hecc_clear_bit(priv, HECC_CANMIM, mbx_mask); + hecc_write(priv, HECC_CANTA, mbx_mask); + spin_lock_irqsave(&priv->mbx_lock, flags); + hecc_clear_bit(priv, HECC_CANME, mbx_mask); + spin_unlock_irqrestore(&priv->mbx_lock, flags); + stats->tx_bytes += hecc_read_mbx(priv, mbxno, + HECC_CANMCF) & 0xF; + stats->tx_packets++; + can_get_echo_skb(ndev, mbxno); + --priv->tx_tail; + } + + /* restart queue if wrap-up or if queue stalled on last pkt */ + if (((priv->tx_head == priv->tx_tail) && + ((priv->tx_head & HECC_TX_MASK) != HECC_TX_MASK)) || + (((priv->tx_tail & HECC_TX_MASK) == HECC_TX_MASK) && + ((priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK))) + netif_wake_queue(ndev); + + /* Disable RX mailbox interrupts and let NAPI reenable them */ + if (hecc_read(priv, HECC_CANRMP)) { + ack = hecc_read(priv, HECC_CANMIM); + ack &= BIT(HECC_MAX_TX_MBOX) - 1; + hecc_write(priv, HECC_CANMIM, ack); + napi_schedule(&priv->napi); + } + } + + /* clear all interrupt conditions - read back to avoid spurious ints */ + if (priv->int_line) { + hecc_write(priv, HECC_CANGIF1, HECC_SET_REG); + int_status = hecc_read(priv, HECC_CANGIF1); + } else { + hecc_write(priv, HECC_CANGIF0, HECC_SET_REG); + int_status = hecc_read(priv, HECC_CANGIF0); + } + + return IRQ_HANDLED; +} + +static int ti_hecc_open(struct net_device *ndev) +{ + struct ti_hecc_priv *priv = netdev_priv(ndev); + int err; + + err = request_irq(ndev->irq, ti_hecc_interrupt, IRQF_SHARED, + ndev->name, ndev); + if (err) { + dev_err(ndev->dev.parent, "error requesting interrupt\n"); + return err; + } + + /* Open common can device */ + err = open_candev(ndev); + if (err) { + dev_err(ndev->dev.parent, "open_candev() failed %d\n", err); + free_irq(ndev->irq, ndev); + return err; + } + + clk_enable(priv->clk); + ti_hecc_start(ndev); + napi_enable(&priv->napi); + netif_start_queue(ndev); + + return 0; +} + +static int ti_hecc_close(struct net_device *ndev) +{ + struct ti_hecc_priv *priv = netdev_priv(ndev); + + netif_stop_queue(ndev); + napi_disable(&priv->napi); + ti_hecc_stop(ndev); + free_irq(ndev->irq, ndev); + clk_disable(priv->clk); + close_candev(ndev); + + return 0; +} + +static const struct net_device_ops ti_hecc_netdev_ops = { + .ndo_open = ti_hecc_open, + .ndo_stop = ti_hecc_close, + .ndo_start_xmit = ti_hecc_xmit, +}; + +static int ti_hecc_probe(struct platform_device *pdev) +{ + struct net_device *ndev = (struct net_device *)0; + struct ti_hecc_priv *priv; + struct ti_hecc_platform_data *pdata; + struct resource *mem, *irq; + void __iomem *addr; + int err = -ENODEV; + + pdata = pdev->dev.platform_data; + if (!pdata) { + dev_err(&pdev->dev, "No platform data\n"); + goto probe_exit; + } + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) { + dev_err(&pdev->dev, "No mem resources\n"); + goto probe_exit; + } + irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!irq) { + dev_err(&pdev->dev, "No irq resource\n"); + goto probe_exit; + } + if (!request_mem_region(mem->start, resource_size(mem), pdev->name)) { + dev_err(&pdev->dev, "HECC region already claimed\n"); + err = -EBUSY; + goto 
probe_exit; + } + addr = ioremap(mem->start, resource_size(mem)); + if (!addr) { + dev_err(&pdev->dev, "ioremap failed\n"); + err = -ENOMEM; + goto probe_exit_free_region; + } + + ndev = alloc_candev(sizeof(struct ti_hecc_priv), HECC_MAX_TX_MBOX); + if (!ndev) { + dev_err(&pdev->dev, "alloc_candev failed\n"); + err = -ENOMEM; + goto probe_exit_iounmap; + } + + priv = netdev_priv(ndev); + priv->ndev = ndev; + priv->base = addr; + priv->scc_ram_offset = pdata->scc_ram_offset; + priv->hecc_ram_offset = pdata->hecc_ram_offset; + priv->mbx_offset = pdata->mbx_offset; + priv->int_line = pdata->int_line; + + priv->can.bittiming_const = &ti_hecc_bittiming_const; + priv->can.do_set_mode = ti_hecc_do_set_mode; + priv->can.do_get_state = ti_hecc_get_state; + + ndev->irq = irq->start; + ndev->flags |= IFF_ECHO; + platform_set_drvdata(pdev, ndev); + SET_NETDEV_DEV(ndev, &pdev->dev); + ndev->netdev_ops = &ti_hecc_netdev_ops; + + priv->clk = clk_get(&pdev->dev, "hecc_ck"); + if (IS_ERR(priv->clk)) { + dev_err(&pdev->dev, "No clock available\n"); + err = PTR_ERR(priv->clk); + priv->clk = NULL; + goto probe_exit_candev; + } + priv->can.clock.freq = clk_get_rate(priv->clk); + netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll, + HECC_DEF_NAPI_WEIGHT); + + err = register_candev(ndev); + if (err) { + dev_err(&pdev->dev, "register_candev() failed\n"); + goto probe_exit_clk; + } + dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n", + priv->base, (u32) ndev->irq); + + return 0; + +probe_exit_clk: + clk_put(priv->clk); +probe_exit_candev: + free_candev(ndev); +probe_exit_iounmap: + iounmap(addr); +probe_exit_free_region: + release_mem_region(mem->start, resource_size(mem)); +probe_exit: + return err; +} + +static int __devexit ti_hecc_remove(struct platform_device *pdev) +{ + struct resource *res; + struct net_device *ndev = platform_get_drvdata(pdev); + struct ti_hecc_priv *priv = netdev_priv(ndev); + + clk_put(priv->clk); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + iounmap(priv->base); + release_mem_region(res->start, resource_size(res)); + unregister_candev(ndev); + free_candev(ndev); + platform_set_drvdata(pdev, NULL); + + return 0; +} + +/* TI HECC netdevice driver: platform driver structure */ +static struct platform_driver ti_hecc_driver = { + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + }, + .probe = ti_hecc_probe, + .remove = __devexit_p(ti_hecc_remove), +}; + +static int __init ti_hecc_init_driver(void) +{ + printk(KERN_INFO DRV_DESC "\n"); + return platform_driver_register(&ti_hecc_driver); +} +module_init(ti_hecc_init_driver); + +static void __exit ti_hecc_exit_driver(void) +{ + printk(KERN_INFO DRV_DESC " unloaded\n"); + platform_driver_unregister(&ti_hecc_driver); +} +module_exit(ti_hecc_exit_driver); + +MODULE_AUTHOR("Anant Gole <anantgole@ti.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION(DRV_DESC); diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index abdbd9c2b78..3e4419054c8 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -232,7 +232,7 @@ MODULE_DEVICE_TABLE(usb, ems_usb_table); #define INTR_IN_BUFFER_SIZE 4 #define MAX_RX_URBS 10 -#define MAX_TX_URBS CAN_ECHO_SKB_MAX +#define MAX_TX_URBS 10 struct ems_usb; @@ -311,14 +311,10 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg) int i; struct net_device_stats *stats = &dev->netdev->stats; - skb = netdev_alloc_skb(dev->netdev, sizeof(struct can_frame)); + skb = alloc_can_skb(dev->netdev, &cf); if (skb == NULL) 
return; - skb->protocol = htons(ETH_P_CAN); - - cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame)); - cf->can_id = le32_to_cpu(msg->msg.can_msg.id); cf->can_dlc = min_t(u8, msg->msg.can_msg.length, 8); @@ -346,18 +342,10 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg) struct sk_buff *skb; struct net_device_stats *stats = &dev->netdev->stats; - skb = netdev_alloc_skb(dev->netdev, sizeof(struct can_frame)); + skb = alloc_can_err_skb(dev->netdev, &cf); if (skb == NULL) return; - skb->protocol = htons(ETH_P_CAN); - - cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame)); - memset(cf, 0, sizeof(struct can_frame)); - - cf->can_id = CAN_ERR_FLAG; - cf->can_dlc = CAN_ERR_DLC; - if (msg->type == CPC_MSG_TYPE_CAN_STATE) { u8 state = msg->msg.can_state; @@ -1015,7 +1003,7 @@ static int ems_usb_probe(struct usb_interface *intf, struct ems_usb *dev; int i, err = -ENOMEM; - netdev = alloc_candev(sizeof(struct ems_usb)); + netdev = alloc_candev(sizeof(struct ems_usb), MAX_TX_URBS); if (!netdev) { dev_err(netdev->dev.parent, "Couldn't alloc candev\n"); return -ENOMEM; |