Diffstat (limited to 'drivers/tty')
88 files changed, 47077 insertions, 3624 deletions
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig new file mode 100644 index 00000000000..3fd7199301b --- /dev/null +++ b/drivers/tty/Kconfig @@ -0,0 +1,321 @@ +config VT + bool "Virtual terminal" if EXPERT + depends on !S390 + select INPUT + default y + ---help--- + If you say Y here, you will get support for terminal devices with + display and keyboard devices. These are called "virtual" because you + can run several virtual terminals (also called virtual consoles) on + one physical terminal. This is rather useful, for example one + virtual terminal can collect system messages and warnings, another + one can be used for a text-mode user session, and a third could run + an X session, all in parallel. Switching between virtual terminals + is done with certain key combinations, usually Alt-<function key>. + + The setterm command ("man setterm") can be used to change the + properties (such as colors or beeping) of a virtual terminal. The + man page console_codes(4) ("man console_codes") contains the special + character sequences that can be used to change those properties + directly. The fonts used on virtual terminals can be changed with + the setfont ("man setfont") command and the key bindings are defined + with the loadkeys ("man loadkeys") command. + + You need at least one virtual terminal device in order to make use + of your keyboard and monitor. Therefore, only people configuring an + embedded system would want to say N here in order to save some + memory; the only way to log into such a system is then via a serial + or network connection. + + If unsure, say Y, or else you won't be able to do much with your new + shiny Linux system :-) + +config CONSOLE_TRANSLATIONS + depends on VT + default y + bool "Enable character translations in console" if EXPERT + ---help--- + This enables support for font mapping and Unicode translation + on virtual consoles. + +config VT_CONSOLE + bool "Support for console on virtual terminal" if EXPERT + depends on VT + default y + ---help--- + The system console is the device which receives all kernel messages + and warnings and which allows logins in single user mode. If you + answer Y here, a virtual terminal (the device used to interact with + a physical terminal) can be used as system console. This is the most + common mode of operations, so you should say Y here unless you want + the kernel messages be output only to a serial port (in which case + you should say Y to "Console on serial port", below). + + If you do say Y here, by default the currently visible virtual + terminal (/dev/tty0) will be used as system console. You can change + that with a kernel command line option such as "console=tty3" which + would use the third virtual terminal as system console. (Try "man + bootparam" or see the documentation of your boot loader (lilo or + loadlin) about how to pass options to the kernel at boot time.) + + If unsure, say Y. + +config HW_CONSOLE + bool + depends on VT && !S390 && !UML + default y + +config VT_HW_CONSOLE_BINDING + bool "Support for binding and unbinding console drivers" + depends on HW_CONSOLE + default n + ---help--- + The virtual terminal is the device that interacts with the physical + terminal through console drivers. On these systems, at least one + console driver is loaded. In other configurations, additional console + drivers may be enabled, such as the framebuffer console. 
If more than + 1 console driver is enabled, setting this to 'y' will allow you to + select the console driver that will serve as the backend for the + virtual terminals. + + See <file:Documentation/console/console.txt> for more + information. For framebuffer console users, please refer to + <file:Documentation/fb/fbcon.txt>. + +config UNIX98_PTYS + bool "Unix98 PTY support" if EXPERT + default y + ---help--- + A pseudo terminal (PTY) is a software device consisting of two + halves: a master and a slave. The slave device behaves identical to + a physical terminal; the master device is used by a process to + read data from and write data to the slave, thereby emulating a + terminal. Typical programs for the master side are telnet servers + and xterms. + + Linux has traditionally used the BSD-like names /dev/ptyxx for + masters and /dev/ttyxx for slaves of pseudo terminals. This scheme + has a number of problems. The GNU C library glibc 2.1 and later, + however, supports the Unix98 naming standard: in order to acquire a + pseudo terminal, a process opens /dev/ptmx; the number of the pseudo + terminal is then made available to the process and the pseudo + terminal slave can be accessed as /dev/pts/<number>. What was + traditionally /dev/ttyp2 will then be /dev/pts/2, for example. + + All modern Linux systems use the Unix98 ptys. Say Y unless + you're on an embedded system and want to conserve memory. + +config DEVPTS_MULTIPLE_INSTANCES + bool "Support multiple instances of devpts" + depends on UNIX98_PTYS + default n + ---help--- + Enable support for multiple instances of devpts filesystem. + If you want to have isolated PTY namespaces (eg: in containers), + say Y here. Otherwise, say N. If enabled, each mount of devpts + filesystem with the '-o newinstance' option will create an + independent PTY namespace. + +config LEGACY_PTYS + bool "Legacy (BSD) PTY support" + default y + ---help--- + A pseudo terminal (PTY) is a software device consisting of two + halves: a master and a slave. The slave device behaves identical to + a physical terminal; the master device is used by a process to + read data from and write data to the slave, thereby emulating a + terminal. Typical programs for the master side are telnet servers + and xterms. + + Linux has traditionally used the BSD-like names /dev/ptyxx + for masters and /dev/ttyxx for slaves of pseudo + terminals. This scheme has a number of problems, including + security. This option enables these legacy devices; on most + systems, it is safe to say N. + + +config LEGACY_PTY_COUNT + int "Maximum number of legacy PTY in use" + depends on LEGACY_PTYS + range 0 256 + default "256" + ---help--- + The maximum number of legacy PTYs that can be used at any one time. + The default is 256, and should be more than enough. Embedded + systems may want to reduce this to save memory. + + When not in use, each legacy PTY occupies 12 bytes on 32-bit + architectures and 24 bytes on 64-bit architectures. + +config BFIN_JTAG_COMM + tristate "Blackfin JTAG Communication" + depends on BLACKFIN + help + Add support for emulating a TTY device over the Blackfin JTAG. + + To compile this driver as a module, choose M here: the + module will be called bfin_jtag_comm. 
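The UNIX98_PTYS help above describes the /dev/ptmx handshake; a minimal user-space sketch of that sequence, using only the standard POSIX calls, looks like this:

#define _XOPEN_SOURCE 600
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>

int main(void)
{
	/* opening /dev/ptmx allocates a new master/slave pair */
	int master = posix_openpt(O_RDWR | O_NOCTTY);

	if (master < 0 || grantpt(master) < 0 || unlockpt(master) < 0)
		return 1;
	/* the slave side appears as /dev/pts/<number>, e.g. /dev/pts/2 */
	printf("slave: %s\n", ptsname(master));
	return 0;
}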
+ +config BFIN_JTAG_COMM_CONSOLE + bool "Console on Blackfin JTAG" + depends on BFIN_JTAG_COMM=y + +config SERIAL_NONSTANDARD + bool "Non-standard serial port support" + depends on HAS_IOMEM + ---help--- + Say Y here if you have any non-standard serial boards -- boards + which aren't supported using the standard "dumb" serial driver. + This includes intelligent serial boards such as Cyclades, + Digiboards, etc. These are usually used for systems that need many + serial ports because they serve many terminals or dial-in + connections. + + Note that the answer to this question won't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about non-standard serial boards. + + Most people can say N here. + +config ROCKETPORT + tristate "Comtrol RocketPort support" + depends on SERIAL_NONSTANDARD && (ISA || EISA || PCI) + help + This driver supports Comtrol RocketPort and RocketModem PCI boards. + These boards provide 2, 4, 8, 16, or 32 high-speed serial ports or + modems. For information about the RocketPort/RocketModem boards + and this driver read <file:Documentation/serial/rocket.txt>. + + To compile this driver as a module, choose M here: the + module will be called rocket. + + If you want to compile this driver into the kernel, say Y here. If + you don't have a Comtrol RocketPort/RocketModem card installed, say N. + +config CYCLADES + tristate "Cyclades async mux support" + depends on SERIAL_NONSTANDARD && (PCI || ISA) + select FW_LOADER + ---help--- + This driver supports Cyclades Z and Y multiserial boards. + You would need something like this to connect more than two modems to + your Linux box, for instance in order to become a dial-in server. + + For information about the Cyclades-Z card, read + <file:Documentation/serial/README.cycladesZ>. + + To compile this driver as a module, choose M here: the + module will be called cyclades. + + If you haven't heard about it, it's safe to say N. + +config CYZ_INTR + bool "Cyclades-Z interrupt mode operation (EXPERIMENTAL)" + depends on EXPERIMENTAL && CYCLADES + help + The Cyclades-Z family of multiport cards allows 2 (two) driver op + modes: polling and interrupt. In polling mode, the driver will check + the status of the Cyclades-Z ports every certain amount of time + (which is called polling cycle and is configurable). In interrupt + mode, it will use an interrupt line (IRQ) in order to check the + status of the Cyclades-Z ports. The default op mode is polling. If + unsure, say N. + +config MOXA_INTELLIO + tristate "Moxa Intellio support" + depends on SERIAL_NONSTANDARD && (ISA || EISA || PCI) + select FW_LOADER + help + Say Y here if you have a Moxa Intellio multiport serial card. + + To compile this driver as a module, choose M here: the + module will be called moxa. + +config MOXA_SMARTIO + tristate "Moxa SmartIO support v. 2.0" + depends on SERIAL_NONSTANDARD && (PCI || EISA || ISA) + help + Say Y here if you have a Moxa SmartIO multiport serial card and/or + want to help develop a new version of this driver. + + This is upgraded (1.9.1) driver from original Moxa drivers with + changes finally resulting in PCI probing. + + This driver can also be built as a module. The module will be called + mxser. If you want to do that, say M here. + +config SYNCLINK + tristate "Microgate SyncLink card support" + depends on SERIAL_NONSTANDARD && PCI && ISA_DMA_API + help + Provides support for the SyncLink ISA and PCI multiprotocol serial + adapters. 
These adapters support asynchronous and HDLC bit + synchronous communication up to 10Mbps (PCI adapter). + + This driver can only be built as a module ( = code which can be + inserted in and removed from the running kernel whenever you want). + The module will be called synclink. If you want to do that, say M + here. + +config SYNCLINKMP + tristate "SyncLink Multiport support" + depends on SERIAL_NONSTANDARD && PCI + help + Enable support for the SyncLink Multiport (2 or 4 ports) + serial adapter, running asynchronous and HDLC communications up + to 2.048Mbps. Each ports is independently selectable for + RS-232, V.35, RS-449, RS-530, and X.21 + + This driver may be built as a module ( = code which can be + inserted in and removed from the running kernel whenever you want). + The module will be called synclinkmp. If you want to do that, say M + here. + +config SYNCLINK_GT + tristate "SyncLink GT/AC support" + depends on SERIAL_NONSTANDARD && PCI + help + Support for SyncLink GT and SyncLink AC families of + synchronous and asynchronous serial adapters + manufactured by Microgate Systems, Ltd. (www.microgate.com) + +config NOZOMI + tristate "HSDPA Broadband Wireless Data Card - Globe Trotter" + depends on PCI && EXPERIMENTAL + help + If you have a HSDPA driver Broadband Wireless Data Card - + Globe Trotter PCMCIA card, say Y here. + + To compile this driver as a module, choose M here, the module + will be called nozomi. + +config ISI + tristate "Multi-Tech multiport card support (EXPERIMENTAL)" + depends on SERIAL_NONSTANDARD && PCI + select FW_LOADER + help + This is a driver for the Multi-Tech cards which provide several + serial ports. The driver is experimental and can currently only be + built as a module. The module will be called isicom. + If you want to do that, choose M here. + +config N_HDLC + tristate "HDLC line discipline support" + depends on SERIAL_NONSTANDARD + help + Allows synchronous HDLC communications with tty device drivers that + support synchronous HDLC such as the Microgate SyncLink adapter. + + This driver can be built as a module ( = code which can be + inserted in and removed from the running kernel whenever you want). + The module will be called n_hdlc. If you want to do that, say M + here. + +config N_GSM + tristate "GSM MUX line discipline support (EXPERIMENTAL)" + depends on EXPERIMENTAL + depends on NET + help + This line discipline provides support for the GSM MUX protocol and + presents the mux as a set of 61 individual tty devices. + diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile index 396277216e4..690522fcb33 100644 --- a/drivers/tty/Makefile +++ b/drivers/tty/Makefile @@ -11,3 +11,18 @@ obj-$(CONFIG_R3964) += n_r3964.o obj-y += vt/ obj-$(CONFIG_HVC_DRIVER) += hvc/ obj-y += serial/ + +# tty drivers +obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o +obj-$(CONFIG_BFIN_JTAG_COMM) += bfin_jtag_comm.o +obj-$(CONFIG_CYCLADES) += cyclades.o +obj-$(CONFIG_ISI) += isicom.o +obj-$(CONFIG_MOXA_INTELLIO) += moxa.o +obj-$(CONFIG_MOXA_SMARTIO) += mxser.o +obj-$(CONFIG_NOZOMI) += nozomi.o +obj-$(CONFIG_ROCKETPORT) += rocket.o +obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o +obj-$(CONFIG_SYNCLINKMP) += synclinkmp.o +obj-$(CONFIG_SYNCLINK) += synclink.o + +obj-y += ipwireless/ diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c new file mode 100644 index 00000000000..f214e502247 --- /dev/null +++ b/drivers/tty/amiserial.c @@ -0,0 +1,2178 @@ +/* + * linux/drivers/char/amiserial.c + * + * Serial driver for the amiga builtin port. 
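The N_HDLC and N_GSM entries above are tty line disciplines rather than device drivers; user space attaches a line discipline to an already-open tty with the TIOCSETD ioctl, roughly as in this sketch (the N_* numbers come from <linux/tty.h>; set_ldisc() is only an illustrative helper):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/tty.h>		/* N_HDLC, N_GSM0710, ... */

static int set_ldisc(const char *dev, int ldisc)
{
	int fd = open(dev, O_RDWR | O_NOCTTY);

	if (fd < 0)
		return -1;
	/* switch the tty's line discipline, e.g. ldisc = N_HDLC */
	if (ioctl(fd, TIOCSETD, &ldisc) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}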
+ * + * This code was created by taking serial.c version 4.30 from kernel + * release 2.3.22, replacing all hardware related stuff with the + * corresponding amiga hardware actions, and removing all irrelevant + * code. As a consequence, it uses many of the constants and names + * associated with the registers and bits of 16550 compatible UARTS - + * but only to keep track of status, etc in the state variables. It + * was done this was to make it easier to keep the code in line with + * (non hardware specific) changes to serial.c. + * + * The port is registered with the tty driver as minor device 64, and + * therefore other ports should should only use 65 upwards. + * + * Richard Lucock 28/12/99 + * + * Copyright (C) 1991, 1992 Linus Torvalds + * Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, + * 1998, 1999 Theodore Ts'o + * + */ + +/* + * Serial driver configuration section. Here are the various options: + * + * SERIAL_PARANOIA_CHECK + * Check the magic number for the async_structure where + * ever possible. + */ + +#include <linux/delay.h> + +#undef SERIAL_PARANOIA_CHECK +#define SERIAL_DO_RESTART + +/* Set of debugging defines */ + +#undef SERIAL_DEBUG_INTR +#undef SERIAL_DEBUG_OPEN +#undef SERIAL_DEBUG_FLOW +#undef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT + +/* Sanity checks */ + +#if defined(MODULE) && defined(SERIAL_DEBUG_MCOUNT) +#define DBG_CNT(s) printk("(%s): [%x] refc=%d, serc=%d, ttyc=%d -> %s\n", \ + tty->name, (info->flags), serial_driver->refcount,info->count,tty->count,s) +#else +#define DBG_CNT(s) +#endif + +/* + * End of serial driver configuration section. + */ + +#include <linux/module.h> + +#include <linux/types.h> +#include <linux/serial.h> +#include <linux/serialP.h> +#include <linux/serial_reg.h> +static char *serial_version = "4.30"; + +#include <linux/errno.h> +#include <linux/signal.h> +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/timer.h> +#include <linux/interrupt.h> +#include <linux/tty.h> +#include <linux/tty_flip.h> +#include <linux/console.h> +#include <linux/major.h> +#include <linux/string.h> +#include <linux/fcntl.h> +#include <linux/ptrace.h> +#include <linux/ioport.h> +#include <linux/mm.h> +#include <linux/seq_file.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/bitops.h> +#include <linux/platform_device.h> + +#include <asm/setup.h> + +#include <asm/system.h> + +#include <asm/irq.h> + +#include <asm/amigahw.h> +#include <asm/amigaints.h> + +#define custom amiga_custom +static char *serial_name = "Amiga-builtin serial driver"; + +static struct tty_driver *serial_driver; + +/* number of characters left in xmit buffer before we ask for more */ +#define WAKEUP_CHARS 256 + +static struct async_struct *IRQ_ports; + +static unsigned char current_ctl_bits; + +static void change_speed(struct async_struct *info, struct ktermios *old); +static void rs_wait_until_sent(struct tty_struct *tty, int timeout); + + +static struct serial_state rs_table[1]; + +#define NR_PORTS ARRAY_SIZE(rs_table) + +#include <asm/uaccess.h> + +#define serial_isroot() (capable(CAP_SYS_ADMIN)) + + +static inline int serial_paranoia_check(struct async_struct *info, + char *name, const char *routine) +{ +#ifdef SERIAL_PARANOIA_CHECK + static const char *badmagic = + "Warning: bad magic number for serial struct (%s) in %s\n"; + static const char *badinfo = + "Warning: null async_struct for (%s) in %s\n"; + + if (!info) { + printk(badinfo, name, routine); + return 1; + } + if (info->magic != SERIAL_MAGIC) { + printk(badmagic, name, routine); + return 1; 
+ } +#endif + return 0; +} + +/* some serial hardware definitions */ +#define SDR_OVRUN (1<<15) +#define SDR_RBF (1<<14) +#define SDR_TBE (1<<13) +#define SDR_TSRE (1<<12) + +#define SERPER_PARENB (1<<15) + +#define AC_SETCLR (1<<15) +#define AC_UARTBRK (1<<11) + +#define SER_DTR (1<<7) +#define SER_RTS (1<<6) +#define SER_DCD (1<<5) +#define SER_CTS (1<<4) +#define SER_DSR (1<<3) + +static __inline__ void rtsdtr_ctrl(int bits) +{ + ciab.pra = ((bits & (SER_RTS | SER_DTR)) ^ (SER_RTS | SER_DTR)) | (ciab.pra & ~(SER_RTS | SER_DTR)); +} + +/* + * ------------------------------------------------------------ + * rs_stop() and rs_start() + * + * This routines are called before setting or resetting tty->stopped. + * They enable or disable transmitter interrupts, as necessary. + * ------------------------------------------------------------ + */ +static void rs_stop(struct tty_struct *tty) +{ + struct async_struct *info = tty->driver_data; + unsigned long flags; + + if (serial_paranoia_check(info, tty->name, "rs_stop")) + return; + + local_irq_save(flags); + if (info->IER & UART_IER_THRI) { + info->IER &= ~UART_IER_THRI; + /* disable Tx interrupt and remove any pending interrupts */ + custom.intena = IF_TBE; + mb(); + custom.intreq = IF_TBE; + mb(); + } + local_irq_restore(flags); +} + +static void rs_start(struct tty_struct *tty) +{ + struct async_struct *info = tty->driver_data; + unsigned long flags; + + if (serial_paranoia_check(info, tty->name, "rs_start")) + return; + + local_irq_save(flags); + if (info->xmit.head != info->xmit.tail + && info->xmit.buf + && !(info->IER & UART_IER_THRI)) { + info->IER |= UART_IER_THRI; + custom.intena = IF_SETCLR | IF_TBE; + mb(); + /* set a pending Tx Interrupt, transmitter should restart now */ + custom.intreq = IF_SETCLR | IF_TBE; + mb(); + } + local_irq_restore(flags); +} + +/* + * ---------------------------------------------------------------------- + * + * Here starts the interrupt handling routines. All of the following + * subroutines are declared as inline and are folded into + * rs_interrupt(). They were separated out for readability's sake. + * + * Note: rs_interrupt() is a "fast" interrupt, which means that it + * runs with interrupts turned off. People who may want to modify + * rs_interrupt() should try to keep the interrupt handler as fast as + * possible. After you are done making modifications, it is not a bad + * idea to do: + * + * gcc -S -DKERNEL -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer serial.c + * + * and look at the resulting assemble code in serial.s. + * + * - Ted Ts'o (tytso@mit.edu), 7-Mar-93 + * ----------------------------------------------------------------------- + */ + +/* + * This routine is used by the interrupt handler to schedule + * processing in the software interrupt portion of the driver. + */ +static void rs_sched_event(struct async_struct *info, + int event) +{ + info->event |= 1 << event; + tasklet_schedule(&info->tlet); +} + +static void receive_chars(struct async_struct *info) +{ + int status; + int serdatr; + struct tty_struct *tty = info->tty; + unsigned char ch, flag; + struct async_icount *icount; + int oe = 0; + + icount = &info->state->icount; + + status = UART_LSR_DR; /* We obviously have a character! 
*/ + serdatr = custom.serdatr; + mb(); + custom.intreq = IF_RBF; + mb(); + + if((serdatr & 0x1ff) == 0) + status |= UART_LSR_BI; + if(serdatr & SDR_OVRUN) + status |= UART_LSR_OE; + + ch = serdatr & 0xff; + icount->rx++; + +#ifdef SERIAL_DEBUG_INTR + printk("DR%02x:%02x...", ch, status); +#endif + flag = TTY_NORMAL; + + /* + * We don't handle parity or frame errors - but I have left + * the code in, since I'm not sure that the errors can't be + * detected. + */ + + if (status & (UART_LSR_BI | UART_LSR_PE | + UART_LSR_FE | UART_LSR_OE)) { + /* + * For statistics only + */ + if (status & UART_LSR_BI) { + status &= ~(UART_LSR_FE | UART_LSR_PE); + icount->brk++; + } else if (status & UART_LSR_PE) + icount->parity++; + else if (status & UART_LSR_FE) + icount->frame++; + if (status & UART_LSR_OE) + icount->overrun++; + + /* + * Now check to see if character should be + * ignored, and mask off conditions which + * should be ignored. + */ + if (status & info->ignore_status_mask) + goto out; + + status &= info->read_status_mask; + + if (status & (UART_LSR_BI)) { +#ifdef SERIAL_DEBUG_INTR + printk("handling break...."); +#endif + flag = TTY_BREAK; + if (info->flags & ASYNC_SAK) + do_SAK(tty); + } else if (status & UART_LSR_PE) + flag = TTY_PARITY; + else if (status & UART_LSR_FE) + flag = TTY_FRAME; + if (status & UART_LSR_OE) { + /* + * Overrun is special, since it's + * reported immediately, and doesn't + * affect the current character + */ + oe = 1; + } + } + tty_insert_flip_char(tty, ch, flag); + if (oe == 1) + tty_insert_flip_char(tty, 0, TTY_OVERRUN); + tty_flip_buffer_push(tty); +out: + return; +} + +static void transmit_chars(struct async_struct *info) +{ + custom.intreq = IF_TBE; + mb(); + if (info->x_char) { + custom.serdat = info->x_char | 0x100; + mb(); + info->state->icount.tx++; + info->x_char = 0; + return; + } + if (info->xmit.head == info->xmit.tail + || info->tty->stopped + || info->tty->hw_stopped) { + info->IER &= ~UART_IER_THRI; + custom.intena = IF_TBE; + mb(); + return; + } + + custom.serdat = info->xmit.buf[info->xmit.tail++] | 0x100; + mb(); + info->xmit.tail = info->xmit.tail & (SERIAL_XMIT_SIZE-1); + info->state->icount.tx++; + + if (CIRC_CNT(info->xmit.head, + info->xmit.tail, + SERIAL_XMIT_SIZE) < WAKEUP_CHARS) + rs_sched_event(info, RS_EVENT_WRITE_WAKEUP); + +#ifdef SERIAL_DEBUG_INTR + printk("THRE..."); +#endif + if (info->xmit.head == info->xmit.tail) { + custom.intena = IF_TBE; + mb(); + info->IER &= ~UART_IER_THRI; + } +} + +static void check_modem_status(struct async_struct *info) +{ + unsigned char status = ciab.pra & (SER_DCD | SER_CTS | SER_DSR); + unsigned char dstatus; + struct async_icount *icount; + + /* Determine bits that have changed */ + dstatus = status ^ current_ctl_bits; + current_ctl_bits = status; + + if (dstatus) { + icount = &info->state->icount; + /* update input line counters */ + if (dstatus & SER_DSR) + icount->dsr++; + if (dstatus & SER_DCD) { + icount->dcd++; +#ifdef CONFIG_HARD_PPS + if ((info->flags & ASYNC_HARDPPS_CD) && + !(status & SER_DCD)) + hardpps(); +#endif + } + if (dstatus & SER_CTS) + icount->cts++; + wake_up_interruptible(&info->delta_msr_wait); + } + + if ((info->flags & ASYNC_CHECK_CD) && (dstatus & SER_DCD)) { +#if (defined(SERIAL_DEBUG_OPEN) || defined(SERIAL_DEBUG_INTR)) + printk("ttyS%d CD now %s...", info->line, + (!(status & SER_DCD)) ? 
"on" : "off"); +#endif + if (!(status & SER_DCD)) + wake_up_interruptible(&info->open_wait); + else { +#ifdef SERIAL_DEBUG_OPEN + printk("doing serial hangup..."); +#endif + if (info->tty) + tty_hangup(info->tty); + } + } + if (info->flags & ASYNC_CTS_FLOW) { + if (info->tty->hw_stopped) { + if (!(status & SER_CTS)) { +#if (defined(SERIAL_DEBUG_INTR) || defined(SERIAL_DEBUG_FLOW)) + printk("CTS tx start..."); +#endif + info->tty->hw_stopped = 0; + info->IER |= UART_IER_THRI; + custom.intena = IF_SETCLR | IF_TBE; + mb(); + /* set a pending Tx Interrupt, transmitter should restart now */ + custom.intreq = IF_SETCLR | IF_TBE; + mb(); + rs_sched_event(info, RS_EVENT_WRITE_WAKEUP); + return; + } + } else { + if ((status & SER_CTS)) { +#if (defined(SERIAL_DEBUG_INTR) || defined(SERIAL_DEBUG_FLOW)) + printk("CTS tx stop..."); +#endif + info->tty->hw_stopped = 1; + info->IER &= ~UART_IER_THRI; + /* disable Tx interrupt and remove any pending interrupts */ + custom.intena = IF_TBE; + mb(); + custom.intreq = IF_TBE; + mb(); + } + } + } +} + +static irqreturn_t ser_vbl_int( int irq, void *data) +{ + /* vbl is just a periodic interrupt we tie into to update modem status */ + struct async_struct * info = IRQ_ports; + /* + * TBD - is it better to unregister from this interrupt or to + * ignore it if MSI is clear ? + */ + if(info->IER & UART_IER_MSI) + check_modem_status(info); + return IRQ_HANDLED; +} + +static irqreturn_t ser_rx_int(int irq, void *dev_id) +{ + struct async_struct * info; + +#ifdef SERIAL_DEBUG_INTR + printk("ser_rx_int..."); +#endif + + info = IRQ_ports; + if (!info || !info->tty) + return IRQ_NONE; + + receive_chars(info); + info->last_active = jiffies; +#ifdef SERIAL_DEBUG_INTR + printk("end.\n"); +#endif + return IRQ_HANDLED; +} + +static irqreturn_t ser_tx_int(int irq, void *dev_id) +{ + struct async_struct * info; + + if (custom.serdatr & SDR_TBE) { +#ifdef SERIAL_DEBUG_INTR + printk("ser_tx_int..."); +#endif + + info = IRQ_ports; + if (!info || !info->tty) + return IRQ_NONE; + + transmit_chars(info); + info->last_active = jiffies; +#ifdef SERIAL_DEBUG_INTR + printk("end.\n"); +#endif + } + return IRQ_HANDLED; +} + +/* + * ------------------------------------------------------------------- + * Here ends the serial interrupt routines. + * ------------------------------------------------------------------- + */ + +/* + * This routine is used to handle the "bottom half" processing for the + * serial driver, known also the "software interrupt" processing. + * This processing is done at the kernel interrupt level, after the + * rs_interrupt() has returned, BUT WITH INTERRUPTS TURNED ON. This + * is where time-consuming activities which can not be done in the + * interrupt driver proper are done; the interrupt driver schedules + * them using rs_sched_event(), and they get done here. + */ + +static void do_softint(unsigned long private_) +{ + struct async_struct *info = (struct async_struct *) private_; + struct tty_struct *tty; + + tty = info->tty; + if (!tty) + return; + + if (test_and_clear_bit(RS_EVENT_WRITE_WAKEUP, &info->event)) + tty_wakeup(tty); +} + +/* + * --------------------------------------------------------------- + * Low level utility subroutines for the serial driver: routines to + * figure out the appropriate timeout for an interrupt chain, routines + * to initialize and startup a serial port, and routines to shutdown a + * serial port. Useful stuff like that. 
+ * --------------------------------------------------------------- + */ + +static int startup(struct async_struct * info) +{ + unsigned long flags; + int retval=0; + unsigned long page; + + page = get_zeroed_page(GFP_KERNEL); + if (!page) + return -ENOMEM; + + local_irq_save(flags); + + if (info->flags & ASYNC_INITIALIZED) { + free_page(page); + goto errout; + } + + if (info->xmit.buf) + free_page(page); + else + info->xmit.buf = (unsigned char *) page; + +#ifdef SERIAL_DEBUG_OPEN + printk("starting up ttys%d ...", info->line); +#endif + + /* Clear anything in the input buffer */ + + custom.intreq = IF_RBF; + mb(); + + retval = request_irq(IRQ_AMIGA_VERTB, ser_vbl_int, 0, "serial status", info); + if (retval) { + if (serial_isroot()) { + if (info->tty) + set_bit(TTY_IO_ERROR, + &info->tty->flags); + retval = 0; + } + goto errout; + } + + /* enable both Rx and Tx interrupts */ + custom.intena = IF_SETCLR | IF_RBF | IF_TBE; + mb(); + info->IER = UART_IER_MSI; + + /* remember current state of the DCD and CTS bits */ + current_ctl_bits = ciab.pra & (SER_DCD | SER_CTS | SER_DSR); + + IRQ_ports = info; + + info->MCR = 0; + if (info->tty->termios->c_cflag & CBAUD) + info->MCR = SER_DTR | SER_RTS; + rtsdtr_ctrl(info->MCR); + + if (info->tty) + clear_bit(TTY_IO_ERROR, &info->tty->flags); + info->xmit.head = info->xmit.tail = 0; + + /* + * Set up the tty->alt_speed kludge + */ + if (info->tty) { + if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI) + info->tty->alt_speed = 57600; + if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI) + info->tty->alt_speed = 115200; + if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI) + info->tty->alt_speed = 230400; + if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP) + info->tty->alt_speed = 460800; + } + + /* + * and set the speed of the serial port + */ + change_speed(info, NULL); + + info->flags |= ASYNC_INITIALIZED; + local_irq_restore(flags); + return 0; + +errout: + local_irq_restore(flags); + return retval; +} + +/* + * This routine will shutdown a serial port; interrupts are disabled, and + * DTR is dropped if the hangup on close termio flag is on. + */ +static void shutdown(struct async_struct * info) +{ + unsigned long flags; + struct serial_state *state; + + if (!(info->flags & ASYNC_INITIALIZED)) + return; + + state = info->state; + +#ifdef SERIAL_DEBUG_OPEN + printk("Shutting down serial port %d ....\n", info->line); +#endif + + local_irq_save(flags); /* Disable interrupts */ + + /* + * clear delta_msr_wait queue to avoid mem leaks: we may free the irq + * here so the queue might never be waken up + */ + wake_up_interruptible(&info->delta_msr_wait); + + IRQ_ports = NULL; + + /* + * Free the IRQ, if necessary + */ + free_irq(IRQ_AMIGA_VERTB, info); + + if (info->xmit.buf) { + free_page((unsigned long) info->xmit.buf); + info->xmit.buf = NULL; + } + + info->IER = 0; + custom.intena = IF_RBF | IF_TBE; + mb(); + + /* disable break condition */ + custom.adkcon = AC_UARTBRK; + mb(); + + if (!info->tty || (info->tty->termios->c_cflag & HUPCL)) + info->MCR &= ~(SER_DTR|SER_RTS); + rtsdtr_ctrl(info->MCR); + + if (info->tty) + set_bit(TTY_IO_ERROR, &info->tty->flags); + + info->flags &= ~ASYNC_INITIALIZED; + local_irq_restore(flags); +} + + +/* + * This routine is called to set the UART divisor registers to match + * the specified baud rate for a serial port. 
+ */ +static void change_speed(struct async_struct *info, + struct ktermios *old_termios) +{ + int quot = 0, baud_base, baud; + unsigned cflag, cval = 0; + int bits; + unsigned long flags; + + if (!info->tty || !info->tty->termios) + return; + cflag = info->tty->termios->c_cflag; + + /* Byte size is always 8 bits plus parity bit if requested */ + + cval = 3; bits = 10; + if (cflag & CSTOPB) { + cval |= 0x04; + bits++; + } + if (cflag & PARENB) { + cval |= UART_LCR_PARITY; + bits++; + } + if (!(cflag & PARODD)) + cval |= UART_LCR_EPAR; +#ifdef CMSPAR + if (cflag & CMSPAR) + cval |= UART_LCR_SPAR; +#endif + + /* Determine divisor based on baud rate */ + baud = tty_get_baud_rate(info->tty); + if (!baud) + baud = 9600; /* B0 transition handled in rs_set_termios */ + baud_base = info->state->baud_base; + if (baud == 38400 && + ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST)) + quot = info->state->custom_divisor; + else { + if (baud == 134) + /* Special case since 134 is really 134.5 */ + quot = (2*baud_base / 269); + else if (baud) + quot = baud_base / baud; + } + /* If the quotient is zero refuse the change */ + if (!quot && old_termios) { + /* FIXME: Will need updating for new tty in the end */ + info->tty->termios->c_cflag &= ~CBAUD; + info->tty->termios->c_cflag |= (old_termios->c_cflag & CBAUD); + baud = tty_get_baud_rate(info->tty); + if (!baud) + baud = 9600; + if (baud == 38400 && + ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST)) + quot = info->state->custom_divisor; + else { + if (baud == 134) + /* Special case since 134 is really 134.5 */ + quot = (2*baud_base / 269); + else if (baud) + quot = baud_base / baud; + } + } + /* As a last resort, if the quotient is zero, default to 9600 bps */ + if (!quot) + quot = baud_base / 9600; + info->quot = quot; + info->timeout = ((info->xmit_fifo_size*HZ*bits*quot) / baud_base); + info->timeout += HZ/50; /* Add .02 seconds of slop */ + + /* CTS flow control flag and modem status interrupts */ + info->IER &= ~UART_IER_MSI; + if (info->flags & ASYNC_HARDPPS_CD) + info->IER |= UART_IER_MSI; + if (cflag & CRTSCTS) { + info->flags |= ASYNC_CTS_FLOW; + info->IER |= UART_IER_MSI; + } else + info->flags &= ~ASYNC_CTS_FLOW; + if (cflag & CLOCAL) + info->flags &= ~ASYNC_CHECK_CD; + else { + info->flags |= ASYNC_CHECK_CD; + info->IER |= UART_IER_MSI; + } + /* TBD: + * Does clearing IER_MSI imply that we should disable the VBL interrupt ? + */ + + /* + * Set up parity check flag + */ + + info->read_status_mask = UART_LSR_OE | UART_LSR_DR; + if (I_INPCK(info->tty)) + info->read_status_mask |= UART_LSR_FE | UART_LSR_PE; + if (I_BRKINT(info->tty) || I_PARMRK(info->tty)) + info->read_status_mask |= UART_LSR_BI; + + /* + * Characters to ignore + */ + info->ignore_status_mask = 0; + if (I_IGNPAR(info->tty)) + info->ignore_status_mask |= UART_LSR_PE | UART_LSR_FE; + if (I_IGNBRK(info->tty)) { + info->ignore_status_mask |= UART_LSR_BI; + /* + * If we're ignore parity and break indicators, ignore + * overruns too. (For real raw support). + */ + if (I_IGNPAR(info->tty)) + info->ignore_status_mask |= UART_LSR_OE; + } + /* + * !!! 
ignore all characters if CREAD is not set + */ + if ((cflag & CREAD) == 0) + info->ignore_status_mask |= UART_LSR_DR; + local_irq_save(flags); + + { + short serper; + + /* Set up the baud rate */ + serper = quot - 1; + + /* Enable or disable parity bit */ + + if(cval & UART_LCR_PARITY) + serper |= (SERPER_PARENB); + + custom.serper = serper; + mb(); + } + + info->LCR = cval; /* Save LCR */ + local_irq_restore(flags); +} + +static int rs_put_char(struct tty_struct *tty, unsigned char ch) +{ + struct async_struct *info; + unsigned long flags; + + info = tty->driver_data; + + if (serial_paranoia_check(info, tty->name, "rs_put_char")) + return 0; + + if (!info->xmit.buf) + return 0; + + local_irq_save(flags); + if (CIRC_SPACE(info->xmit.head, + info->xmit.tail, + SERIAL_XMIT_SIZE) == 0) { + local_irq_restore(flags); + return 0; + } + + info->xmit.buf[info->xmit.head++] = ch; + info->xmit.head &= SERIAL_XMIT_SIZE-1; + local_irq_restore(flags); + return 1; +} + +static void rs_flush_chars(struct tty_struct *tty) +{ + struct async_struct *info = tty->driver_data; + unsigned long flags; + + if (serial_paranoia_check(info, tty->name, "rs_flush_chars")) + return; + + if (info->xmit.head == info->xmit.tail + || tty->stopped + || tty->hw_stopped + || !info->xmit.buf) + return; + + local_irq_save(flags); + info->IER |= UART_IER_THRI; + custom.intena = IF_SETCLR | IF_TBE; + mb(); + /* set a pending Tx Interrupt, transmitter should restart now */ + custom.intreq = IF_SETCLR | IF_TBE; + mb(); + local_irq_restore(flags); +} + +static int rs_write(struct tty_struct * tty, const unsigned char *buf, int count) +{ + int c, ret = 0; + struct async_struct *info; + unsigned long flags; + + info = tty->driver_data; + + if (serial_paranoia_check(info, tty->name, "rs_write")) + return 0; + + if (!info->xmit.buf) + return 0; + + local_irq_save(flags); + while (1) { + c = CIRC_SPACE_TO_END(info->xmit.head, + info->xmit.tail, + SERIAL_XMIT_SIZE); + if (count < c) + c = count; + if (c <= 0) { + break; + } + memcpy(info->xmit.buf + info->xmit.head, buf, c); + info->xmit.head = ((info->xmit.head + c) & + (SERIAL_XMIT_SIZE-1)); + buf += c; + count -= c; + ret += c; + } + local_irq_restore(flags); + + if (info->xmit.head != info->xmit.tail + && !tty->stopped + && !tty->hw_stopped + && !(info->IER & UART_IER_THRI)) { + info->IER |= UART_IER_THRI; + local_irq_disable(); + custom.intena = IF_SETCLR | IF_TBE; + mb(); + /* set a pending Tx Interrupt, transmitter should restart now */ + custom.intreq = IF_SETCLR | IF_TBE; + mb(); + local_irq_restore(flags); + } + return ret; +} + +static int rs_write_room(struct tty_struct *tty) +{ + struct async_struct *info = tty->driver_data; + + if (serial_paranoia_check(info, tty->name, "rs_write_room")) + return 0; + return CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); +} + +static int rs_chars_in_buffer(struct tty_struct *tty) +{ + struct async_struct *info = tty->driver_data; + + if (serial_paranoia_check(info, tty->name, "rs_chars_in_buffer")) + return 0; + return CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); +} + +static void rs_flush_buffer(struct tty_struct *tty) +{ + struct async_struct *info = tty->driver_data; + unsigned long flags; + + if (serial_paranoia_check(info, tty->name, "rs_flush_buffer")) + return; + local_irq_save(flags); + info->xmit.head = info->xmit.tail = 0; + local_irq_restore(flags); + tty_wakeup(tty); +} + +/* + * This function is used to send a high-priority XON/XOFF character to + * the device + */ +static void 
rs_send_xchar(struct tty_struct *tty, char ch) +{ + struct async_struct *info = tty->driver_data; + unsigned long flags; + + if (serial_paranoia_check(info, tty->name, "rs_send_char")) + return; + + info->x_char = ch; + if (ch) { + /* Make sure transmit interrupts are on */ + + /* Check this ! */ + local_irq_save(flags); + if(!(custom.intenar & IF_TBE)) { + custom.intena = IF_SETCLR | IF_TBE; + mb(); + /* set a pending Tx Interrupt, transmitter should restart now */ + custom.intreq = IF_SETCLR | IF_TBE; + mb(); + } + local_irq_restore(flags); + + info->IER |= UART_IER_THRI; + } +} + +/* + * ------------------------------------------------------------ + * rs_throttle() + * + * This routine is called by the upper-layer tty layer to signal that + * incoming characters should be throttled. + * ------------------------------------------------------------ + */ +static void rs_throttle(struct tty_struct * tty) +{ + struct async_struct *info = tty->driver_data; + unsigned long flags; +#ifdef SERIAL_DEBUG_THROTTLE + char buf[64]; + + printk("throttle %s: %d....\n", tty_name(tty, buf), + tty->ldisc.chars_in_buffer(tty)); +#endif + + if (serial_paranoia_check(info, tty->name, "rs_throttle")) + return; + + if (I_IXOFF(tty)) + rs_send_xchar(tty, STOP_CHAR(tty)); + + if (tty->termios->c_cflag & CRTSCTS) + info->MCR &= ~SER_RTS; + + local_irq_save(flags); + rtsdtr_ctrl(info->MCR); + local_irq_restore(flags); +} + +static void rs_unthrottle(struct tty_struct * tty) +{ + struct async_struct *info = tty->driver_data; + unsigned long flags; +#ifdef SERIAL_DEBUG_THROTTLE + char buf[64]; + + printk("unthrottle %s: %d....\n", tty_name(tty, buf), + tty->ldisc.chars_in_buffer(tty)); +#endif + + if (serial_paranoia_check(info, tty->name, "rs_unthrottle")) + return; + + if (I_IXOFF(tty)) { + if (info->x_char) + info->x_char = 0; + else + rs_send_xchar(tty, START_CHAR(tty)); + } + if (tty->termios->c_cflag & CRTSCTS) + info->MCR |= SER_RTS; + local_irq_save(flags); + rtsdtr_ctrl(info->MCR); + local_irq_restore(flags); +} + +/* + * ------------------------------------------------------------ + * rs_ioctl() and friends + * ------------------------------------------------------------ + */ + +static int get_serial_info(struct async_struct * info, + struct serial_struct __user * retinfo) +{ + struct serial_struct tmp; + struct serial_state *state = info->state; + + if (!retinfo) + return -EFAULT; + memset(&tmp, 0, sizeof(tmp)); + tty_lock(); + tmp.type = state->type; + tmp.line = state->line; + tmp.port = state->port; + tmp.irq = state->irq; + tmp.flags = state->flags; + tmp.xmit_fifo_size = state->xmit_fifo_size; + tmp.baud_base = state->baud_base; + tmp.close_delay = state->close_delay; + tmp.closing_wait = state->closing_wait; + tmp.custom_divisor = state->custom_divisor; + tty_unlock(); + if (copy_to_user(retinfo,&tmp,sizeof(*retinfo))) + return -EFAULT; + return 0; +} + +static int set_serial_info(struct async_struct * info, + struct serial_struct __user * new_info) +{ + struct serial_struct new_serial; + struct serial_state old_state, *state; + unsigned int change_irq,change_port; + int retval = 0; + + if (copy_from_user(&new_serial,new_info,sizeof(new_serial))) + return -EFAULT; + + tty_lock(); + state = info->state; + old_state = *state; + + change_irq = new_serial.irq != state->irq; + change_port = (new_serial.port != state->port); + if(change_irq || change_port || (new_serial.xmit_fifo_size != state->xmit_fifo_size)) { + tty_unlock(); + return -EINVAL; + } + + if (!serial_isroot()) { + if ((new_serial.baud_base 
!= state->baud_base) || + (new_serial.close_delay != state->close_delay) || + (new_serial.xmit_fifo_size != state->xmit_fifo_size) || + ((new_serial.flags & ~ASYNC_USR_MASK) != + (state->flags & ~ASYNC_USR_MASK))) + return -EPERM; + state->flags = ((state->flags & ~ASYNC_USR_MASK) | + (new_serial.flags & ASYNC_USR_MASK)); + info->flags = ((info->flags & ~ASYNC_USR_MASK) | + (new_serial.flags & ASYNC_USR_MASK)); + state->custom_divisor = new_serial.custom_divisor; + goto check_and_exit; + } + + if (new_serial.baud_base < 9600) { + tty_unlock(); + return -EINVAL; + } + + /* + * OK, past this point, all the error checking has been done. + * At this point, we start making changes..... + */ + + state->baud_base = new_serial.baud_base; + state->flags = ((state->flags & ~ASYNC_FLAGS) | + (new_serial.flags & ASYNC_FLAGS)); + info->flags = ((state->flags & ~ASYNC_INTERNAL_FLAGS) | + (info->flags & ASYNC_INTERNAL_FLAGS)); + state->custom_divisor = new_serial.custom_divisor; + state->close_delay = new_serial.close_delay * HZ/100; + state->closing_wait = new_serial.closing_wait * HZ/100; + info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0; + +check_and_exit: + if (info->flags & ASYNC_INITIALIZED) { + if (((old_state.flags & ASYNC_SPD_MASK) != + (state->flags & ASYNC_SPD_MASK)) || + (old_state.custom_divisor != state->custom_divisor)) { + if ((state->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI) + info->tty->alt_speed = 57600; + if ((state->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI) + info->tty->alt_speed = 115200; + if ((state->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI) + info->tty->alt_speed = 230400; + if ((state->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP) + info->tty->alt_speed = 460800; + change_speed(info, NULL); + } + } else + retval = startup(info); + tty_unlock(); + return retval; +} + + +/* + * get_lsr_info - get line status register info + * + * Purpose: Let user call ioctl() to get info when the UART physically + * is emptied. On bus types like RS485, the transmitter must + * release the bus after transmitting. This must be done when + * the transmit shift register is empty, not be done when the + * transmit holding register is empty. This functionality + * allows an RS485 driver to be written in user space. + */ +static int get_lsr_info(struct async_struct * info, unsigned int __user *value) +{ + unsigned char status; + unsigned int result; + unsigned long flags; + + local_irq_save(flags); + status = custom.serdatr; + mb(); + local_irq_restore(flags); + result = ((status & SDR_TSRE) ? TIOCSER_TEMT : 0); + if (copy_to_user(value, &result, sizeof(int))) + return -EFAULT; + return 0; +} + + +static int rs_tiocmget(struct tty_struct *tty) +{ + struct async_struct * info = tty->driver_data; + unsigned char control, status; + unsigned long flags; + + if (serial_paranoia_check(info, tty->name, "rs_ioctl")) + return -ENODEV; + if (tty->flags & (1 << TTY_IO_ERROR)) + return -EIO; + + control = info->MCR; + local_irq_save(flags); + status = ciab.pra; + local_irq_restore(flags); + return ((control & SER_RTS) ? TIOCM_RTS : 0) + | ((control & SER_DTR) ? TIOCM_DTR : 0) + | (!(status & SER_DCD) ? TIOCM_CAR : 0) + | (!(status & SER_DSR) ? TIOCM_DSR : 0) + | (!(status & SER_CTS) ? 
TIOCM_CTS : 0); +} + +static int rs_tiocmset(struct tty_struct *tty, unsigned int set, + unsigned int clear) +{ + struct async_struct * info = tty->driver_data; + unsigned long flags; + + if (serial_paranoia_check(info, tty->name, "rs_ioctl")) + return -ENODEV; + if (tty->flags & (1 << TTY_IO_ERROR)) + return -EIO; + + local_irq_save(flags); + if (set & TIOCM_RTS) + info->MCR |= SER_RTS; + if (set & TIOCM_DTR) + info->MCR |= SER_DTR; + if (clear & TIOCM_RTS) + info->MCR &= ~SER_RTS; + if (clear & TIOCM_DTR) + info->MCR &= ~SER_DTR; + rtsdtr_ctrl(info->MCR); + local_irq_restore(flags); + return 0; +} + +/* + * rs_break() --- routine which turns the break handling on or off + */ +static int rs_break(struct tty_struct *tty, int break_state) +{ + struct async_struct * info = tty->driver_data; + unsigned long flags; + + if (serial_paranoia_check(info, tty->name, "rs_break")) + return -EINVAL; + + local_irq_save(flags); + if (break_state == -1) + custom.adkcon = AC_SETCLR | AC_UARTBRK; + else + custom.adkcon = AC_UARTBRK; + mb(); + local_irq_restore(flags); + return 0; +} + +/* + * Get counter of input serial line interrupts (DCD,RI,DSR,CTS) + * Return: write counters to the user passed counter struct + * NB: both 1->0 and 0->1 transitions are counted except for + * RI where only 0->1 is counted. + */ +static int rs_get_icount(struct tty_struct *tty, + struct serial_icounter_struct *icount) +{ + struct async_struct *info = tty->driver_data; + struct async_icount cnow; + unsigned long flags; + + local_irq_save(flags); + cnow = info->state->icount; + local_irq_restore(flags); + icount->cts = cnow.cts; + icount->dsr = cnow.dsr; + icount->rng = cnow.rng; + icount->dcd = cnow.dcd; + icount->rx = cnow.rx; + icount->tx = cnow.tx; + icount->frame = cnow.frame; + icount->overrun = cnow.overrun; + icount->parity = cnow.parity; + icount->brk = cnow.brk; + icount->buf_overrun = cnow.buf_overrun; + + return 0; +} + +static int rs_ioctl(struct tty_struct *tty, + unsigned int cmd, unsigned long arg) +{ + struct async_struct * info = tty->driver_data; + struct async_icount cprev, cnow; /* kernel counter temps */ + void __user *argp = (void __user *)arg; + unsigned long flags; + + if (serial_paranoia_check(info, tty->name, "rs_ioctl")) + return -ENODEV; + + if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) && + (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) && + (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) { + if (tty->flags & (1 << TTY_IO_ERROR)) + return -EIO; + } + + switch (cmd) { + case TIOCGSERIAL: + return get_serial_info(info, argp); + case TIOCSSERIAL: + return set_serial_info(info, argp); + case TIOCSERCONFIG: + return 0; + + case TIOCSERGETLSR: /* Get line status register */ + return get_lsr_info(info, argp); + + case TIOCSERGSTRUCT: + if (copy_to_user(argp, + info, sizeof(struct async_struct))) + return -EFAULT; + return 0; + + /* + * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change + * - mask passed in arg for lines of interest + * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking) + * Caller should use TIOCGICOUNT to see which one it was + */ + case TIOCMIWAIT: + local_irq_save(flags); + /* note the counters on entry */ + cprev = info->state->icount; + local_irq_restore(flags); + while (1) { + interruptible_sleep_on(&info->delta_msr_wait); + /* see if a signal did it */ + if (signal_pending(current)) + return -ERESTARTSYS; + local_irq_save(flags); + cnow = info->state->icount; /* atomic copy */ + local_irq_restore(flags); + if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && + cnow.dcd == 
cprev.dcd && cnow.cts == cprev.cts) + return -EIO; /* no change => error */ + if ( ((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) || + ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) || + ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) || + ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts)) ) { + return 0; + } + cprev = cnow; + } + /* NOTREACHED */ + + case TIOCSERGWILD: + case TIOCSERSWILD: + /* "setserial -W" is called in Debian boot */ + printk ("TIOCSER?WILD ioctl obsolete, ignored.\n"); + return 0; + + default: + return -ENOIOCTLCMD; + } + return 0; +} + +static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios) +{ + struct async_struct *info = tty->driver_data; + unsigned long flags; + unsigned int cflag = tty->termios->c_cflag; + + change_speed(info, old_termios); + + /* Handle transition to B0 status */ + if ((old_termios->c_cflag & CBAUD) && + !(cflag & CBAUD)) { + info->MCR &= ~(SER_DTR|SER_RTS); + local_irq_save(flags); + rtsdtr_ctrl(info->MCR); + local_irq_restore(flags); + } + + /* Handle transition away from B0 status */ + if (!(old_termios->c_cflag & CBAUD) && + (cflag & CBAUD)) { + info->MCR |= SER_DTR; + if (!(tty->termios->c_cflag & CRTSCTS) || + !test_bit(TTY_THROTTLED, &tty->flags)) { + info->MCR |= SER_RTS; + } + local_irq_save(flags); + rtsdtr_ctrl(info->MCR); + local_irq_restore(flags); + } + + /* Handle turning off CRTSCTS */ + if ((old_termios->c_cflag & CRTSCTS) && + !(tty->termios->c_cflag & CRTSCTS)) { + tty->hw_stopped = 0; + rs_start(tty); + } + +#if 0 + /* + * No need to wake up processes in open wait, since they + * sample the CLOCAL flag once, and don't recheck it. + * XXX It's not clear whether the current behavior is correct + * or not. Hence, this may change..... + */ + if (!(old_termios->c_cflag & CLOCAL) && + (tty->termios->c_cflag & CLOCAL)) + wake_up_interruptible(&info->open_wait); +#endif +} + +/* + * ------------------------------------------------------------ + * rs_close() + * + * This routine is called when the serial port gets closed. First, we + * wait for the last remaining data to be sent. Then, we unlink its + * async structure from the interrupt chain if necessary, and we free + * that IRQ if nothing is left in the chain. + * ------------------------------------------------------------ + */ +static void rs_close(struct tty_struct *tty, struct file * filp) +{ + struct async_struct * info = tty->driver_data; + struct serial_state *state; + unsigned long flags; + + if (!info || serial_paranoia_check(info, tty->name, "rs_close")) + return; + + state = info->state; + + local_irq_save(flags); + + if (tty_hung_up_p(filp)) { + DBG_CNT("before DEC-hung"); + local_irq_restore(flags); + return; + } + +#ifdef SERIAL_DEBUG_OPEN + printk("rs_close ttys%d, count = %d\n", info->line, state->count); +#endif + if ((tty->count == 1) && (state->count != 1)) { + /* + * Uh, oh. tty->count is 1, which means that the tty + * structure will be freed. state->count should always + * be one in these conditions. If it's greater than + * one, we've got real problems, since it means the + * serial port won't be shutdown. 
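The TIOCMIWAIT case in rs_ioctl() above is the hook a user-space program uses to sleep until a modem line changes; a minimal sketch (wait_for_dcd_change() is only illustrative, the ioctl and the TIOCM_CD mask are the standard Linux ones):

#include <sys/ioctl.h>

/* block until DCD toggles on an already-open serial fd; the updated
 * event counters can then be read back with TIOCGICOUNT */
static int wait_for_dcd_change(int fd)
{
	return ioctl(fd, TIOCMIWAIT, TIOCM_CD);
}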
+ */ + printk("rs_close: bad serial port count; tty->count is 1, " + "state->count is %d\n", state->count); + state->count = 1; + } + if (--state->count < 0) { + printk("rs_close: bad serial port count for ttys%d: %d\n", + info->line, state->count); + state->count = 0; + } + if (state->count) { + DBG_CNT("before DEC-2"); + local_irq_restore(flags); + return; + } + info->flags |= ASYNC_CLOSING; + /* + * Now we wait for the transmit buffer to clear; and we notify + * the line discipline to only process XON/XOFF characters. + */ + tty->closing = 1; + if (info->closing_wait != ASYNC_CLOSING_WAIT_NONE) + tty_wait_until_sent(tty, info->closing_wait); + /* + * At this point we stop accepting input. To do this, we + * disable the receive line status interrupts, and tell the + * interrupt driver to stop checking the data ready bit in the + * line status register. + */ + info->read_status_mask &= ~UART_LSR_DR; + if (info->flags & ASYNC_INITIALIZED) { + /* disable receive interrupts */ + custom.intena = IF_RBF; + mb(); + /* clear any pending receive interrupt */ + custom.intreq = IF_RBF; + mb(); + + /* + * Before we drop DTR, make sure the UART transmitter + * has completely drained; this is especially + * important if there is a transmit FIFO! + */ + rs_wait_until_sent(tty, info->timeout); + } + shutdown(info); + rs_flush_buffer(tty); + + tty_ldisc_flush(tty); + tty->closing = 0; + info->event = 0; + info->tty = NULL; + if (info->blocked_open) { + if (info->close_delay) { + msleep_interruptible(jiffies_to_msecs(info->close_delay)); + } + wake_up_interruptible(&info->open_wait); + } + info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING); + wake_up_interruptible(&info->close_wait); + local_irq_restore(flags); +} + +/* + * rs_wait_until_sent() --- wait until the transmitter is empty + */ +static void rs_wait_until_sent(struct tty_struct *tty, int timeout) +{ + struct async_struct * info = tty->driver_data; + unsigned long orig_jiffies, char_time; + int tty_was_locked = tty_locked(); + int lsr; + + if (serial_paranoia_check(info, tty->name, "rs_wait_until_sent")) + return; + + if (info->xmit_fifo_size == 0) + return; /* Just in case.... */ + + orig_jiffies = jiffies; + + /* + * tty_wait_until_sent is called from lots of places, + * with or without the BTM. + */ + if (!tty_was_locked) + tty_lock(); + /* + * Set the check interval to be 1/5 of the estimated time to + * send a single character, and make it at least 1. The check + * interval should also be less than the timeout. + * + * Note: we have to use pretty tight timings here to satisfy + * the NIST-PCTS. + */ + char_time = (info->timeout - HZ/50) / info->xmit_fifo_size; + char_time = char_time / 5; + if (char_time == 0) + char_time = 1; + if (timeout) + char_time = min_t(unsigned long, char_time, timeout); + /* + * If the transmitter hasn't cleared in twice the approximate + * amount of time to send the entire FIFO, it probably won't + * ever clear. This assumes the UART isn't doing flow + * control, which is currently the case. Hence, if it ever + * takes longer than info->timeout, this is probably due to a + * UART bug of some kind. So, we clamp the timeout parameter at + * 2*info->timeout. 
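Worth noting for the check interval computed above: this driver registers xmit_fifo_size = 1, so info->timeout is essentially just the HZ/50 slop added in change_speed(), char_time truncates to zero, and the "at least 1" clamp means the polling loop below sleeps one jiffy at a time at typical baud rates.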
+ */ + if (!timeout || timeout > 2*info->timeout) + timeout = 2*info->timeout; +#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT + printk("In rs_wait_until_sent(%d) check=%lu...", timeout, char_time); + printk("jiff=%lu...", jiffies); +#endif + while(!((lsr = custom.serdatr) & SDR_TSRE)) { +#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT + printk("serdatr = %d (jiff=%lu)...", lsr, jiffies); +#endif + msleep_interruptible(jiffies_to_msecs(char_time)); + if (signal_pending(current)) + break; + if (timeout && time_after(jiffies, orig_jiffies + timeout)) + break; + } + __set_current_state(TASK_RUNNING); + if (!tty_was_locked) + tty_unlock(); +#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT + printk("lsr = %d (jiff=%lu)...done\n", lsr, jiffies); +#endif +} + +/* + * rs_hangup() --- called by tty_hangup() when a hangup is signaled. + */ +static void rs_hangup(struct tty_struct *tty) +{ + struct async_struct * info = tty->driver_data; + struct serial_state *state = info->state; + + if (serial_paranoia_check(info, tty->name, "rs_hangup")) + return; + + state = info->state; + + rs_flush_buffer(tty); + shutdown(info); + info->event = 0; + state->count = 0; + info->flags &= ~ASYNC_NORMAL_ACTIVE; + info->tty = NULL; + wake_up_interruptible(&info->open_wait); +} + +/* + * ------------------------------------------------------------ + * rs_open() and friends + * ------------------------------------------------------------ + */ +static int block_til_ready(struct tty_struct *tty, struct file * filp, + struct async_struct *info) +{ +#ifdef DECLARE_WAITQUEUE + DECLARE_WAITQUEUE(wait, current); +#else + struct wait_queue wait = { current, NULL }; +#endif + struct serial_state *state = info->state; + int retval; + int do_clocal = 0, extra_count = 0; + unsigned long flags; + + /* + * If the device is in the middle of being closed, then block + * until it's done, and then try again. + */ + if (tty_hung_up_p(filp) || + (info->flags & ASYNC_CLOSING)) { + if (info->flags & ASYNC_CLOSING) + interruptible_sleep_on(&info->close_wait); +#ifdef SERIAL_DO_RESTART + return ((info->flags & ASYNC_HUP_NOTIFY) ? + -EAGAIN : -ERESTARTSYS); +#else + return -EAGAIN; +#endif + } + + /* + * If non-blocking mode is set, or the port is not enabled, + * then make the check up front and then exit. + */ + if ((filp->f_flags & O_NONBLOCK) || + (tty->flags & (1 << TTY_IO_ERROR))) { + info->flags |= ASYNC_NORMAL_ACTIVE; + return 0; + } + + if (tty->termios->c_cflag & CLOCAL) + do_clocal = 1; + + /* + * Block waiting for the carrier detect and the line to become + * free (i.e., not in use by the callout). While we are in + * this loop, state->count is dropped by one, so that + * rs_close() knows when to free things. We restore it upon + * exit, either normal or abnormal. 
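The O_NONBLOCK early exit above is what user space relies on to open the port without waiting for carrier; the usual pattern is to open non-blocking and then restore blocking I/O, e.g. (illustrative helper, not part of this driver):

#include <fcntl.h>

static int open_serial_nowait(const char *dev)
{
	/* skip the carrier wait in block_til_ready(), then go back to
	 * ordinary blocking reads and writes */
	int fd = open(dev, O_RDWR | O_NOCTTY | O_NONBLOCK);

	if (fd >= 0)
		fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) & ~O_NONBLOCK);
	return fd;
}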
+ */ + retval = 0; + add_wait_queue(&info->open_wait, &wait); +#ifdef SERIAL_DEBUG_OPEN + printk("block_til_ready before block: ttys%d, count = %d\n", + state->line, state->count); +#endif + local_irq_save(flags); + if (!tty_hung_up_p(filp)) { + extra_count = 1; + state->count--; + } + local_irq_restore(flags); + info->blocked_open++; + while (1) { + local_irq_save(flags); + if (tty->termios->c_cflag & CBAUD) + rtsdtr_ctrl(SER_DTR|SER_RTS); + local_irq_restore(flags); + set_current_state(TASK_INTERRUPTIBLE); + if (tty_hung_up_p(filp) || + !(info->flags & ASYNC_INITIALIZED)) { +#ifdef SERIAL_DO_RESTART + if (info->flags & ASYNC_HUP_NOTIFY) + retval = -EAGAIN; + else + retval = -ERESTARTSYS; +#else + retval = -EAGAIN; +#endif + break; + } + if (!(info->flags & ASYNC_CLOSING) && + (do_clocal || (!(ciab.pra & SER_DCD)) )) + break; + if (signal_pending(current)) { + retval = -ERESTARTSYS; + break; + } +#ifdef SERIAL_DEBUG_OPEN + printk("block_til_ready blocking: ttys%d, count = %d\n", + info->line, state->count); +#endif + tty_unlock(); + schedule(); + tty_lock(); + } + __set_current_state(TASK_RUNNING); + remove_wait_queue(&info->open_wait, &wait); + if (extra_count) + state->count++; + info->blocked_open--; +#ifdef SERIAL_DEBUG_OPEN + printk("block_til_ready after blocking: ttys%d, count = %d\n", + info->line, state->count); +#endif + if (retval) + return retval; + info->flags |= ASYNC_NORMAL_ACTIVE; + return 0; +} + +static int get_async_struct(int line, struct async_struct **ret_info) +{ + struct async_struct *info; + struct serial_state *sstate; + + sstate = rs_table + line; + sstate->count++; + if (sstate->info) { + *ret_info = sstate->info; + return 0; + } + info = kzalloc(sizeof(struct async_struct), GFP_KERNEL); + if (!info) { + sstate->count--; + return -ENOMEM; + } +#ifdef DECLARE_WAITQUEUE + init_waitqueue_head(&info->open_wait); + init_waitqueue_head(&info->close_wait); + init_waitqueue_head(&info->delta_msr_wait); +#endif + info->magic = SERIAL_MAGIC; + info->port = sstate->port; + info->flags = sstate->flags; + info->xmit_fifo_size = sstate->xmit_fifo_size; + info->line = line; + tasklet_init(&info->tlet, do_softint, (unsigned long)info); + info->state = sstate; + if (sstate->info) { + kfree(info); + *ret_info = sstate->info; + return 0; + } + *ret_info = sstate->info = info; + return 0; +} + +/* + * This routine is called whenever a serial port is opened. It + * enables interrupts for a serial port, linking in its async structure into + * the IRQ chain. It also performs the serial-specific + * initialization for the tty structure. + */ +static int rs_open(struct tty_struct *tty, struct file * filp) +{ + struct async_struct *info; + int retval, line; + + line = tty->index; + if ((line < 0) || (line >= NR_PORTS)) { + return -ENODEV; + } + retval = get_async_struct(line, &info); + if (retval) { + return retval; + } + tty->driver_data = info; + info->tty = tty; + if (serial_paranoia_check(info, tty->name, "rs_open")) + return -ENODEV; + +#ifdef SERIAL_DEBUG_OPEN + printk("rs_open %s, count = %d\n", tty->name, info->state->count); +#endif + info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0; + + /* + * If the port is the middle of closing, bail out now + */ + if (tty_hung_up_p(filp) || + (info->flags & ASYNC_CLOSING)) { + if (info->flags & ASYNC_CLOSING) + interruptible_sleep_on(&info->close_wait); +#ifdef SERIAL_DO_RESTART + return ((info->flags & ASYNC_HUP_NOTIFY) ? 
+ -EAGAIN : -ERESTARTSYS); +#else + return -EAGAIN; +#endif + } + + /* + * Start up serial port + */ + retval = startup(info); + if (retval) { + return retval; + } + + retval = block_til_ready(tty, filp, info); + if (retval) { +#ifdef SERIAL_DEBUG_OPEN + printk("rs_open returning after block_til_ready with %d\n", + retval); +#endif + return retval; + } + +#ifdef SERIAL_DEBUG_OPEN + printk("rs_open %s successful...", tty->name); +#endif + return 0; +} + +/* + * /proc fs routines.... + */ + +static inline void line_info(struct seq_file *m, struct serial_state *state) +{ + struct async_struct *info = state->info, scr_info; + char stat_buf[30], control, status; + unsigned long flags; + + seq_printf(m, "%d: uart:amiga_builtin",state->line); + + /* + * Figure out the current RS-232 lines + */ + if (!info) { + info = &scr_info; /* This is just for serial_{in,out} */ + + info->magic = SERIAL_MAGIC; + info->flags = state->flags; + info->quot = 0; + info->tty = NULL; + } + local_irq_save(flags); + status = ciab.pra; + control = info ? info->MCR : status; + local_irq_restore(flags); + + stat_buf[0] = 0; + stat_buf[1] = 0; + if(!(control & SER_RTS)) + strcat(stat_buf, "|RTS"); + if(!(status & SER_CTS)) + strcat(stat_buf, "|CTS"); + if(!(control & SER_DTR)) + strcat(stat_buf, "|DTR"); + if(!(status & SER_DSR)) + strcat(stat_buf, "|DSR"); + if(!(status & SER_DCD)) + strcat(stat_buf, "|CD"); + + if (info->quot) { + seq_printf(m, " baud:%d", state->baud_base / info->quot); + } + + seq_printf(m, " tx:%d rx:%d", state->icount.tx, state->icount.rx); + + if (state->icount.frame) + seq_printf(m, " fe:%d", state->icount.frame); + + if (state->icount.parity) + seq_printf(m, " pe:%d", state->icount.parity); + + if (state->icount.brk) + seq_printf(m, " brk:%d", state->icount.brk); + + if (state->icount.overrun) + seq_printf(m, " oe:%d", state->icount.overrun); + + /* + * Last thing is the RS-232 status lines + */ + seq_printf(m, " %s\n", stat_buf+1); +} + +static int rs_proc_show(struct seq_file *m, void *v) +{ + seq_printf(m, "serinfo:1.0 driver:%s\n", serial_version); + line_info(m, &rs_table[0]); + return 0; +} + +static int rs_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, rs_proc_show, NULL); +} + +static const struct file_operations rs_proc_fops = { + .owner = THIS_MODULE, + .open = rs_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/* + * --------------------------------------------------------------------- + * rs_init() and friends + * + * rs_init() is called at boot-time to initialize the serial driver. + * --------------------------------------------------------------------- + */ + +/* + * This routine prints out the appropriate serial driver version + * number, and identifies which options were configured into this + * driver. 
+ */ +static void show_serial_version(void) +{ + printk(KERN_INFO "%s version %s\n", serial_name, serial_version); +} + + +static const struct tty_operations serial_ops = { + .open = rs_open, + .close = rs_close, + .write = rs_write, + .put_char = rs_put_char, + .flush_chars = rs_flush_chars, + .write_room = rs_write_room, + .chars_in_buffer = rs_chars_in_buffer, + .flush_buffer = rs_flush_buffer, + .ioctl = rs_ioctl, + .throttle = rs_throttle, + .unthrottle = rs_unthrottle, + .set_termios = rs_set_termios, + .stop = rs_stop, + .start = rs_start, + .hangup = rs_hangup, + .break_ctl = rs_break, + .send_xchar = rs_send_xchar, + .wait_until_sent = rs_wait_until_sent, + .tiocmget = rs_tiocmget, + .tiocmset = rs_tiocmset, + .get_icount = rs_get_icount, + .proc_fops = &rs_proc_fops, +}; + +/* + * The serial driver boot-time initialization code! + */ +static int __init amiga_serial_probe(struct platform_device *pdev) +{ + unsigned long flags; + struct serial_state * state; + int error; + + serial_driver = alloc_tty_driver(1); + if (!serial_driver) + return -ENOMEM; + + IRQ_ports = NULL; + + show_serial_version(); + + /* Initialize the tty_driver structure */ + + serial_driver->owner = THIS_MODULE; + serial_driver->driver_name = "amiserial"; + serial_driver->name = "ttyS"; + serial_driver->major = TTY_MAJOR; + serial_driver->minor_start = 64; + serial_driver->type = TTY_DRIVER_TYPE_SERIAL; + serial_driver->subtype = SERIAL_TYPE_NORMAL; + serial_driver->init_termios = tty_std_termios; + serial_driver->init_termios.c_cflag = + B9600 | CS8 | CREAD | HUPCL | CLOCAL; + serial_driver->flags = TTY_DRIVER_REAL_RAW; + tty_set_operations(serial_driver, &serial_ops); + + error = tty_register_driver(serial_driver); + if (error) + goto fail_put_tty_driver; + + state = rs_table; + state->magic = SSTATE_MAGIC; + state->port = (int)&custom.serdatr; /* Just to give it a value */ + state->line = 0; + state->custom_divisor = 0; + state->close_delay = 5*HZ/10; + state->closing_wait = 30*HZ; + state->icount.cts = state->icount.dsr = + state->icount.rng = state->icount.dcd = 0; + state->icount.rx = state->icount.tx = 0; + state->icount.frame = state->icount.parity = 0; + state->icount.overrun = state->icount.brk = 0; + + printk(KERN_INFO "ttyS%d is the amiga builtin serial port\n", + state->line); + + /* Hardware set up */ + + state->baud_base = amiga_colorclock; + state->xmit_fifo_size = 1; + + /* set ISRs, and then disable the rx interrupts */ + error = request_irq(IRQ_AMIGA_TBE, ser_tx_int, 0, "serial TX", state); + if (error) + goto fail_unregister; + + error = request_irq(IRQ_AMIGA_RBF, ser_rx_int, IRQF_DISABLED, + "serial RX", state); + if (error) + goto fail_free_irq; + + local_irq_save(flags); + + /* turn off Rx and Tx interrupts */ + custom.intena = IF_RBF | IF_TBE; + mb(); + + /* clear any pending interrupt */ + custom.intreq = IF_RBF | IF_TBE; + mb(); + + local_irq_restore(flags); + + /* + * set the appropriate directions for the modem control flags, + * and clear RTS and DTR + */ + ciab.ddra |= (SER_DTR | SER_RTS); /* outputs */ + ciab.ddra &= ~(SER_DCD | SER_CTS | SER_DSR); /* inputs */ + + platform_set_drvdata(pdev, state); + + return 0; + +fail_free_irq: + free_irq(IRQ_AMIGA_TBE, state); +fail_unregister: + tty_unregister_driver(serial_driver); +fail_put_tty_driver: + put_tty_driver(serial_driver); + return error; +} + +static int __exit amiga_serial_remove(struct platform_device *pdev) +{ + int error; + struct serial_state *state = platform_get_drvdata(pdev); + struct async_struct *info = state->info; + 
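+	/*
+	 * Tear down in roughly the reverse order of amiga_serial_probe():
+	 * stop the software-interrupt tasklet before its async_struct is
+	 * freed, unregister the tty driver, then release both serial IRQs.
+	 */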
+ /* printk("Unloading %s: version %s\n", serial_name, serial_version); */ + tasklet_kill(&info->tlet); + if ((error = tty_unregister_driver(serial_driver))) + printk("SERIAL: failed to unregister serial driver (%d)\n", + error); + put_tty_driver(serial_driver); + + rs_table[0].info = NULL; + kfree(info); + + free_irq(IRQ_AMIGA_TBE, rs_table); + free_irq(IRQ_AMIGA_RBF, rs_table); + + platform_set_drvdata(pdev, NULL); + + return error; +} + +static struct platform_driver amiga_serial_driver = { + .remove = __exit_p(amiga_serial_remove), + .driver = { + .name = "amiga-serial", + .owner = THIS_MODULE, + }, +}; + +static int __init amiga_serial_init(void) +{ + return platform_driver_probe(&amiga_serial_driver, amiga_serial_probe); +} + +module_init(amiga_serial_init); + +static void __exit amiga_serial_exit(void) +{ + platform_driver_unregister(&amiga_serial_driver); +} + +module_exit(amiga_serial_exit); + + +#if defined(CONFIG_SERIAL_CONSOLE) && !defined(MODULE) + +/* + * ------------------------------------------------------------ + * Serial console driver + * ------------------------------------------------------------ + */ + +static void amiga_serial_putc(char c) +{ + custom.serdat = (unsigned char)c | 0x100; + while (!(custom.serdatr & 0x2000)) + barrier(); +} + +/* + * Print a string to the serial port trying not to disturb + * any possible real use of the port... + * + * The console must be locked when we get here. + */ +static void serial_console_write(struct console *co, const char *s, + unsigned count) +{ + unsigned short intena = custom.intenar; + + custom.intena = IF_TBE; + + while (count--) { + if (*s == '\n') + amiga_serial_putc('\r'); + amiga_serial_putc(*s++); + } + + custom.intena = IF_SETCLR | (intena & IF_TBE); +} + +static struct tty_driver *serial_console_device(struct console *c, int *index) +{ + *index = 0; + return serial_driver; +} + +static struct console sercons = { + .name = "ttyS", + .write = serial_console_write, + .device = serial_console_device, + .flags = CON_PRINTBUFFER, + .index = -1, +}; + +/* + * Register console. + */ +static int __init amiserial_console_init(void) +{ + register_console(&sercons); + return 0; +} +console_initcall(amiserial_console_init); + +#endif /* CONFIG_SERIAL_CONSOLE && !MODULE */ + +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:amiga-serial"); diff --git a/drivers/tty/bfin_jtag_comm.c b/drivers/tty/bfin_jtag_comm.c new file mode 100644 index 00000000000..16402445f2b --- /dev/null +++ b/drivers/tty/bfin_jtag_comm.c @@ -0,0 +1,366 @@ +/* + * TTY over Blackfin JTAG Communication + * + * Copyright 2008-2009 Analog Devices Inc. + * + * Enter bugs at http://blackfin.uclinux.org/ + * + * Licensed under the GPL-2 or later. + */ + +#define DRV_NAME "bfin-jtag-comm" +#define DEV_NAME "ttyBFJC" +#define pr_fmt(fmt) DRV_NAME ": " fmt + +#include <linux/circ_buf.h> +#include <linux/console.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/kthread.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/tty.h> +#include <linux/tty_driver.h> +#include <linux/tty_flip.h> +#include <asm/atomic.h> + +#define pr_init(fmt, args...) 
({ static const __initconst char __fmt[] = fmt; printk(__fmt, ## args); }) + +/* See the Debug/Emulation chapter in the HRM */ +#define EMUDOF 0x00000001 /* EMUDAT_OUT full & valid */ +#define EMUDIF 0x00000002 /* EMUDAT_IN full & valid */ +#define EMUDOOVF 0x00000004 /* EMUDAT_OUT overflow */ +#define EMUDIOVF 0x00000008 /* EMUDAT_IN overflow */ + +static inline uint32_t bfin_write_emudat(uint32_t emudat) +{ + __asm__ __volatile__("emudat = %0;" : : "d"(emudat)); + return emudat; +} + +static inline uint32_t bfin_read_emudat(void) +{ + uint32_t emudat; + __asm__ __volatile__("%0 = emudat;" : "=d"(emudat)); + return emudat; +} + +static inline uint32_t bfin_write_emudat_chars(char a, char b, char c, char d) +{ + return bfin_write_emudat((a << 0) | (b << 8) | (c << 16) | (d << 24)); +} + +#define CIRC_SIZE 2048 /* see comment in tty_io.c:do_tty_write() */ +#define CIRC_MASK (CIRC_SIZE - 1) +#define circ_empty(circ) ((circ)->head == (circ)->tail) +#define circ_free(circ) CIRC_SPACE((circ)->head, (circ)->tail, CIRC_SIZE) +#define circ_cnt(circ) CIRC_CNT((circ)->head, (circ)->tail, CIRC_SIZE) +#define circ_byte(circ, idx) ((circ)->buf[(idx) & CIRC_MASK]) + +static struct tty_driver *bfin_jc_driver; +static struct task_struct *bfin_jc_kthread; +static struct tty_struct * volatile bfin_jc_tty; +static unsigned long bfin_jc_count; +static DEFINE_MUTEX(bfin_jc_tty_mutex); +static volatile struct circ_buf bfin_jc_write_buf; + +static int +bfin_jc_emudat_manager(void *arg) +{ + uint32_t inbound_len = 0, outbound_len = 0; + + while (!kthread_should_stop()) { + /* no one left to give data to, so sleep */ + if (bfin_jc_tty == NULL && circ_empty(&bfin_jc_write_buf)) { + pr_debug("waiting for readers\n"); + __set_current_state(TASK_UNINTERRUPTIBLE); + schedule(); + __set_current_state(TASK_RUNNING); + } + + /* no data available, so just chill */ + if (!(bfin_read_DBGSTAT() & EMUDIF) && circ_empty(&bfin_jc_write_buf)) { + pr_debug("waiting for data (in_len = %i) (circ: %i %i)\n", + inbound_len, bfin_jc_write_buf.tail, bfin_jc_write_buf.head); + if (inbound_len) + schedule(); + else + schedule_timeout_interruptible(HZ); + continue; + } + + /* if incoming data is ready, eat it */ + if (bfin_read_DBGSTAT() & EMUDIF) { + struct tty_struct *tty; + mutex_lock(&bfin_jc_tty_mutex); + tty = (struct tty_struct *)bfin_jc_tty; + if (tty != NULL) { + uint32_t emudat = bfin_read_emudat(); + if (inbound_len == 0) { + pr_debug("incoming length: 0x%08x\n", emudat); + inbound_len = emudat; + } else { + size_t num_chars = (4 <= inbound_len ? 4 : inbound_len); + pr_debug(" incoming data: 0x%08x (pushing %zu)\n", emudat, num_chars); + inbound_len -= num_chars; + tty_insert_flip_string(tty, (unsigned char *)&emudat, num_chars); + tty_flip_buffer_push(tty); + } + } + mutex_unlock(&bfin_jc_tty_mutex); + } + + /* if outgoing data is ready, post it */ + if (!(bfin_read_DBGSTAT() & EMUDOF) && !circ_empty(&bfin_jc_write_buf)) { + if (outbound_len == 0) { + outbound_len = circ_cnt(&bfin_jc_write_buf); + bfin_write_emudat(outbound_len); + pr_debug("outgoing length: 0x%08x\n", outbound_len); + } else { + struct tty_struct *tty; + int tail = bfin_jc_write_buf.tail; + size_t ate = (4 <= outbound_len ? 
4 : outbound_len); + uint32_t emudat = + bfin_write_emudat_chars( + circ_byte(&bfin_jc_write_buf, tail + 0), + circ_byte(&bfin_jc_write_buf, tail + 1), + circ_byte(&bfin_jc_write_buf, tail + 2), + circ_byte(&bfin_jc_write_buf, tail + 3) + ); + bfin_jc_write_buf.tail += ate; + outbound_len -= ate; + mutex_lock(&bfin_jc_tty_mutex); + tty = (struct tty_struct *)bfin_jc_tty; + if (tty) + tty_wakeup(tty); + mutex_unlock(&bfin_jc_tty_mutex); + pr_debug(" outgoing data: 0x%08x (pushing %zu)\n", emudat, ate); + } + } + } + + __set_current_state(TASK_RUNNING); + return 0; +} + +static int +bfin_jc_open(struct tty_struct *tty, struct file *filp) +{ + mutex_lock(&bfin_jc_tty_mutex); + pr_debug("open %lu\n", bfin_jc_count); + ++bfin_jc_count; + bfin_jc_tty = tty; + wake_up_process(bfin_jc_kthread); + mutex_unlock(&bfin_jc_tty_mutex); + return 0; +} + +static void +bfin_jc_close(struct tty_struct *tty, struct file *filp) +{ + mutex_lock(&bfin_jc_tty_mutex); + pr_debug("close %lu\n", bfin_jc_count); + if (--bfin_jc_count == 0) + bfin_jc_tty = NULL; + wake_up_process(bfin_jc_kthread); + mutex_unlock(&bfin_jc_tty_mutex); +} + +/* XXX: we dont handle the put_char() case where we must handle count = 1 */ +static int +bfin_jc_circ_write(const unsigned char *buf, int count) +{ + int i; + count = min(count, circ_free(&bfin_jc_write_buf)); + pr_debug("going to write chunk of %i bytes\n", count); + for (i = 0; i < count; ++i) + circ_byte(&bfin_jc_write_buf, bfin_jc_write_buf.head + i) = buf[i]; + bfin_jc_write_buf.head += i; + return i; +} + +#ifndef CONFIG_BFIN_JTAG_COMM_CONSOLE +# define console_lock() +# define console_unlock() +#endif +static int +bfin_jc_write(struct tty_struct *tty, const unsigned char *buf, int count) +{ + int i; + console_lock(); + i = bfin_jc_circ_write(buf, count); + console_unlock(); + wake_up_process(bfin_jc_kthread); + return i; +} + +static void +bfin_jc_flush_chars(struct tty_struct *tty) +{ + wake_up_process(bfin_jc_kthread); +} + +static int +bfin_jc_write_room(struct tty_struct *tty) +{ + return circ_free(&bfin_jc_write_buf); +} + +static int +bfin_jc_chars_in_buffer(struct tty_struct *tty) +{ + return circ_cnt(&bfin_jc_write_buf); +} + +static void +bfin_jc_wait_until_sent(struct tty_struct *tty, int timeout) +{ + unsigned long expire = jiffies + timeout; + while (!circ_empty(&bfin_jc_write_buf)) { + if (signal_pending(current)) + break; + if (time_after(jiffies, expire)) + break; + } +} + +static const struct tty_operations bfin_jc_ops = { + .open = bfin_jc_open, + .close = bfin_jc_close, + .write = bfin_jc_write, + /*.put_char = bfin_jc_put_char,*/ + .flush_chars = bfin_jc_flush_chars, + .write_room = bfin_jc_write_room, + .chars_in_buffer = bfin_jc_chars_in_buffer, + .wait_until_sent = bfin_jc_wait_until_sent, +}; + +static int __init bfin_jc_init(void) +{ + int ret; + + bfin_jc_kthread = kthread_create(bfin_jc_emudat_manager, NULL, DRV_NAME); + if (IS_ERR(bfin_jc_kthread)) + return PTR_ERR(bfin_jc_kthread); + + ret = -ENOMEM; + + bfin_jc_write_buf.head = bfin_jc_write_buf.tail = 0; + bfin_jc_write_buf.buf = kmalloc(CIRC_SIZE, GFP_KERNEL); + if (!bfin_jc_write_buf.buf) + goto err; + + bfin_jc_driver = alloc_tty_driver(1); + if (!bfin_jc_driver) + goto err; + + bfin_jc_driver->owner = THIS_MODULE; + bfin_jc_driver->driver_name = DRV_NAME; + bfin_jc_driver->name = DEV_NAME; + bfin_jc_driver->type = TTY_DRIVER_TYPE_SERIAL; + bfin_jc_driver->subtype = SERIAL_TYPE_NORMAL; + bfin_jc_driver->init_termios = tty_std_termios; + tty_set_operations(bfin_jc_driver, &bfin_jc_ops); + + 
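+	/*
+	 * Everything the tty core needs has been filled in above; the
+	 * registration below is what makes the single JTAG console line
+	 * visible to userspace. Any failure from here on unwinds through
+	 * the err: label, which drops the driver reference, frees the
+	 * circular buffer and stops the emudat kthread.
+	 */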
ret = tty_register_driver(bfin_jc_driver); + if (ret) + goto err; + + pr_init(KERN_INFO DRV_NAME ": initialized\n"); + + return 0; + + err: + put_tty_driver(bfin_jc_driver); + kfree(bfin_jc_write_buf.buf); + kthread_stop(bfin_jc_kthread); + return ret; +} +module_init(bfin_jc_init); + +static void __exit bfin_jc_exit(void) +{ + kthread_stop(bfin_jc_kthread); + kfree(bfin_jc_write_buf.buf); + tty_unregister_driver(bfin_jc_driver); + put_tty_driver(bfin_jc_driver); +} +module_exit(bfin_jc_exit); + +#if defined(CONFIG_BFIN_JTAG_COMM_CONSOLE) || defined(CONFIG_EARLY_PRINTK) +static void +bfin_jc_straight_buffer_write(const char *buf, unsigned count) +{ + unsigned ate = 0; + while (bfin_read_DBGSTAT() & EMUDOF) + continue; + bfin_write_emudat(count); + while (ate < count) { + while (bfin_read_DBGSTAT() & EMUDOF) + continue; + bfin_write_emudat_chars(buf[ate], buf[ate+1], buf[ate+2], buf[ate+3]); + ate += 4; + } +} +#endif + +#ifdef CONFIG_BFIN_JTAG_COMM_CONSOLE +static void +bfin_jc_console_write(struct console *co, const char *buf, unsigned count) +{ + if (bfin_jc_kthread == NULL) + bfin_jc_straight_buffer_write(buf, count); + else + bfin_jc_circ_write(buf, count); +} + +static struct tty_driver * +bfin_jc_console_device(struct console *co, int *index) +{ + *index = co->index; + return bfin_jc_driver; +} + +static struct console bfin_jc_console = { + .name = DEV_NAME, + .write = bfin_jc_console_write, + .device = bfin_jc_console_device, + .flags = CON_ANYTIME | CON_PRINTBUFFER, + .index = -1, +}; + +static int __init bfin_jc_console_init(void) +{ + register_console(&bfin_jc_console); + return 0; +} +console_initcall(bfin_jc_console_init); +#endif + +#ifdef CONFIG_EARLY_PRINTK +static void __init +bfin_jc_early_write(struct console *co, const char *buf, unsigned int count) +{ + bfin_jc_straight_buffer_write(buf, count); +} + +static struct __initdata console bfin_jc_early_console = { + .name = "early_BFJC", + .write = bfin_jc_early_write, + .flags = CON_ANYTIME | CON_PRINTBUFFER, + .index = -1, +}; + +struct console * __init +bfin_jc_early_init(unsigned int port, unsigned int cflag) +{ + return &bfin_jc_early_console; +} +#endif + +MODULE_AUTHOR("Mike Frysinger <vapier@gentoo.org>"); +MODULE_DESCRIPTION("TTY over Blackfin JTAG Communication"); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c new file mode 100644 index 00000000000..c99728f0cd9 --- /dev/null +++ b/drivers/tty/cyclades.c @@ -0,0 +1,4200 @@ +#undef BLOCKMOVE +#define Z_WAKE +#undef Z_EXT_CHARS_IN_BUFFER + +/* + * linux/drivers/char/cyclades.c + * + * This file contains the driver for the Cyclades async multiport + * serial boards. + * + * Initially written by Randolph Bentson <bentson@grieg.seaslug.org>. + * Modified and maintained by Marcio Saito <marcio@cyclades.com>. + * + * Copyright (C) 2007-2009 Jiri Slaby <jirislaby@gmail.com> + * + * Much of the design and some of the code came from serial.c + * which was copyright (C) 1991, 1992 Linus Torvalds. It was + * extensively rewritten by Theodore Ts'o, 8/16/92 -- 9/14/92, + * and then fixed as suggested by Michael K. Johnson 12/12/92. + * Converted to pci probing and cleaned up by Jiri Slaby. + * + */ + +#define CY_VERSION "2.6" + +/* If you need to install more boards than NR_CARDS, change the constant + in the definition below. No other change is necessary to support up to + eight boards. Beyond that you'll have to extend cy_isa_addresses. 
*/ + +#define NR_CARDS 4 + +/* + If the total number of ports is larger than NR_PORTS, change this + constant in the definition below. No other change is necessary to + support more boards/ports. */ + +#define NR_PORTS 256 + +#define ZO_V1 0 +#define ZO_V2 1 +#define ZE_V1 2 + +#define SERIAL_PARANOIA_CHECK +#undef CY_DEBUG_OPEN +#undef CY_DEBUG_THROTTLE +#undef CY_DEBUG_OTHER +#undef CY_DEBUG_IO +#undef CY_DEBUG_COUNT +#undef CY_DEBUG_DTR +#undef CY_DEBUG_WAIT_UNTIL_SENT +#undef CY_DEBUG_INTERRUPTS +#undef CY_16Y_HACK +#undef CY_ENABLE_MONITORING +#undef CY_PCI_DEBUG + +/* + * Include section + */ +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/signal.h> +#include <linux/sched.h> +#include <linux/timer.h> +#include <linux/interrupt.h> +#include <linux/tty.h> +#include <linux/tty_flip.h> +#include <linux/serial.h> +#include <linux/major.h> +#include <linux/string.h> +#include <linux/fcntl.h> +#include <linux/ptrace.h> +#include <linux/cyclades.h> +#include <linux/mm.h> +#include <linux/ioport.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/spinlock.h> +#include <linux/bitops.h> +#include <linux/firmware.h> +#include <linux/device.h> +#include <linux/slab.h> + +#include <linux/io.h> +#include <linux/uaccess.h> + +#include <linux/kernel.h> +#include <linux/pci.h> + +#include <linux/stat.h> +#include <linux/proc_fs.h> +#include <linux/seq_file.h> + +static void cy_send_xchar(struct tty_struct *tty, char ch); + +#ifndef SERIAL_XMIT_SIZE +#define SERIAL_XMIT_SIZE (min(PAGE_SIZE, 4096)) +#endif + +#define STD_COM_FLAGS (0) + +/* firmware stuff */ +#define ZL_MAX_BLOCKS 16 +#define DRIVER_VERSION 0x02010203 +#define RAM_SIZE 0x80000 + +enum zblock_type { + ZBLOCK_PRG = 0, + ZBLOCK_FPGA = 1 +}; + +struct zfile_header { + char name[64]; + char date[32]; + char aux[32]; + u32 n_config; + u32 config_offset; + u32 n_blocks; + u32 block_offset; + u32 reserved[9]; +} __attribute__ ((packed)); + +struct zfile_config { + char name[64]; + u32 mailbox; + u32 function; + u32 n_blocks; + u32 block_list[ZL_MAX_BLOCKS]; +} __attribute__ ((packed)); + +struct zfile_block { + u32 type; + u32 file_offset; + u32 ram_offset; + u32 size; +} __attribute__ ((packed)); + +static struct tty_driver *cy_serial_driver; + +#ifdef CONFIG_ISA +/* This is the address lookup table. The driver will probe for + Cyclom-Y/ISA boards at all addresses in here. If you want the + driver to probe addresses at a different address, add it to + this table. If the driver is probing some other board and + causing problems, remove the offending address from this table. +*/ + +static unsigned int cy_isa_addresses[] = { + 0xD0000, + 0xD2000, + 0xD4000, + 0xD6000, + 0xD8000, + 0xDA000, + 0xDC000, + 0xDE000, + 0, 0, 0, 0, 0, 0, 0, 0 +}; + +#define NR_ISA_ADDRS ARRAY_SIZE(cy_isa_addresses) + +static long maddr[NR_CARDS]; +static int irq[NR_CARDS]; + +module_param_array(maddr, long, NULL, 0); +module_param_array(irq, int, NULL, 0); + +#endif /* CONFIG_ISA */ + +/* This is the per-card data structure containing address, irq, number of + channels, etc. This driver supports a maximum of NR_CARDS cards. +*/ +static struct cyclades_card cy_card[NR_CARDS]; + +static int cy_next_channel; /* next minor available */ + +/* + * This is used to look up the divisor speeds and the timeouts + * We're normally limited to 15 distinct baud rates. The extra + * are accessed via settings in info->port.flags. 
+ * 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + * 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + * HI VHI + * 20 + */ +static const int baud_table[] = { + 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, + 1800, 2400, 4800, 9600, 19200, 38400, 57600, 76800, 115200, 150000, + 230400, 0 +}; + +static const char baud_co_25[] = { /* 25 MHz clock option table */ + /* value => 00 01 02 03 04 */ + /* divide by 8 32 128 512 2048 */ + 0x00, 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x03, 0x02, + 0x02, 0x02, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + +static const char baud_bpr_25[] = { /* 25 MHz baud rate period table */ + 0x00, 0xf5, 0xa3, 0x6f, 0x5c, 0x51, 0xf5, 0xa3, 0x51, 0xa3, + 0x6d, 0x51, 0xa3, 0x51, 0xa3, 0x51, 0x36, 0x29, 0x1b, 0x15 +}; + +static const char baud_co_60[] = { /* 60 MHz clock option table (CD1400 J) */ + /* value => 00 01 02 03 04 */ + /* divide by 8 32 128 512 2048 */ + 0x00, 0x00, 0x00, 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, + 0x03, 0x02, 0x02, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00 +}; + +static const char baud_bpr_60[] = { /* 60 MHz baud rate period table (CD1400 J) */ + 0x00, 0x82, 0x21, 0xff, 0xdb, 0xc3, 0x92, 0x62, 0xc3, 0x62, + 0x41, 0xc3, 0x62, 0xc3, 0x62, 0xc3, 0x82, 0x62, 0x41, 0x32, + 0x21 +}; + +static const char baud_cor3[] = { /* receive threshold */ + 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, + 0x0a, 0x0a, 0x0a, 0x09, 0x09, 0x08, 0x08, 0x08, 0x08, 0x07, + 0x07 +}; + +/* + * The Cyclades driver implements HW flow control as any serial driver. + * The cyclades_port structure member rflow and the vector rflow_thr + * allows us to take advantage of a special feature in the CD1400 to avoid + * data loss even when the system interrupt latency is too high. These flags + * are to be used only with very special applications. Setting these flags + * requires the use of a special cable (DTR and RTS reversed). In the new + * CD1400-based boards (rev. 6.00 or later), there is no need for special + * cables. + */ + +static const char rflow_thr[] = { /* rflow threshold */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, + 0x0a +}; + +/* The Cyclom-Ye has placed the sequential chips in non-sequential + * address order. This look-up table overcomes that problem. 
+ */ +static const unsigned int cy_chip_offset[] = { 0x0000, + 0x0400, + 0x0800, + 0x0C00, + 0x0200, + 0x0600, + 0x0A00, + 0x0E00 +}; + +/* PCI related definitions */ + +#ifdef CONFIG_PCI +static const struct pci_device_id cy_pci_dev_id[] = { + /* PCI < 1Mb */ + { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Y_Lo) }, + /* PCI > 1Mb */ + { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Y_Hi) }, + /* 4Y PCI < 1Mb */ + { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_4Y_Lo) }, + /* 4Y PCI > 1Mb */ + { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_4Y_Hi) }, + /* 8Y PCI < 1Mb */ + { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_8Y_Lo) }, + /* 8Y PCI > 1Mb */ + { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_8Y_Hi) }, + /* Z PCI < 1Mb */ + { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Z_Lo) }, + /* Z PCI > 1Mb */ + { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Z_Hi) }, + { } /* end of table */ +}; +MODULE_DEVICE_TABLE(pci, cy_pci_dev_id); +#endif + +static void cy_start(struct tty_struct *); +static void cy_set_line_char(struct cyclades_port *, struct tty_struct *); +static int cyz_issue_cmd(struct cyclades_card *, __u32, __u8, __u32); +#ifdef CONFIG_ISA +static unsigned detect_isa_irq(void __iomem *); +#endif /* CONFIG_ISA */ + +#ifndef CONFIG_CYZ_INTR +static void cyz_poll(unsigned long); + +/* The Cyclades-Z polling cycle is defined by this variable */ +static long cyz_polling_cycle = CZ_DEF_POLL; + +static DEFINE_TIMER(cyz_timerlist, cyz_poll, 0, 0); + +#else /* CONFIG_CYZ_INTR */ +static void cyz_rx_restart(unsigned long); +static struct timer_list cyz_rx_full_timer[NR_PORTS]; +#endif /* CONFIG_CYZ_INTR */ + +static inline void cyy_writeb(struct cyclades_port *port, u32 reg, u8 val) +{ + struct cyclades_card *card = port->card; + + cy_writeb(port->u.cyy.base_addr + (reg << card->bus_index), val); +} + +static inline u8 cyy_readb(struct cyclades_port *port, u32 reg) +{ + struct cyclades_card *card = port->card; + + return readb(port->u.cyy.base_addr + (reg << card->bus_index)); +} + +static inline bool cy_is_Z(struct cyclades_card *card) +{ + return card->num_chips == (unsigned int)-1; +} + +static inline bool __cyz_fpga_loaded(struct RUNTIME_9060 __iomem *ctl_addr) +{ + return readl(&ctl_addr->init_ctrl) & (1 << 17); +} + +static inline bool cyz_fpga_loaded(struct cyclades_card *card) +{ + return __cyz_fpga_loaded(card->ctl_addr.p9060); +} + +static inline bool cyz_is_loaded(struct cyclades_card *card) +{ + struct FIRM_ID __iomem *fw_id = card->base_addr + ID_ADDRESS; + + return (card->hw_ver == ZO_V1 || cyz_fpga_loaded(card)) && + readl(&fw_id->signature) == ZFIRM_ID; +} + +static inline int serial_paranoia_check(struct cyclades_port *info, + const char *name, const char *routine) +{ +#ifdef SERIAL_PARANOIA_CHECK + if (!info) { + printk(KERN_WARNING "cyc Warning: null cyclades_port for (%s) " + "in %s\n", name, routine); + return 1; + } + + if (info->magic != CYCLADES_MAGIC) { + printk(KERN_WARNING "cyc Warning: bad magic number for serial " + "struct (%s) in %s\n", name, routine); + return 1; + } +#endif + return 0; +} + +/***********************************************************/ +/********* Start of block of Cyclom-Y specific code ********/ + +/* This routine waits up to 1000 micro-seconds for the previous + command to the Cirrus chip to complete and then issues the + new command. An error is returned if the previous command + didn't finish within the time limit. 
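+
+ (In practice the loop below polls the CCR up to 100 times with a
+ 10 microsecond delay between reads, which is where the 1000
+ microsecond budget mentioned above comes from.)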
+ + This function is only called from inside spinlock-protected code. + */ +static int __cyy_issue_cmd(void __iomem *base_addr, u8 cmd, int index) +{ + void __iomem *ccr = base_addr + (CyCCR << index); + unsigned int i; + + /* Check to see that the previous command has completed */ + for (i = 0; i < 100; i++) { + if (readb(ccr) == 0) + break; + udelay(10L); + } + /* if the CCR never cleared, the previous command + didn't finish within the "reasonable time" */ + if (i == 100) + return -1; + + /* Issue the new command */ + cy_writeb(ccr, cmd); + + return 0; +} + +static inline int cyy_issue_cmd(struct cyclades_port *port, u8 cmd) +{ + return __cyy_issue_cmd(port->u.cyy.base_addr, cmd, + port->card->bus_index); +} + +#ifdef CONFIG_ISA +/* ISA interrupt detection code */ +static unsigned detect_isa_irq(void __iomem *address) +{ + int irq; + unsigned long irqs, flags; + int save_xir, save_car; + int index = 0; /* IRQ probing is only for ISA */ + + /* forget possible initially masked and pending IRQ */ + irq = probe_irq_off(probe_irq_on()); + + /* Clear interrupts on the board first */ + cy_writeb(address + (Cy_ClrIntr << index), 0); + /* Cy_ClrIntr is 0x1800 */ + + irqs = probe_irq_on(); + /* Wait ... */ + msleep(5); + + /* Enable the Tx interrupts on the CD1400 */ + local_irq_save(flags); + cy_writeb(address + (CyCAR << index), 0); + __cyy_issue_cmd(address, CyCHAN_CTL | CyENB_XMTR, index); + + cy_writeb(address + (CyCAR << index), 0); + cy_writeb(address + (CySRER << index), + readb(address + (CySRER << index)) | CyTxRdy); + local_irq_restore(flags); + + /* Wait ... */ + msleep(5); + + /* Check which interrupt is in use */ + irq = probe_irq_off(irqs); + + /* Clean up */ + save_xir = (u_char) readb(address + (CyTIR << index)); + save_car = readb(address + (CyCAR << index)); + cy_writeb(address + (CyCAR << index), (save_xir & 0x3)); + cy_writeb(address + (CySRER << index), + readb(address + (CySRER << index)) & ~CyTxRdy); + cy_writeb(address + (CyTIR << index), (save_xir & 0x3f)); + cy_writeb(address + (CyCAR << index), (save_car)); + cy_writeb(address + (Cy_ClrIntr << index), 0); + /* Cy_ClrIntr is 0x1800 */ + + return (irq > 0) ? 
irq : 0; +} +#endif /* CONFIG_ISA */ + +static void cyy_chip_rx(struct cyclades_card *cinfo, int chip, + void __iomem *base_addr) +{ + struct cyclades_port *info; + struct tty_struct *tty; + int len, index = cinfo->bus_index; + u8 ivr, save_xir, channel, save_car, data, char_count; + +#ifdef CY_DEBUG_INTERRUPTS + printk(KERN_DEBUG "cyy_interrupt: rcvd intr, chip %d\n", chip); +#endif + /* determine the channel & change to that context */ + save_xir = readb(base_addr + (CyRIR << index)); + channel = save_xir & CyIRChannel; + info = &cinfo->ports[channel + chip * 4]; + save_car = cyy_readb(info, CyCAR); + cyy_writeb(info, CyCAR, save_xir); + ivr = cyy_readb(info, CyRIVR) & CyIVRMask; + + tty = tty_port_tty_get(&info->port); + /* if there is nowhere to put the data, discard it */ + if (tty == NULL) { + if (ivr == CyIVRRxEx) { /* exception */ + data = cyy_readb(info, CyRDSR); + } else { /* normal character reception */ + char_count = cyy_readb(info, CyRDCR); + while (char_count--) + data = cyy_readb(info, CyRDSR); + } + goto end; + } + /* there is an open port for this data */ + if (ivr == CyIVRRxEx) { /* exception */ + data = cyy_readb(info, CyRDSR); + + /* For statistics only */ + if (data & CyBREAK) + info->icount.brk++; + else if (data & CyFRAME) + info->icount.frame++; + else if (data & CyPARITY) + info->icount.parity++; + else if (data & CyOVERRUN) + info->icount.overrun++; + + if (data & info->ignore_status_mask) { + info->icount.rx++; + tty_kref_put(tty); + return; + } + if (tty_buffer_request_room(tty, 1)) { + if (data & info->read_status_mask) { + if (data & CyBREAK) { + tty_insert_flip_char(tty, + cyy_readb(info, CyRDSR), + TTY_BREAK); + info->icount.rx++; + if (info->port.flags & ASYNC_SAK) + do_SAK(tty); + } else if (data & CyFRAME) { + tty_insert_flip_char(tty, + cyy_readb(info, CyRDSR), + TTY_FRAME); + info->icount.rx++; + info->idle_stats.frame_errs++; + } else if (data & CyPARITY) { + /* Pieces of seven... */ + tty_insert_flip_char(tty, + cyy_readb(info, CyRDSR), + TTY_PARITY); + info->icount.rx++; + info->idle_stats.parity_errs++; + } else if (data & CyOVERRUN) { + tty_insert_flip_char(tty, 0, + TTY_OVERRUN); + info->icount.rx++; + /* If the flip buffer itself is + overflowing, we still lose + the next incoming character. + */ + tty_insert_flip_char(tty, + cyy_readb(info, CyRDSR), + TTY_FRAME); + info->icount.rx++; + info->idle_stats.overruns++; + /* These two conditions may imply */ + /* a normal read should be done. */ + /* } else if(data & CyTIMEOUT) { */ + /* } else if(data & CySPECHAR) { */ + } else { + tty_insert_flip_char(tty, 0, + TTY_NORMAL); + info->icount.rx++; + } + } else { + tty_insert_flip_char(tty, 0, TTY_NORMAL); + info->icount.rx++; + } + } else { + /* there was a software buffer overrun and nothing + * could be done about it!!! 
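+ * (tty_buffer_request_room() found no space, so the character is
+ * dropped here and only the overrun counters below are updated)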
*/ + info->icount.buf_overrun++; + info->idle_stats.overruns++; + } + } else { /* normal character reception */ + /* load # chars available from the chip */ + char_count = cyy_readb(info, CyRDCR); + +#ifdef CY_ENABLE_MONITORING + ++info->mon.int_count; + info->mon.char_count += char_count; + if (char_count > info->mon.char_max) + info->mon.char_max = char_count; + info->mon.char_last = char_count; +#endif + len = tty_buffer_request_room(tty, char_count); + while (len--) { + data = cyy_readb(info, CyRDSR); + tty_insert_flip_char(tty, data, TTY_NORMAL); + info->idle_stats.recv_bytes++; + info->icount.rx++; +#ifdef CY_16Y_HACK + udelay(10L); +#endif + } + info->idle_stats.recv_idle = jiffies; + } + tty_schedule_flip(tty); + tty_kref_put(tty); +end: + /* end of service */ + cyy_writeb(info, CyRIR, save_xir & 0x3f); + cyy_writeb(info, CyCAR, save_car); +} + +static void cyy_chip_tx(struct cyclades_card *cinfo, unsigned int chip, + void __iomem *base_addr) +{ + struct cyclades_port *info; + struct tty_struct *tty; + int char_count, index = cinfo->bus_index; + u8 save_xir, channel, save_car, outch; + + /* Since we only get here when the transmit buffer + is empty, we know we can always stuff a dozen + characters. */ +#ifdef CY_DEBUG_INTERRUPTS + printk(KERN_DEBUG "cyy_interrupt: xmit intr, chip %d\n", chip); +#endif + + /* determine the channel & change to that context */ + save_xir = readb(base_addr + (CyTIR << index)); + channel = save_xir & CyIRChannel; + save_car = readb(base_addr + (CyCAR << index)); + cy_writeb(base_addr + (CyCAR << index), save_xir); + + info = &cinfo->ports[channel + chip * 4]; + tty = tty_port_tty_get(&info->port); + if (tty == NULL) { + cyy_writeb(info, CySRER, cyy_readb(info, CySRER) & ~CyTxRdy); + goto end; + } + + /* load the on-chip space for outbound data */ + char_count = info->xmit_fifo_size; + + if (info->x_char) { /* send special char */ + outch = info->x_char; + cyy_writeb(info, CyTDR, outch); + char_count--; + info->icount.tx++; + info->x_char = 0; + } + + if (info->breakon || info->breakoff) { + if (info->breakon) { + cyy_writeb(info, CyTDR, 0); + cyy_writeb(info, CyTDR, 0x81); + info->breakon = 0; + char_count -= 2; + } + if (info->breakoff) { + cyy_writeb(info, CyTDR, 0); + cyy_writeb(info, CyTDR, 0x83); + info->breakoff = 0; + char_count -= 2; + } + } + + while (char_count-- > 0) { + if (!info->xmit_cnt) { + if (cyy_readb(info, CySRER) & CyTxMpty) { + cyy_writeb(info, CySRER, + cyy_readb(info, CySRER) & ~CyTxMpty); + } else { + cyy_writeb(info, CySRER, CyTxMpty | + (cyy_readb(info, CySRER) & ~CyTxRdy)); + } + goto done; + } + if (info->port.xmit_buf == NULL) { + cyy_writeb(info, CySRER, + cyy_readb(info, CySRER) & ~CyTxRdy); + goto done; + } + if (tty->stopped || tty->hw_stopped) { + cyy_writeb(info, CySRER, + cyy_readb(info, CySRER) & ~CyTxRdy); + goto done; + } + /* Because the Embedded Transmit Commands have been enabled, + * we must check to see if the escape character, NULL, is being + * sent. If it is, we must ensure that there is room for it to + * be doubled in the output stream. Therefore we no longer + * advance the pointer when the character is fetched, but + * rather wait until after the check for a NULL output + * character. This is necessary because there may not be room + * for the two chars needed to send a NULL.) 
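+ * (hence the char_count > 1 test below: the NUL has to be sent
+ * doubled, and both bytes are queued only when the FIFO has room
+ * for the pair)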
+ */ + outch = info->port.xmit_buf[info->xmit_tail]; + if (outch) { + info->xmit_cnt--; + info->xmit_tail = (info->xmit_tail + 1) & + (SERIAL_XMIT_SIZE - 1); + cyy_writeb(info, CyTDR, outch); + info->icount.tx++; + } else { + if (char_count > 1) { + info->xmit_cnt--; + info->xmit_tail = (info->xmit_tail + 1) & + (SERIAL_XMIT_SIZE - 1); + cyy_writeb(info, CyTDR, outch); + cyy_writeb(info, CyTDR, 0); + info->icount.tx++; + char_count--; + } + } + } + +done: + tty_wakeup(tty); + tty_kref_put(tty); +end: + /* end of service */ + cyy_writeb(info, CyTIR, save_xir & 0x3f); + cyy_writeb(info, CyCAR, save_car); +} + +static void cyy_chip_modem(struct cyclades_card *cinfo, int chip, + void __iomem *base_addr) +{ + struct cyclades_port *info; + struct tty_struct *tty; + int index = cinfo->bus_index; + u8 save_xir, channel, save_car, mdm_change, mdm_status; + + /* determine the channel & change to that context */ + save_xir = readb(base_addr + (CyMIR << index)); + channel = save_xir & CyIRChannel; + info = &cinfo->ports[channel + chip * 4]; + save_car = cyy_readb(info, CyCAR); + cyy_writeb(info, CyCAR, save_xir); + + mdm_change = cyy_readb(info, CyMISR); + mdm_status = cyy_readb(info, CyMSVR1); + + tty = tty_port_tty_get(&info->port); + if (!tty) + goto end; + + if (mdm_change & CyANY_DELTA) { + /* For statistics only */ + if (mdm_change & CyDCD) + info->icount.dcd++; + if (mdm_change & CyCTS) + info->icount.cts++; + if (mdm_change & CyDSR) + info->icount.dsr++; + if (mdm_change & CyRI) + info->icount.rng++; + + wake_up_interruptible(&info->port.delta_msr_wait); + } + + if ((mdm_change & CyDCD) && (info->port.flags & ASYNC_CHECK_CD)) { + if (mdm_status & CyDCD) + wake_up_interruptible(&info->port.open_wait); + else + tty_hangup(tty); + } + if ((mdm_change & CyCTS) && (info->port.flags & ASYNC_CTS_FLOW)) { + if (tty->hw_stopped) { + if (mdm_status & CyCTS) { + /* cy_start isn't used + because... !!! */ + tty->hw_stopped = 0; + cyy_writeb(info, CySRER, + cyy_readb(info, CySRER) | CyTxRdy); + tty_wakeup(tty); + } + } else { + if (!(mdm_status & CyCTS)) { + /* cy_stop isn't used + because ... !!! */ + tty->hw_stopped = 1; + cyy_writeb(info, CySRER, + cyy_readb(info, CySRER) & ~CyTxRdy); + } + } + } +/* if (mdm_change & CyDSR) { + } + if (mdm_change & CyRI) { + }*/ + tty_kref_put(tty); +end: + /* end of service */ + cyy_writeb(info, CyMIR, save_xir & 0x3f); + cyy_writeb(info, CyCAR, save_car); +} + +/* The real interrupt service routine is called + whenever the card wants its hand held--chars + received, out buffer empty, modem change, etc. + */ +static irqreturn_t cyy_interrupt(int irq, void *dev_id) +{ + int status; + struct cyclades_card *cinfo = dev_id; + void __iomem *base_addr, *card_base_addr; + unsigned int chip, too_many, had_work; + int index; + + if (unlikely(cinfo == NULL)) { +#ifdef CY_DEBUG_INTERRUPTS + printk(KERN_DEBUG "cyy_interrupt: spurious interrupt %d\n", + irq); +#endif + return IRQ_NONE; /* spurious interrupt */ + } + + card_base_addr = cinfo->base_addr; + index = cinfo->bus_index; + + /* card was not initialized yet (e.g. DEBUG_SHIRQ) */ + if (unlikely(card_base_addr == NULL)) + return IRQ_HANDLED; + + /* This loop checks all chips in the card. Make a note whenever + _any_ chip had some work to do, as this is considered an + indication that there will be more to do. Only when no chip + has any work does this outermost loop exit. 
+ */ + do { + had_work = 0; + for (chip = 0; chip < cinfo->num_chips; chip++) { + base_addr = cinfo->base_addr + + (cy_chip_offset[chip] << index); + too_many = 0; + while ((status = readb(base_addr + + (CySVRR << index))) != 0x00) { + had_work++; + /* The purpose of the following test is to ensure that + no chip can monopolize the driver. This forces the + chips to be checked in a round-robin fashion (after + draining each of a bunch (1000) of characters). + */ + if (1000 < too_many++) + break; + spin_lock(&cinfo->card_lock); + if (status & CySRReceive) /* rx intr */ + cyy_chip_rx(cinfo, chip, base_addr); + if (status & CySRTransmit) /* tx intr */ + cyy_chip_tx(cinfo, chip, base_addr); + if (status & CySRModem) /* modem intr */ + cyy_chip_modem(cinfo, chip, base_addr); + spin_unlock(&cinfo->card_lock); + } + } + } while (had_work); + + /* clear interrupts */ + spin_lock(&cinfo->card_lock); + cy_writeb(card_base_addr + (Cy_ClrIntr << index), 0); + /* Cy_ClrIntr is 0x1800 */ + spin_unlock(&cinfo->card_lock); + return IRQ_HANDLED; +} /* cyy_interrupt */ + +static void cyy_change_rts_dtr(struct cyclades_port *info, unsigned int set, + unsigned int clear) +{ + struct cyclades_card *card = info->card; + int channel = info->line - card->first_line; + u32 rts, dtr, msvrr, msvrd; + + channel &= 0x03; + + if (info->rtsdtr_inv) { + msvrr = CyMSVR2; + msvrd = CyMSVR1; + rts = CyDTR; + dtr = CyRTS; + } else { + msvrr = CyMSVR1; + msvrd = CyMSVR2; + rts = CyRTS; + dtr = CyDTR; + } + if (set & TIOCM_RTS) { + cyy_writeb(info, CyCAR, channel); + cyy_writeb(info, msvrr, rts); + } + if (clear & TIOCM_RTS) { + cyy_writeb(info, CyCAR, channel); + cyy_writeb(info, msvrr, ~rts); + } + if (set & TIOCM_DTR) { + cyy_writeb(info, CyCAR, channel); + cyy_writeb(info, msvrd, dtr); +#ifdef CY_DEBUG_DTR + printk(KERN_DEBUG "cyc:set_modem_info raising DTR\n"); + printk(KERN_DEBUG " status: 0x%x, 0x%x\n", + cyy_readb(info, CyMSVR1), + cyy_readb(info, CyMSVR2)); +#endif + } + if (clear & TIOCM_DTR) { + cyy_writeb(info, CyCAR, channel); + cyy_writeb(info, msvrd, ~dtr); +#ifdef CY_DEBUG_DTR + printk(KERN_DEBUG "cyc:set_modem_info dropping DTR\n"); + printk(KERN_DEBUG " status: 0x%x, 0x%x\n", + cyy_readb(info, CyMSVR1), + cyy_readb(info, CyMSVR2)); +#endif + } +} + +/***********************************************************/ +/********* End of block of Cyclom-Y specific code **********/ +/******** Start of block of Cyclades-Z specific code *******/ +/***********************************************************/ + +static int +cyz_fetch_msg(struct cyclades_card *cinfo, + __u32 *channel, __u8 *cmd, __u32 *param) +{ + struct BOARD_CTRL __iomem *board_ctrl = cinfo->board_ctrl; + unsigned long loc_doorbell; + + loc_doorbell = readl(&cinfo->ctl_addr.p9060->loc_doorbell); + if (loc_doorbell) { + *cmd = (char)(0xff & loc_doorbell); + *channel = readl(&board_ctrl->fwcmd_channel); + *param = (__u32) readl(&board_ctrl->fwcmd_param); + cy_writel(&cinfo->ctl_addr.p9060->loc_doorbell, 0xffffffff); + return 1; + } + return 0; +} /* cyz_fetch_msg */ + +static int +cyz_issue_cmd(struct cyclades_card *cinfo, + __u32 channel, __u8 cmd, __u32 param) +{ + struct BOARD_CTRL __iomem *board_ctrl = cinfo->board_ctrl; + __u32 __iomem *pci_doorbell; + unsigned int index; + + if (!cyz_is_loaded(cinfo)) + return -1; + + index = 0; + pci_doorbell = &cinfo->ctl_addr.p9060->pci_doorbell; + while ((readl(pci_doorbell) & 0xff) != 0) { + if (index++ == 1000) + return (int)(readl(pci_doorbell) & 0xff); + udelay(50L); + } + cy_writel(&board_ctrl->hcmd_channel, 
channel); + cy_writel(&board_ctrl->hcmd_param, param); + cy_writel(pci_doorbell, (long)cmd); + + return 0; +} /* cyz_issue_cmd */ + +static void cyz_handle_rx(struct cyclades_port *info, struct tty_struct *tty) +{ + struct BUF_CTRL __iomem *buf_ctrl = info->u.cyz.buf_ctrl; + struct cyclades_card *cinfo = info->card; + unsigned int char_count; + int len; +#ifdef BLOCKMOVE + unsigned char *buf; +#else + char data; +#endif + __u32 rx_put, rx_get, new_rx_get, rx_bufsize, rx_bufaddr; + + rx_get = new_rx_get = readl(&buf_ctrl->rx_get); + rx_put = readl(&buf_ctrl->rx_put); + rx_bufsize = readl(&buf_ctrl->rx_bufsize); + rx_bufaddr = readl(&buf_ctrl->rx_bufaddr); + if (rx_put >= rx_get) + char_count = rx_put - rx_get; + else + char_count = rx_put - rx_get + rx_bufsize; + + if (char_count) { +#ifdef CY_ENABLE_MONITORING + info->mon.int_count++; + info->mon.char_count += char_count; + if (char_count > info->mon.char_max) + info->mon.char_max = char_count; + info->mon.char_last = char_count; +#endif + if (tty == NULL) { + /* flush received characters */ + new_rx_get = (new_rx_get + char_count) & + (rx_bufsize - 1); + info->rflush_count++; + } else { +#ifdef BLOCKMOVE + /* we'd like to use memcpy(t, f, n) and memset(s, c, count) + for performance, but because of buffer boundaries, there + may be several steps to the operation */ + while (1) { + len = tty_prepare_flip_string(tty, &buf, + char_count); + if (!len) + break; + + len = min_t(unsigned int, min(len, char_count), + rx_bufsize - new_rx_get); + + memcpy_fromio(buf, cinfo->base_addr + + rx_bufaddr + new_rx_get, len); + + new_rx_get = (new_rx_get + len) & + (rx_bufsize - 1); + char_count -= len; + info->icount.rx += len; + info->idle_stats.recv_bytes += len; + } +#else + len = tty_buffer_request_room(tty, char_count); + while (len--) { + data = readb(cinfo->base_addr + rx_bufaddr + + new_rx_get); + new_rx_get = (new_rx_get + 1) & + (rx_bufsize - 1); + tty_insert_flip_char(tty, data, TTY_NORMAL); + info->idle_stats.recv_bytes++; + info->icount.rx++; + } +#endif +#ifdef CONFIG_CYZ_INTR + /* Recalculate the number of chars in the RX buffer and issue + a cmd in case it's higher than the RX high water mark */ + rx_put = readl(&buf_ctrl->rx_put); + if (rx_put >= rx_get) + char_count = rx_put - rx_get; + else + char_count = rx_put - rx_get + rx_bufsize; + if (char_count >= readl(&buf_ctrl->rx_threshold) && + !timer_pending(&cyz_rx_full_timer[ + info->line])) + mod_timer(&cyz_rx_full_timer[info->line], + jiffies + 1); +#endif + info->idle_stats.recv_idle = jiffies; + tty_schedule_flip(tty); + } + /* Update rx_get */ + cy_writel(&buf_ctrl->rx_get, new_rx_get); + } +} + +static void cyz_handle_tx(struct cyclades_port *info, struct tty_struct *tty) +{ + struct BUF_CTRL __iomem *buf_ctrl = info->u.cyz.buf_ctrl; + struct cyclades_card *cinfo = info->card; + u8 data; + unsigned int char_count; +#ifdef BLOCKMOVE + int small_count; +#endif + __u32 tx_put, tx_get, tx_bufsize, tx_bufaddr; + + if (info->xmit_cnt <= 0) /* Nothing to transmit */ + return; + + tx_get = readl(&buf_ctrl->tx_get); + tx_put = readl(&buf_ctrl->tx_put); + tx_bufsize = readl(&buf_ctrl->tx_bufsize); + tx_bufaddr = readl(&buf_ctrl->tx_bufaddr); + if (tx_put >= tx_get) + char_count = tx_get - tx_put - 1 + tx_bufsize; + else + char_count = tx_get - tx_put - 1; + + if (char_count) { + + if (tty == NULL) + goto ztxdone; + + if (info->x_char) { /* send special char */ + data = info->x_char; + + cy_writeb(cinfo->base_addr + tx_bufaddr + tx_put, data); + tx_put = (tx_put + 1) & (tx_bufsize - 1); + 
info->x_char = 0; + char_count--; + info->icount.tx++; + } +#ifdef BLOCKMOVE + while (0 < (small_count = min_t(unsigned int, + tx_bufsize - tx_put, min_t(unsigned int, + (SERIAL_XMIT_SIZE - info->xmit_tail), + min_t(unsigned int, info->xmit_cnt, + char_count))))) { + + memcpy_toio((char *)(cinfo->base_addr + tx_bufaddr + + tx_put), + &info->port.xmit_buf[info->xmit_tail], + small_count); + + tx_put = (tx_put + small_count) & (tx_bufsize - 1); + char_count -= small_count; + info->icount.tx += small_count; + info->xmit_cnt -= small_count; + info->xmit_tail = (info->xmit_tail + small_count) & + (SERIAL_XMIT_SIZE - 1); + } +#else + while (info->xmit_cnt && char_count) { + data = info->port.xmit_buf[info->xmit_tail]; + info->xmit_cnt--; + info->xmit_tail = (info->xmit_tail + 1) & + (SERIAL_XMIT_SIZE - 1); + + cy_writeb(cinfo->base_addr + tx_bufaddr + tx_put, data); + tx_put = (tx_put + 1) & (tx_bufsize - 1); + char_count--; + info->icount.tx++; + } +#endif + tty_wakeup(tty); +ztxdone: + /* Update tx_put */ + cy_writel(&buf_ctrl->tx_put, tx_put); + } +} + +static void cyz_handle_cmd(struct cyclades_card *cinfo) +{ + struct BOARD_CTRL __iomem *board_ctrl = cinfo->board_ctrl; + struct tty_struct *tty; + struct cyclades_port *info; + __u32 channel, param, fw_ver; + __u8 cmd; + int special_count; + int delta_count; + + fw_ver = readl(&board_ctrl->fw_version); + + while (cyz_fetch_msg(cinfo, &channel, &cmd, ¶m) == 1) { + special_count = 0; + delta_count = 0; + info = &cinfo->ports[channel]; + tty = tty_port_tty_get(&info->port); + if (tty == NULL) + continue; + + switch (cmd) { + case C_CM_PR_ERROR: + tty_insert_flip_char(tty, 0, TTY_PARITY); + info->icount.rx++; + special_count++; + break; + case C_CM_FR_ERROR: + tty_insert_flip_char(tty, 0, TTY_FRAME); + info->icount.rx++; + special_count++; + break; + case C_CM_RXBRK: + tty_insert_flip_char(tty, 0, TTY_BREAK); + info->icount.rx++; + special_count++; + break; + case C_CM_MDCD: + info->icount.dcd++; + delta_count++; + if (info->port.flags & ASYNC_CHECK_CD) { + u32 dcd = fw_ver > 241 ? param : + readl(&info->u.cyz.ch_ctrl->rs_status); + if (dcd & C_RS_DCD) + wake_up_interruptible(&info->port.open_wait); + else + tty_hangup(tty); + } + break; + case C_CM_MCTS: + info->icount.cts++; + delta_count++; + break; + case C_CM_MRI: + info->icount.rng++; + delta_count++; + break; + case C_CM_MDSR: + info->icount.dsr++; + delta_count++; + break; +#ifdef Z_WAKE + case C_CM_IOCTLW: + complete(&info->shutdown_wait); + break; +#endif +#ifdef CONFIG_CYZ_INTR + case C_CM_RXHIWM: + case C_CM_RXNNDT: + case C_CM_INTBACK2: + /* Reception Interrupt */ +#ifdef CY_DEBUG_INTERRUPTS + printk(KERN_DEBUG "cyz_interrupt: rcvd intr, card %d, " + "port %ld\n", info->card, channel); +#endif + cyz_handle_rx(info, tty); + break; + case C_CM_TXBEMPTY: + case C_CM_TXLOWWM: + case C_CM_INTBACK: + /* Transmission Interrupt */ +#ifdef CY_DEBUG_INTERRUPTS + printk(KERN_DEBUG "cyz_interrupt: xmit intr, card %d, " + "port %ld\n", info->card, channel); +#endif + cyz_handle_tx(info, tty); + break; +#endif /* CONFIG_CYZ_INTR */ + case C_CM_FATAL: + /* should do something with this !!! 
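+ (for now the C_CM_FATAL message from the firmware is simply
+ consumed and otherwise ignored)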
*/ + break; + default: + break; + } + if (delta_count) + wake_up_interruptible(&info->port.delta_msr_wait); + if (special_count) + tty_schedule_flip(tty); + tty_kref_put(tty); + } +} + +#ifdef CONFIG_CYZ_INTR +static irqreturn_t cyz_interrupt(int irq, void *dev_id) +{ + struct cyclades_card *cinfo = dev_id; + + if (unlikely(!cyz_is_loaded(cinfo))) { +#ifdef CY_DEBUG_INTERRUPTS + printk(KERN_DEBUG "cyz_interrupt: board not yet loaded " + "(IRQ%d).\n", irq); +#endif + return IRQ_NONE; + } + + /* Handle the interrupts */ + cyz_handle_cmd(cinfo); + + return IRQ_HANDLED; +} /* cyz_interrupt */ + +static void cyz_rx_restart(unsigned long arg) +{ + struct cyclades_port *info = (struct cyclades_port *)arg; + struct cyclades_card *card = info->card; + int retval; + __u32 channel = info->line - card->first_line; + unsigned long flags; + + spin_lock_irqsave(&card->card_lock, flags); + retval = cyz_issue_cmd(card, channel, C_CM_INTBACK2, 0L); + if (retval != 0) { + printk(KERN_ERR "cyc:cyz_rx_restart retval on ttyC%d was %x\n", + info->line, retval); + } + spin_unlock_irqrestore(&card->card_lock, flags); +} + +#else /* CONFIG_CYZ_INTR */ + +static void cyz_poll(unsigned long arg) +{ + struct cyclades_card *cinfo; + struct cyclades_port *info; + unsigned long expires = jiffies + HZ; + unsigned int port, card; + + for (card = 0; card < NR_CARDS; card++) { + cinfo = &cy_card[card]; + + if (!cy_is_Z(cinfo)) + continue; + if (!cyz_is_loaded(cinfo)) + continue; + + /* Skip first polling cycle to avoid racing conditions with the FW */ + if (!cinfo->intr_enabled) { + cinfo->intr_enabled = 1; + continue; + } + + cyz_handle_cmd(cinfo); + + for (port = 0; port < cinfo->nports; port++) { + struct tty_struct *tty; + + info = &cinfo->ports[port]; + tty = tty_port_tty_get(&info->port); + /* OK to pass NULL to the handle functions below. + They need to drop the data in that case. */ + + if (!info->throttle) + cyz_handle_rx(info, tty); + cyz_handle_tx(info, tty); + tty_kref_put(tty); + } + /* poll every 'cyz_polling_cycle' period */ + expires = jiffies + cyz_polling_cycle; + } + mod_timer(&cyz_timerlist, expires); +} /* cyz_poll */ + +#endif /* CONFIG_CYZ_INTR */ + +/********** End of block of Cyclades-Z specific code *********/ +/***********************************************************/ + +/* This is called whenever a port becomes active; + interrupts are enabled and DTR & RTS are turned on. + */ +static int cy_startup(struct cyclades_port *info, struct tty_struct *tty) +{ + struct cyclades_card *card; + unsigned long flags; + int retval = 0; + int channel; + unsigned long page; + + card = info->card; + channel = info->line - card->first_line; + + page = get_zeroed_page(GFP_KERNEL); + if (!page) + return -ENOMEM; + + spin_lock_irqsave(&card->card_lock, flags); + + if (info->port.flags & ASYNC_INITIALIZED) + goto errout; + + if (!info->type) { + set_bit(TTY_IO_ERROR, &tty->flags); + goto errout; + } + + if (info->port.xmit_buf) + free_page(page); + else + info->port.xmit_buf = (unsigned char *)page; + + spin_unlock_irqrestore(&card->card_lock, flags); + + cy_set_line_char(info, tty); + + if (!cy_is_Z(card)) { + channel &= 0x03; + + spin_lock_irqsave(&card->card_lock, flags); + + cyy_writeb(info, CyCAR, channel); + + cyy_writeb(info, CyRTPR, + (info->default_timeout ? 
info->default_timeout : 0x02)); + /* 10ms rx timeout */ + + cyy_issue_cmd(info, CyCHAN_CTL | CyENB_RCVR | CyENB_XMTR); + + cyy_change_rts_dtr(info, TIOCM_RTS | TIOCM_DTR, 0); + + cyy_writeb(info, CySRER, cyy_readb(info, CySRER) | CyRxData); + } else { + struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl; + + if (!cyz_is_loaded(card)) + return -ENODEV; + +#ifdef CY_DEBUG_OPEN + printk(KERN_DEBUG "cyc startup Z card %d, channel %d, " + "base_addr %p\n", card, channel, card->base_addr); +#endif + spin_lock_irqsave(&card->card_lock, flags); + + cy_writel(&ch_ctrl->op_mode, C_CH_ENABLE); +#ifdef Z_WAKE +#ifdef CONFIG_CYZ_INTR + cy_writel(&ch_ctrl->intr_enable, + C_IN_TXBEMPTY | C_IN_TXLOWWM | C_IN_RXHIWM | + C_IN_RXNNDT | C_IN_IOCTLW | C_IN_MDCD); +#else + cy_writel(&ch_ctrl->intr_enable, + C_IN_IOCTLW | C_IN_MDCD); +#endif /* CONFIG_CYZ_INTR */ +#else +#ifdef CONFIG_CYZ_INTR + cy_writel(&ch_ctrl->intr_enable, + C_IN_TXBEMPTY | C_IN_TXLOWWM | C_IN_RXHIWM | + C_IN_RXNNDT | C_IN_MDCD); +#else + cy_writel(&ch_ctrl->intr_enable, C_IN_MDCD); +#endif /* CONFIG_CYZ_INTR */ +#endif /* Z_WAKE */ + + retval = cyz_issue_cmd(card, channel, C_CM_IOCTL, 0L); + if (retval != 0) { + printk(KERN_ERR "cyc:startup(1) retval on ttyC%d was " + "%x\n", info->line, retval); + } + + /* Flush RX buffers before raising DTR and RTS */ + retval = cyz_issue_cmd(card, channel, C_CM_FLUSH_RX, 0L); + if (retval != 0) { + printk(KERN_ERR "cyc:startup(2) retval on ttyC%d was " + "%x\n", info->line, retval); + } + + /* set timeout !!! */ + /* set RTS and DTR !!! */ + tty_port_raise_dtr_rts(&info->port); + + /* enable send, recv, modem !!! */ + } + + info->port.flags |= ASYNC_INITIALIZED; + + clear_bit(TTY_IO_ERROR, &tty->flags); + info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; + info->breakon = info->breakoff = 0; + memset((char *)&info->idle_stats, 0, sizeof(info->idle_stats)); + info->idle_stats.in_use = + info->idle_stats.recv_idle = + info->idle_stats.xmit_idle = jiffies; + + spin_unlock_irqrestore(&card->card_lock, flags); + +#ifdef CY_DEBUG_OPEN + printk(KERN_DEBUG "cyc startup done\n"); +#endif + return 0; + +errout: + spin_unlock_irqrestore(&card->card_lock, flags); + free_page(page); + return retval; +} /* startup */ + +static void start_xmit(struct cyclades_port *info) +{ + struct cyclades_card *card = info->card; + unsigned long flags; + int channel = info->line - card->first_line; + + if (!cy_is_Z(card)) { + spin_lock_irqsave(&card->card_lock, flags); + cyy_writeb(info, CyCAR, channel & 0x03); + cyy_writeb(info, CySRER, cyy_readb(info, CySRER) | CyTxRdy); + spin_unlock_irqrestore(&card->card_lock, flags); + } else { +#ifdef CONFIG_CYZ_INTR + int retval; + + spin_lock_irqsave(&card->card_lock, flags); + retval = cyz_issue_cmd(card, channel, C_CM_INTBACK, 0L); + if (retval != 0) { + printk(KERN_ERR "cyc:start_xmit retval on ttyC%d was " + "%x\n", info->line, retval); + } + spin_unlock_irqrestore(&card->card_lock, flags); +#else /* CONFIG_CYZ_INTR */ + /* Don't have to do anything at this time */ +#endif /* CONFIG_CYZ_INTR */ + } +} /* start_xmit */ + +/* + * This routine shuts down a serial port; interrupts are disabled, + * and DTR is dropped if the hangup on close termio flag is on. 
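+ * (i.e. HUPCL: both the Cyclom-Y and Cyclades-Z branches below
+ * check tty->termios->c_cflag & HUPCL before dropping DTR/RTS)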
+ */ +static void cy_shutdown(struct cyclades_port *info, struct tty_struct *tty) +{ + struct cyclades_card *card; + unsigned long flags; + int channel; + + if (!(info->port.flags & ASYNC_INITIALIZED)) + return; + + card = info->card; + channel = info->line - card->first_line; + if (!cy_is_Z(card)) { + spin_lock_irqsave(&card->card_lock, flags); + + /* Clear delta_msr_wait queue to avoid mem leaks. */ + wake_up_interruptible(&info->port.delta_msr_wait); + + if (info->port.xmit_buf) { + unsigned char *temp; + temp = info->port.xmit_buf; + info->port.xmit_buf = NULL; + free_page((unsigned long)temp); + } + if (tty->termios->c_cflag & HUPCL) + cyy_change_rts_dtr(info, 0, TIOCM_RTS | TIOCM_DTR); + + cyy_issue_cmd(info, CyCHAN_CTL | CyDIS_RCVR); + /* it may be appropriate to clear _XMIT at + some later date (after testing)!!! */ + + set_bit(TTY_IO_ERROR, &tty->flags); + info->port.flags &= ~ASYNC_INITIALIZED; + spin_unlock_irqrestore(&card->card_lock, flags); + } else { +#ifdef CY_DEBUG_OPEN + printk(KERN_DEBUG "cyc shutdown Z card %d, channel %d, " + "base_addr %p\n", card, channel, card->base_addr); +#endif + + if (!cyz_is_loaded(card)) + return; + + spin_lock_irqsave(&card->card_lock, flags); + + if (info->port.xmit_buf) { + unsigned char *temp; + temp = info->port.xmit_buf; + info->port.xmit_buf = NULL; + free_page((unsigned long)temp); + } + + if (tty->termios->c_cflag & HUPCL) + tty_port_lower_dtr_rts(&info->port); + + set_bit(TTY_IO_ERROR, &tty->flags); + info->port.flags &= ~ASYNC_INITIALIZED; + + spin_unlock_irqrestore(&card->card_lock, flags); + } + +#ifdef CY_DEBUG_OPEN + printk(KERN_DEBUG "cyc shutdown done\n"); +#endif +} /* shutdown */ + +/* + * ------------------------------------------------------------ + * cy_open() and friends + * ------------------------------------------------------------ + */ + +/* + * This routine is called whenever a serial port is opened. It + * performs the serial-specific initialization for the tty structure. + */ +static int cy_open(struct tty_struct *tty, struct file *filp) +{ + struct cyclades_port *info; + unsigned int i, line; + int retval; + + line = tty->index; + if (tty->index < 0 || NR_PORTS <= line) + return -ENODEV; + + for (i = 0; i < NR_CARDS; i++) + if (line < cy_card[i].first_line + cy_card[i].nports && + line >= cy_card[i].first_line) + break; + if (i >= NR_CARDS) + return -ENODEV; + info = &cy_card[i].ports[line - cy_card[i].first_line]; + if (info->line < 0) + return -ENODEV; + + /* If the card's firmware hasn't been loaded, + treat it as absent from the system. This + will make the user pay attention. + */ + if (cy_is_Z(info->card)) { + struct cyclades_card *cinfo = info->card; + struct FIRM_ID __iomem *firm_id = cinfo->base_addr + ID_ADDRESS; + + if (!cyz_is_loaded(cinfo)) { + if (cinfo->hw_ver == ZE_V1 && cyz_fpga_loaded(cinfo) && + readl(&firm_id->signature) == + ZFIRM_HLT) { + printk(KERN_ERR "cyc:Cyclades-Z Error: you " + "need an external power supply for " + "this number of ports.\nFirmware " + "halted.\n"); + } else { + printk(KERN_ERR "cyc:Cyclades-Z firmware not " + "yet loaded\n"); + } + return -ENODEV; + } +#ifdef CONFIG_CYZ_INTR + else { + /* In case this Z board is operating in interrupt mode, its + interrupts should be enabled as soon as the first open + happens to one of its ports. 
*/ + if (!cinfo->intr_enabled) { + u16 intr; + + /* Enable interrupts on the PLX chip */ + intr = readw(&cinfo->ctl_addr.p9060-> + intr_ctrl_stat) | 0x0900; + cy_writew(&cinfo->ctl_addr.p9060-> + intr_ctrl_stat, intr); + /* Enable interrupts on the FW */ + retval = cyz_issue_cmd(cinfo, 0, + C_CM_IRQ_ENBL, 0L); + if (retval != 0) { + printk(KERN_ERR "cyc:IRQ enable retval " + "was %x\n", retval); + } + cinfo->intr_enabled = 1; + } + } +#endif /* CONFIG_CYZ_INTR */ + /* Make sure this Z port really exists in hardware */ + if (info->line > (cinfo->first_line + cinfo->nports - 1)) + return -ENODEV; + } +#ifdef CY_DEBUG_OTHER + printk(KERN_DEBUG "cyc:cy_open ttyC%d\n", info->line); +#endif + tty->driver_data = info; + if (serial_paranoia_check(info, tty->name, "cy_open")) + return -ENODEV; + +#ifdef CY_DEBUG_OPEN + printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line, + info->port.count); +#endif + info->port.count++; +#ifdef CY_DEBUG_COUNT + printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n", + current->pid, info->port.count); +#endif + + /* + * If the port is the middle of closing, bail out now + */ + if (tty_hung_up_p(filp) || (info->port.flags & ASYNC_CLOSING)) { + wait_event_interruptible_tty(info->port.close_wait, + !(info->port.flags & ASYNC_CLOSING)); + return (info->port.flags & ASYNC_HUP_NOTIFY) ? -EAGAIN: -ERESTARTSYS; + } + + /* + * Start up serial port + */ + retval = cy_startup(info, tty); + if (retval) + return retval; + + retval = tty_port_block_til_ready(&info->port, tty, filp); + if (retval) { +#ifdef CY_DEBUG_OPEN + printk(KERN_DEBUG "cyc:cy_open returning after block_til_ready " + "with %d\n", retval); +#endif + return retval; + } + + info->throttle = 0; + tty_port_tty_set(&info->port, tty); + +#ifdef CY_DEBUG_OPEN + printk(KERN_DEBUG "cyc:cy_open done\n"); +#endif + return 0; +} /* cy_open */ + +/* + * cy_wait_until_sent() --- wait until the transmitter is empty + */ +static void cy_wait_until_sent(struct tty_struct *tty, int timeout) +{ + struct cyclades_card *card; + struct cyclades_port *info = tty->driver_data; + unsigned long orig_jiffies; + int char_time; + + if (serial_paranoia_check(info, tty->name, "cy_wait_until_sent")) + return; + + if (info->xmit_fifo_size == 0) + return; /* Just in case.... */ + + orig_jiffies = jiffies; + /* + * Set the check interval to be 1/5 of the estimated time to + * send a single character, and make it at least 1. The check + * interval should also be less than the timeout. + * + * Note: we have to use pretty tight timings here to satisfy + * the NIST-PCTS. + */ + char_time = (info->timeout - HZ / 50) / info->xmit_fifo_size; + char_time = char_time / 5; + if (char_time <= 0) + char_time = 1; + if (timeout < 0) + timeout = 0; + if (timeout) + char_time = min(char_time, timeout); + /* + * If the transmitter hasn't cleared in twice the approximate + * amount of time to send the entire FIFO, it probably won't + * ever clear. This assumes the UART isn't doing flow + * control, which is currently the case. Hence, if it ever + * takes longer than info->timeout, this is probably due to a + * UART bug of some kind. So, we clamp the timeout parameter at + * 2*info->timeout. 
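As a side note on the interval arithmetic described above: the same calculation can be modelled in plain user-space C. This is only a sketch; HZ, the FIFO size and the per-port timeout are assumed inputs rather than values read from the driver's structures.

#include <stdio.h>

/* Stand-alone model of the cy_wait_until_sent() poll-interval math.
 * All values are in "jiffies"; hz, fifo_size and port_timeout are
 * assumptions supplied by the caller.
 */
static long poll_interval(long hz, long fifo_size, long port_timeout,
			  long *timeout)
{
	/* roughly 1/5 of the time needed to drain one character */
	long char_time = (port_timeout - hz / 50) / fifo_size;

	char_time /= 5;
	if (char_time <= 0)
		char_time = 1;		/* never poll more often than every tick */
	if (*timeout < 0)
		*timeout = 0;
	if (*timeout && *timeout < char_time)
		char_time = *timeout;
	/* clamp: never wait longer than twice the estimated drain time */
	if (!*timeout || *timeout > 2 * port_timeout)
		*timeout = 2 * port_timeout;
	return char_time;
}

int main(void)
{
	long timeout = 0;	/* caller passed no explicit limit */
	long step = poll_interval(100, 16, 4, &timeout);

	printf("poll every %ld jiffies, give up after %ld jiffies\n",
	       step, timeout);
	return 0;
}

With HZ=100, a 16 byte FIFO and a 4 jiffy port timeout this prints a 1 tick poll interval and an 8 jiffy overall cap.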
+ */ + if (!timeout || timeout > 2 * info->timeout) + timeout = 2 * info->timeout; +#ifdef CY_DEBUG_WAIT_UNTIL_SENT + printk(KERN_DEBUG "In cy_wait_until_sent(%d) check=%d, jiff=%lu...", + timeout, char_time, jiffies); +#endif + card = info->card; + if (!cy_is_Z(card)) { + while (cyy_readb(info, CySRER) & CyTxRdy) { +#ifdef CY_DEBUG_WAIT_UNTIL_SENT + printk(KERN_DEBUG "Not clean (jiff=%lu)...", jiffies); +#endif + if (msleep_interruptible(jiffies_to_msecs(char_time))) + break; + if (timeout && time_after(jiffies, orig_jiffies + + timeout)) + break; + } + } + /* Run one more char cycle */ + msleep_interruptible(jiffies_to_msecs(char_time * 5)); +#ifdef CY_DEBUG_WAIT_UNTIL_SENT + printk(KERN_DEBUG "Clean (jiff=%lu)...done\n", jiffies); +#endif +} + +static void cy_flush_buffer(struct tty_struct *tty) +{ + struct cyclades_port *info = tty->driver_data; + struct cyclades_card *card; + int channel, retval; + unsigned long flags; + +#ifdef CY_DEBUG_IO + printk(KERN_DEBUG "cyc:cy_flush_buffer ttyC%d\n", info->line); +#endif + + if (serial_paranoia_check(info, tty->name, "cy_flush_buffer")) + return; + + card = info->card; + channel = info->line - card->first_line; + + spin_lock_irqsave(&card->card_lock, flags); + info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; + spin_unlock_irqrestore(&card->card_lock, flags); + + if (cy_is_Z(card)) { /* If it is a Z card, flush the on-board + buffers as well */ + spin_lock_irqsave(&card->card_lock, flags); + retval = cyz_issue_cmd(card, channel, C_CM_FLUSH_TX, 0L); + if (retval != 0) { + printk(KERN_ERR "cyc: flush_buffer retval on ttyC%d " + "was %x\n", info->line, retval); + } + spin_unlock_irqrestore(&card->card_lock, flags); + } + tty_wakeup(tty); +} /* cy_flush_buffer */ + + +static void cy_do_close(struct tty_port *port) +{ + struct cyclades_port *info = container_of(port, struct cyclades_port, + port); + struct cyclades_card *card; + unsigned long flags; + int channel; + + card = info->card; + channel = info->line - card->first_line; + spin_lock_irqsave(&card->card_lock, flags); + + if (!cy_is_Z(card)) { + /* Stop accepting input */ + cyy_writeb(info, CyCAR, channel & 0x03); + cyy_writeb(info, CySRER, cyy_readb(info, CySRER) & ~CyRxData); + if (info->port.flags & ASYNC_INITIALIZED) { + /* Waiting for on-board buffers to be empty before + closing the port */ + spin_unlock_irqrestore(&card->card_lock, flags); + cy_wait_until_sent(port->tty, info->timeout); + spin_lock_irqsave(&card->card_lock, flags); + } + } else { +#ifdef Z_WAKE + /* Waiting for on-board buffers to be empty before closing + the port */ + struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl; + int retval; + + if (readl(&ch_ctrl->flow_status) != C_FS_TXIDLE) { + retval = cyz_issue_cmd(card, channel, C_CM_IOCTLW, 0L); + if (retval != 0) { + printk(KERN_DEBUG "cyc:cy_close retval on " + "ttyC%d was %x\n", info->line, retval); + } + spin_unlock_irqrestore(&card->card_lock, flags); + wait_for_completion_interruptible(&info->shutdown_wait); + spin_lock_irqsave(&card->card_lock, flags); + } +#endif + } + spin_unlock_irqrestore(&card->card_lock, flags); + cy_shutdown(info, port->tty); +} + +/* + * This routine is called when a particular tty device is closed. 
+ */ +static void cy_close(struct tty_struct *tty, struct file *filp) +{ + struct cyclades_port *info = tty->driver_data; + if (!info || serial_paranoia_check(info, tty->name, "cy_close")) + return; + tty_port_close(&info->port, tty, filp); +} /* cy_close */ + +/* This routine gets called when tty_write has put something into + * the write_queue. The characters may come from user space or + * kernel space. + * + * This routine will return the number of characters actually + * accepted for writing. + * + * If the port is not already transmitting stuff, start it off by + * enabling interrupts. The interrupt service routine will then + * ensure that the characters are sent. + * If the port is already active, there is no need to kick it. + * + */ +static int cy_write(struct tty_struct *tty, const unsigned char *buf, int count) +{ + struct cyclades_port *info = tty->driver_data; + unsigned long flags; + int c, ret = 0; + +#ifdef CY_DEBUG_IO + printk(KERN_DEBUG "cyc:cy_write ttyC%d\n", info->line); +#endif + + if (serial_paranoia_check(info, tty->name, "cy_write")) + return 0; + + if (!info->port.xmit_buf) + return 0; + + spin_lock_irqsave(&info->card->card_lock, flags); + while (1) { + c = min(count, (int)(SERIAL_XMIT_SIZE - info->xmit_cnt - 1)); + c = min(c, (int)(SERIAL_XMIT_SIZE - info->xmit_head)); + + if (c <= 0) + break; + + memcpy(info->port.xmit_buf + info->xmit_head, buf, c); + info->xmit_head = (info->xmit_head + c) & + (SERIAL_XMIT_SIZE - 1); + info->xmit_cnt += c; + buf += c; + count -= c; + ret += c; + } + spin_unlock_irqrestore(&info->card->card_lock, flags); + + info->idle_stats.xmit_bytes += ret; + info->idle_stats.xmit_idle = jiffies; + + if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) + start_xmit(info); + + return ret; +} /* cy_write */ + +/* + * This routine is called by the kernel to write a single + * character to the tty device. If the kernel uses this routine, + * it must call the flush_chars() routine (if defined) when it is + * done stuffing characters into the driver. If there is no room + * in the queue, the character is ignored. + */ +static int cy_put_char(struct tty_struct *tty, unsigned char ch) +{ + struct cyclades_port *info = tty->driver_data; + unsigned long flags; + +#ifdef CY_DEBUG_IO + printk(KERN_DEBUG "cyc:cy_put_char ttyC%d\n", info->line); +#endif + + if (serial_paranoia_check(info, tty->name, "cy_put_char")) + return 0; + + if (!info->port.xmit_buf) + return 0; + + spin_lock_irqsave(&info->card->card_lock, flags); + if (info->xmit_cnt >= (int)(SERIAL_XMIT_SIZE - 1)) { + spin_unlock_irqrestore(&info->card->card_lock, flags); + return 0; + } + + info->port.xmit_buf[info->xmit_head++] = ch; + info->xmit_head &= SERIAL_XMIT_SIZE - 1; + info->xmit_cnt++; + info->idle_stats.xmit_bytes++; + info->idle_stats.xmit_idle = jiffies; + spin_unlock_irqrestore(&info->card->card_lock, flags); + return 1; +} /* cy_put_char */ + +/* + * This routine is called by the kernel after it has written a + * series of characters to the tty device using put_char(). 
+ */ +static void cy_flush_chars(struct tty_struct *tty) +{ + struct cyclades_port *info = tty->driver_data; + +#ifdef CY_DEBUG_IO + printk(KERN_DEBUG "cyc:cy_flush_chars ttyC%d\n", info->line); +#endif + + if (serial_paranoia_check(info, tty->name, "cy_flush_chars")) + return; + + if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped || + !info->port.xmit_buf) + return; + + start_xmit(info); +} /* cy_flush_chars */ + +/* + * This routine returns the numbers of characters the tty driver + * will accept for queuing to be written. This number is subject + * to change as output buffers get emptied, or if the output flow + * control is activated. + */ +static int cy_write_room(struct tty_struct *tty) +{ + struct cyclades_port *info = tty->driver_data; + int ret; + +#ifdef CY_DEBUG_IO + printk(KERN_DEBUG "cyc:cy_write_room ttyC%d\n", info->line); +#endif + + if (serial_paranoia_check(info, tty->name, "cy_write_room")) + return 0; + ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1; + if (ret < 0) + ret = 0; + return ret; +} /* cy_write_room */ + +static int cy_chars_in_buffer(struct tty_struct *tty) +{ + struct cyclades_port *info = tty->driver_data; + + if (serial_paranoia_check(info, tty->name, "cy_chars_in_buffer")) + return 0; + +#ifdef Z_EXT_CHARS_IN_BUFFER + if (!cy_is_Z(info->card)) { +#endif /* Z_EXT_CHARS_IN_BUFFER */ +#ifdef CY_DEBUG_IO + printk(KERN_DEBUG "cyc:cy_chars_in_buffer ttyC%d %d\n", + info->line, info->xmit_cnt); +#endif + return info->xmit_cnt; +#ifdef Z_EXT_CHARS_IN_BUFFER + } else { + struct BUF_CTRL __iomem *buf_ctrl = info->u.cyz.buf_ctrl; + int char_count; + __u32 tx_put, tx_get, tx_bufsize; + + tx_get = readl(&buf_ctrl->tx_get); + tx_put = readl(&buf_ctrl->tx_put); + tx_bufsize = readl(&buf_ctrl->tx_bufsize); + if (tx_put >= tx_get) + char_count = tx_put - tx_get; + else + char_count = tx_put - tx_get + tx_bufsize; +#ifdef CY_DEBUG_IO + printk(KERN_DEBUG "cyc:cy_chars_in_buffer ttyC%d %d\n", + info->line, info->xmit_cnt + char_count); +#endif + return info->xmit_cnt + char_count; + } +#endif /* Z_EXT_CHARS_IN_BUFFER */ +} /* cy_chars_in_buffer */ + +/* + * ------------------------------------------------------------ + * cy_ioctl() and friends + * ------------------------------------------------------------ + */ + +static void cyy_baud_calc(struct cyclades_port *info, __u32 baud) +{ + int co, co_val, bpr; + __u32 cy_clock = ((info->chip_rev >= CD1400_REV_J) ? 60000000 : + 25000000); + + if (baud == 0) { + info->tbpr = info->tco = info->rbpr = info->rco = 0; + return; + } + + /* determine which prescaler to use */ + for (co = 4, co_val = 2048; co; co--, co_val >>= 2) { + if (cy_clock / co_val / baud > 63) + break; + } + + bpr = (cy_clock / co_val * 2 / baud + 1) / 2; + if (bpr > 255) + bpr = 255; + + info->tbpr = info->rbpr = bpr; + info->tco = info->rco = co; +} + +/* + * This routine finds or computes the various line characteristics. + * It used to be called config_setup + */ +static void cy_set_line_char(struct cyclades_port *info, struct tty_struct *tty) +{ + struct cyclades_card *card; + unsigned long flags; + int channel; + unsigned cflag, iflag; + int baud, baud_rate = 0; + int i; + + if (!tty->termios) /* XXX can this happen at all? 
*/ + return; + + if (info->line == -1) + return; + + cflag = tty->termios->c_cflag; + iflag = tty->termios->c_iflag; + + /* + * Set up the tty->alt_speed kludge + */ + if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI) + tty->alt_speed = 57600; + if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI) + tty->alt_speed = 115200; + if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI) + tty->alt_speed = 230400; + if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP) + tty->alt_speed = 460800; + + card = info->card; + channel = info->line - card->first_line; + + if (!cy_is_Z(card)) { + u32 cflags; + + /* baud rate */ + baud = tty_get_baud_rate(tty); + if (baud == 38400 && (info->port.flags & ASYNC_SPD_MASK) == + ASYNC_SPD_CUST) { + if (info->custom_divisor) + baud_rate = info->baud / info->custom_divisor; + else + baud_rate = info->baud; + } else if (baud > CD1400_MAX_SPEED) { + baud = CD1400_MAX_SPEED; + } + /* find the baud index */ + for (i = 0; i < 20; i++) { + if (baud == baud_table[i]) + break; + } + if (i == 20) + i = 19; /* CD1400_MAX_SPEED */ + + if (baud == 38400 && (info->port.flags & ASYNC_SPD_MASK) == + ASYNC_SPD_CUST) { + cyy_baud_calc(info, baud_rate); + } else { + if (info->chip_rev >= CD1400_REV_J) { + /* It is a CD1400 rev. J or later */ + info->tbpr = baud_bpr_60[i]; /* Tx BPR */ + info->tco = baud_co_60[i]; /* Tx CO */ + info->rbpr = baud_bpr_60[i]; /* Rx BPR */ + info->rco = baud_co_60[i]; /* Rx CO */ + } else { + info->tbpr = baud_bpr_25[i]; /* Tx BPR */ + info->tco = baud_co_25[i]; /* Tx CO */ + info->rbpr = baud_bpr_25[i]; /* Rx BPR */ + info->rco = baud_co_25[i]; /* Rx CO */ + } + } + if (baud_table[i] == 134) { + /* get it right for 134.5 baud */ + info->timeout = (info->xmit_fifo_size * HZ * 30 / 269) + + 2; + } else if (baud == 38400 && (info->port.flags & ASYNC_SPD_MASK) == + ASYNC_SPD_CUST) { + info->timeout = (info->xmit_fifo_size * HZ * 15 / + baud_rate) + 2; + } else if (baud_table[i]) { + info->timeout = (info->xmit_fifo_size * HZ * 15 / + baud_table[i]) + 2; + /* this needs to be propagated into the card info */ + } else { + info->timeout = 0; + } + /* By tradition (is it a standard?) a baud rate of zero + implies the line should be/has been closed. A bit + later in this routine such a test is performed. */ + + /* byte size and parity */ + info->cor5 = 0; + info->cor4 = 0; + /* receive threshold */ + info->cor3 = (info->default_threshold ? + info->default_threshold : baud_cor3[i]); + info->cor2 = CyETC; + switch (cflag & CSIZE) { + case CS5: + info->cor1 = Cy_5_BITS; + break; + case CS6: + info->cor1 = Cy_6_BITS; + break; + case CS7: + info->cor1 = Cy_7_BITS; + break; + case CS8: + info->cor1 = Cy_8_BITS; + break; + } + if (cflag & CSTOPB) + info->cor1 |= Cy_2_STOP; + + if (cflag & PARENB) { + if (cflag & PARODD) + info->cor1 |= CyPARITY_O; + else + info->cor1 |= CyPARITY_E; + } else + info->cor1 |= CyPARITY_NONE; + + /* CTS flow control flag */ + if (cflag & CRTSCTS) { + info->port.flags |= ASYNC_CTS_FLOW; + info->cor2 |= CyCtsAE; + } else { + info->port.flags &= ~ASYNC_CTS_FLOW; + info->cor2 &= ~CyCtsAE; + } + if (cflag & CLOCAL) + info->port.flags &= ~ASYNC_CHECK_CD; + else + info->port.flags |= ASYNC_CHECK_CD; + + /*********************************************** + The hardware option, CyRtsAO, presents RTS when + the chip has characters to send. Since most modems + use RTS as reverse (inbound) flow control, this + option is not used. 
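The prescaler search in cyy_baud_calc() above can be reproduced in user space to see which co/bpr pair a requested rate ends up with. A minimal sketch, assuming the 60 MHz clock of the later CD1400 revisions; the example rate in main() is only an illustration.

#include <stdio.h>

static void cd1400_baud(unsigned long clock, unsigned long baud,
			int *co_out, int *bpr_out)
{
	int co, bpr;
	unsigned long co_val;

	if (baud == 0) {
		*co_out = *bpr_out = 0;
		return;
	}
	/* walk from the coarsest prescaler down; stop at the first one
	 * whose bit-period divisor exceeds 63 */
	for (co = 4, co_val = 2048; co; co--, co_val >>= 2) {
		if (clock / co_val / baud > 63)
			break;
	}
	/* round the divisor to the nearest integer, cap at 8 bits */
	bpr = (clock / co_val * 2 / baud + 1) / 2;
	if (bpr > 255)
		bpr = 255;
	*co_out = co;
	*bpr_out = bpr;
}

int main(void)
{
	int co, bpr;

	cd1400_baud(60000000, 9600, &co, &bpr);
	printf("9600 baud -> co=%d bpr=%d (actual %lu baud)\n",
	       co, bpr, 60000000UL / ((2048UL >> (2 * (4 - co))) * bpr));
	return 0;
}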
If inbound flow control is + necessary, DTR can be programmed to provide the + appropriate signals for use with a non-standard + cable. Contact Marcio Saito for details. + ***********************************************/ + + channel &= 0x03; + + spin_lock_irqsave(&card->card_lock, flags); + cyy_writeb(info, CyCAR, channel); + + /* tx and rx baud rate */ + + cyy_writeb(info, CyTCOR, info->tco); + cyy_writeb(info, CyTBPR, info->tbpr); + cyy_writeb(info, CyRCOR, info->rco); + cyy_writeb(info, CyRBPR, info->rbpr); + + /* set line characteristics according configuration */ + + cyy_writeb(info, CySCHR1, START_CHAR(tty)); + cyy_writeb(info, CySCHR2, STOP_CHAR(tty)); + cyy_writeb(info, CyCOR1, info->cor1); + cyy_writeb(info, CyCOR2, info->cor2); + cyy_writeb(info, CyCOR3, info->cor3); + cyy_writeb(info, CyCOR4, info->cor4); + cyy_writeb(info, CyCOR5, info->cor5); + + cyy_issue_cmd(info, CyCOR_CHANGE | CyCOR1ch | CyCOR2ch | + CyCOR3ch); + + /* !!! Is this needed? */ + cyy_writeb(info, CyCAR, channel); + cyy_writeb(info, CyRTPR, + (info->default_timeout ? info->default_timeout : 0x02)); + /* 10ms rx timeout */ + + cflags = CyCTS; + if (!C_CLOCAL(tty)) + cflags |= CyDSR | CyRI | CyDCD; + /* without modem intr */ + cyy_writeb(info, CySRER, cyy_readb(info, CySRER) | CyMdmCh); + /* act on 1->0 modem transitions */ + if ((cflag & CRTSCTS) && info->rflow) + cyy_writeb(info, CyMCOR1, cflags | rflow_thr[i]); + else + cyy_writeb(info, CyMCOR1, cflags); + /* act on 0->1 modem transitions */ + cyy_writeb(info, CyMCOR2, cflags); + + if (i == 0) /* baud rate is zero, turn off line */ + cyy_change_rts_dtr(info, 0, TIOCM_DTR); + else + cyy_change_rts_dtr(info, TIOCM_DTR, 0); + + clear_bit(TTY_IO_ERROR, &tty->flags); + spin_unlock_irqrestore(&card->card_lock, flags); + + } else { + struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl; + __u32 sw_flow; + int retval; + + if (!cyz_is_loaded(card)) + return; + + /* baud rate */ + baud = tty_get_baud_rate(tty); + if (baud == 38400 && (info->port.flags & ASYNC_SPD_MASK) == + ASYNC_SPD_CUST) { + if (info->custom_divisor) + baud_rate = info->baud / info->custom_divisor; + else + baud_rate = info->baud; + } else if (baud > CYZ_MAX_SPEED) { + baud = CYZ_MAX_SPEED; + } + cy_writel(&ch_ctrl->comm_baud, baud); + + if (baud == 134) { + /* get it right for 134.5 baud */ + info->timeout = (info->xmit_fifo_size * HZ * 30 / 269) + + 2; + } else if (baud == 38400 && (info->port.flags & ASYNC_SPD_MASK) == + ASYNC_SPD_CUST) { + info->timeout = (info->xmit_fifo_size * HZ * 15 / + baud_rate) + 2; + } else if (baud) { + info->timeout = (info->xmit_fifo_size * HZ * 15 / + baud) + 2; + /* this needs to be propagated into the card info */ + } else { + info->timeout = 0; + } + + /* byte size and parity */ + switch (cflag & CSIZE) { + case CS5: + cy_writel(&ch_ctrl->comm_data_l, C_DL_CS5); + break; + case CS6: + cy_writel(&ch_ctrl->comm_data_l, C_DL_CS6); + break; + case CS7: + cy_writel(&ch_ctrl->comm_data_l, C_DL_CS7); + break; + case CS8: + cy_writel(&ch_ctrl->comm_data_l, C_DL_CS8); + break; + } + if (cflag & CSTOPB) { + cy_writel(&ch_ctrl->comm_data_l, + readl(&ch_ctrl->comm_data_l) | C_DL_2STOP); + } else { + cy_writel(&ch_ctrl->comm_data_l, + readl(&ch_ctrl->comm_data_l) | C_DL_1STOP); + } + if (cflag & PARENB) { + if (cflag & PARODD) + cy_writel(&ch_ctrl->comm_parity, C_PR_ODD); + else + cy_writel(&ch_ctrl->comm_parity, C_PR_EVEN); + } else + cy_writel(&ch_ctrl->comm_parity, C_PR_NONE); + + /* CTS flow control flag */ + if (cflag & CRTSCTS) { + cy_writel(&ch_ctrl->hw_flow, + 
readl(&ch_ctrl->hw_flow) | C_RS_CTS | C_RS_RTS); + } else { + cy_writel(&ch_ctrl->hw_flow, readl(&ch_ctrl->hw_flow) & + ~(C_RS_CTS | C_RS_RTS)); + } + /* As the HW flow control is done in firmware, the driver + doesn't need to care about it */ + info->port.flags &= ~ASYNC_CTS_FLOW; + + /* XON/XOFF/XANY flow control flags */ + sw_flow = 0; + if (iflag & IXON) { + sw_flow |= C_FL_OXX; + if (iflag & IXANY) + sw_flow |= C_FL_OIXANY; + } + cy_writel(&ch_ctrl->sw_flow, sw_flow); + + retval = cyz_issue_cmd(card, channel, C_CM_IOCTL, 0L); + if (retval != 0) { + printk(KERN_ERR "cyc:set_line_char retval on ttyC%d " + "was %x\n", info->line, retval); + } + + /* CD sensitivity */ + if (cflag & CLOCAL) + info->port.flags &= ~ASYNC_CHECK_CD; + else + info->port.flags |= ASYNC_CHECK_CD; + + if (baud == 0) { /* baud rate is zero, turn off line */ + cy_writel(&ch_ctrl->rs_control, + readl(&ch_ctrl->rs_control) & ~C_RS_DTR); +#ifdef CY_DEBUG_DTR + printk(KERN_DEBUG "cyc:set_line_char dropping Z DTR\n"); +#endif + } else { + cy_writel(&ch_ctrl->rs_control, + readl(&ch_ctrl->rs_control) | C_RS_DTR); +#ifdef CY_DEBUG_DTR + printk(KERN_DEBUG "cyc:set_line_char raising Z DTR\n"); +#endif + } + + retval = cyz_issue_cmd(card, channel, C_CM_IOCTLM, 0L); + if (retval != 0) { + printk(KERN_ERR "cyc:set_line_char(2) retval on ttyC%d " + "was %x\n", info->line, retval); + } + + clear_bit(TTY_IO_ERROR, &tty->flags); + } +} /* set_line_char */ + +static int cy_get_serial_info(struct cyclades_port *info, + struct serial_struct __user *retinfo) +{ + struct cyclades_card *cinfo = info->card; + struct serial_struct tmp = { + .type = info->type, + .line = info->line, + .port = (info->card - cy_card) * 0x100 + info->line - + cinfo->first_line, + .irq = cinfo->irq, + .flags = info->port.flags, + .close_delay = info->port.close_delay, + .closing_wait = info->port.closing_wait, + .baud_base = info->baud, + .custom_divisor = info->custom_divisor, + .hub6 = 0, /*!!! */ + }; + return copy_to_user(retinfo, &tmp, sizeof(*retinfo)) ? -EFAULT : 0; +} + +static int +cy_set_serial_info(struct cyclades_port *info, struct tty_struct *tty, + struct serial_struct __user *new_info) +{ + struct serial_struct new_serial; + int ret; + + if (copy_from_user(&new_serial, new_info, sizeof(new_serial))) + return -EFAULT; + + mutex_lock(&info->port.mutex); + if (!capable(CAP_SYS_ADMIN)) { + if (new_serial.close_delay != info->port.close_delay || + new_serial.baud_base != info->baud || + (new_serial.flags & ASYNC_FLAGS & + ~ASYNC_USR_MASK) != + (info->port.flags & ASYNC_FLAGS & ~ASYNC_USR_MASK)) + { + mutex_unlock(&info->port.mutex); + return -EPERM; + } + info->port.flags = (info->port.flags & ~ASYNC_USR_MASK) | + (new_serial.flags & ASYNC_USR_MASK); + info->baud = new_serial.baud_base; + info->custom_divisor = new_serial.custom_divisor; + goto check_and_exit; + } + + /* + * OK, past this point, all the error checking has been done. + * At this point, we start making changes..... 
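The non-privileged path of cy_set_serial_info() above only lets the ASYNC_USR_MASK bits change, and close_delay/closing_wait arrive in 10 ms units. Below is a small sketch of both conversions; the mask and HZ values are illustrative stand-ins, not the kernel's definitions.

#include <stdio.h>

#define HZ		100		/* stand-in tick rate */
#define ASYNC_USR_MASK	0x3430U		/* stand-in value for the sketch */

static unsigned int apply_user_flags(unsigned int cur, unsigned int requested)
{
	/* keep privileged bits, take only user-changeable bits from caller */
	return (cur & ~ASYNC_USR_MASK) | (requested & ASYNC_USR_MASK);
}

static unsigned int csec_to_jiffies(unsigned int csec)
{
	return csec * HZ / 100;		/* 10 ms units -> ticks */
}

int main(void)
{
	printf("flags: %#x\n", apply_user_flags(0x0003, 0xffff));
	printf("close_delay of 50 (0.5 s) -> %u jiffies\n",
	       csec_to_jiffies(50));
	return 0;
}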
+ */ + + info->baud = new_serial.baud_base; + info->custom_divisor = new_serial.custom_divisor; + info->port.flags = (info->port.flags & ~ASYNC_FLAGS) | + (new_serial.flags & ASYNC_FLAGS); + info->port.close_delay = new_serial.close_delay * HZ / 100; + info->port.closing_wait = new_serial.closing_wait * HZ / 100; + +check_and_exit: + if (info->port.flags & ASYNC_INITIALIZED) { + cy_set_line_char(info, tty); + ret = 0; + } else { + ret = cy_startup(info, tty); + } + mutex_unlock(&info->port.mutex); + return ret; +} /* set_serial_info */ + +/* + * get_lsr_info - get line status register info + * + * Purpose: Let user call ioctl() to get info when the UART physically + * is emptied. On bus types like RS485, the transmitter must + * release the bus after transmitting. This must be done when + * the transmit shift register is empty, not be done when the + * transmit holding register is empty. This functionality + * allows an RS485 driver to be written in user space. + */ +static int get_lsr_info(struct cyclades_port *info, unsigned int __user *value) +{ + struct cyclades_card *card = info->card; + unsigned int result; + unsigned long flags; + u8 status; + + if (!cy_is_Z(card)) { + spin_lock_irqsave(&card->card_lock, flags); + status = cyy_readb(info, CySRER) & (CyTxRdy | CyTxMpty); + spin_unlock_irqrestore(&card->card_lock, flags); + result = (status ? 0 : TIOCSER_TEMT); + } else { + /* Not supported yet */ + return -EINVAL; + } + return put_user(result, (unsigned long __user *)value); +} + +static int cy_tiocmget(struct tty_struct *tty) +{ + struct cyclades_port *info = tty->driver_data; + struct cyclades_card *card; + int result; + + if (serial_paranoia_check(info, tty->name, __func__)) + return -ENODEV; + + card = info->card; + + if (!cy_is_Z(card)) { + unsigned long flags; + int channel = info->line - card->first_line; + u8 status; + + spin_lock_irqsave(&card->card_lock, flags); + cyy_writeb(info, CyCAR, channel & 0x03); + status = cyy_readb(info, CyMSVR1); + status |= cyy_readb(info, CyMSVR2); + spin_unlock_irqrestore(&card->card_lock, flags); + + if (info->rtsdtr_inv) { + result = ((status & CyRTS) ? TIOCM_DTR : 0) | + ((status & CyDTR) ? TIOCM_RTS : 0); + } else { + result = ((status & CyRTS) ? TIOCM_RTS : 0) | + ((status & CyDTR) ? TIOCM_DTR : 0); + } + result |= ((status & CyDCD) ? TIOCM_CAR : 0) | + ((status & CyRI) ? TIOCM_RNG : 0) | + ((status & CyDSR) ? TIOCM_DSR : 0) | + ((status & CyCTS) ? TIOCM_CTS : 0); + } else { + u32 lstatus; + + if (!cyz_is_loaded(card)) { + result = -ENODEV; + goto end; + } + + lstatus = readl(&info->u.cyz.ch_ctrl->rs_status); + result = ((lstatus & C_RS_RTS) ? TIOCM_RTS : 0) | + ((lstatus & C_RS_DTR) ? TIOCM_DTR : 0) | + ((lstatus & C_RS_DCD) ? TIOCM_CAR : 0) | + ((lstatus & C_RS_RI) ? TIOCM_RNG : 0) | + ((lstatus & C_RS_DSR) ? TIOCM_DSR : 0) | + ((lstatus & C_RS_CTS) ? 
TIOCM_CTS : 0); + } +end: + return result; +} /* cy_tiomget */ + +static int +cy_tiocmset(struct tty_struct *tty, + unsigned int set, unsigned int clear) +{ + struct cyclades_port *info = tty->driver_data; + struct cyclades_card *card; + unsigned long flags; + + if (serial_paranoia_check(info, tty->name, __func__)) + return -ENODEV; + + card = info->card; + if (!cy_is_Z(card)) { + spin_lock_irqsave(&card->card_lock, flags); + cyy_change_rts_dtr(info, set, clear); + spin_unlock_irqrestore(&card->card_lock, flags); + } else { + struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl; + int retval, channel = info->line - card->first_line; + u32 rs; + + if (!cyz_is_loaded(card)) + return -ENODEV; + + spin_lock_irqsave(&card->card_lock, flags); + rs = readl(&ch_ctrl->rs_control); + if (set & TIOCM_RTS) + rs |= C_RS_RTS; + if (clear & TIOCM_RTS) + rs &= ~C_RS_RTS; + if (set & TIOCM_DTR) { + rs |= C_RS_DTR; +#ifdef CY_DEBUG_DTR + printk(KERN_DEBUG "cyc:set_modem_info raising Z DTR\n"); +#endif + } + if (clear & TIOCM_DTR) { + rs &= ~C_RS_DTR; +#ifdef CY_DEBUG_DTR + printk(KERN_DEBUG "cyc:set_modem_info clearing " + "Z DTR\n"); +#endif + } + cy_writel(&ch_ctrl->rs_control, rs); + retval = cyz_issue_cmd(card, channel, C_CM_IOCTLM, 0L); + spin_unlock_irqrestore(&card->card_lock, flags); + if (retval != 0) { + printk(KERN_ERR "cyc:set_modem_info retval on ttyC%d " + "was %x\n", info->line, retval); + } + } + return 0; +} + +/* + * cy_break() --- routine which turns the break handling on or off + */ +static int cy_break(struct tty_struct *tty, int break_state) +{ + struct cyclades_port *info = tty->driver_data; + struct cyclades_card *card; + unsigned long flags; + int retval = 0; + + if (serial_paranoia_check(info, tty->name, "cy_break")) + return -EINVAL; + + card = info->card; + + spin_lock_irqsave(&card->card_lock, flags); + if (!cy_is_Z(card)) { + /* Let the transmit ISR take care of this (since it + requires stuffing characters into the output stream). 
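The status translation in cy_tiocmget() above has one quirk worth illustrating: on boards with rtsdtr_inv set, the RTS and DTR readbacks are swapped before being reported as TIOCM_* bits. A user-space sketch follows; the Cy* register bit values are stand-ins, not the driver's definitions.

#include <stdio.h>

#define CyRTS		0x01		/* stand-in register bits */
#define CyDTR		0x02
#define TIOCM_DTR	0x002
#define TIOCM_RTS	0x004

static int msvr_to_tiocm(unsigned char status, int rtsdtr_inv)
{
	if (rtsdtr_inv)
		return ((status & CyRTS) ? TIOCM_DTR : 0) |
		       ((status & CyDTR) ? TIOCM_RTS : 0);
	return ((status & CyRTS) ? TIOCM_RTS : 0) |
	       ((status & CyDTR) ? TIOCM_DTR : 0);
}

int main(void)
{
	/* same register value, reported differently on swapped hardware */
	printf("normal:  %#x\n", msvr_to_tiocm(CyRTS, 0));
	printf("swapped: %#x\n", msvr_to_tiocm(CyRTS, 1));
	return 0;
}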
+ */ + if (break_state == -1) { + if (!info->breakon) { + info->breakon = 1; + if (!info->xmit_cnt) { + spin_unlock_irqrestore(&card->card_lock, flags); + start_xmit(info); + spin_lock_irqsave(&card->card_lock, flags); + } + } + } else { + if (!info->breakoff) { + info->breakoff = 1; + if (!info->xmit_cnt) { + spin_unlock_irqrestore(&card->card_lock, flags); + start_xmit(info); + spin_lock_irqsave(&card->card_lock, flags); + } + } + } + } else { + if (break_state == -1) { + retval = cyz_issue_cmd(card, + info->line - card->first_line, + C_CM_SET_BREAK, 0L); + if (retval != 0) { + printk(KERN_ERR "cyc:cy_break (set) retval on " + "ttyC%d was %x\n", info->line, retval); + } + } else { + retval = cyz_issue_cmd(card, + info->line - card->first_line, + C_CM_CLR_BREAK, 0L); + if (retval != 0) { + printk(KERN_DEBUG "cyc:cy_break (clr) retval " + "on ttyC%d was %x\n", info->line, + retval); + } + } + } + spin_unlock_irqrestore(&card->card_lock, flags); + return retval; +} /* cy_break */ + +static int set_threshold(struct cyclades_port *info, unsigned long value) +{ + struct cyclades_card *card = info->card; + unsigned long flags; + + if (!cy_is_Z(card)) { + info->cor3 &= ~CyREC_FIFO; + info->cor3 |= value & CyREC_FIFO; + + spin_lock_irqsave(&card->card_lock, flags); + cyy_writeb(info, CyCOR3, info->cor3); + cyy_issue_cmd(info, CyCOR_CHANGE | CyCOR3ch); + spin_unlock_irqrestore(&card->card_lock, flags); + } + return 0; +} /* set_threshold */ + +static int get_threshold(struct cyclades_port *info, + unsigned long __user *value) +{ + struct cyclades_card *card = info->card; + + if (!cy_is_Z(card)) { + u8 tmp = cyy_readb(info, CyCOR3) & CyREC_FIFO; + return put_user(tmp, value); + } + return 0; +} /* get_threshold */ + +static int set_timeout(struct cyclades_port *info, unsigned long value) +{ + struct cyclades_card *card = info->card; + unsigned long flags; + + if (!cy_is_Z(card)) { + spin_lock_irqsave(&card->card_lock, flags); + cyy_writeb(info, CyRTPR, value & 0xff); + spin_unlock_irqrestore(&card->card_lock, flags); + } + return 0; +} /* set_timeout */ + +static int get_timeout(struct cyclades_port *info, + unsigned long __user *value) +{ + struct cyclades_card *card = info->card; + + if (!cy_is_Z(card)) { + u8 tmp = cyy_readb(info, CyRTPR); + return put_user(tmp, value); + } + return 0; +} /* get_timeout */ + +static int cy_cflags_changed(struct cyclades_port *info, unsigned long arg, + struct cyclades_icount *cprev) +{ + struct cyclades_icount cnow; + unsigned long flags; + int ret; + + spin_lock_irqsave(&info->card->card_lock, flags); + cnow = info->icount; /* atomic copy */ + spin_unlock_irqrestore(&info->card->card_lock, flags); + + ret = ((arg & TIOCM_RNG) && (cnow.rng != cprev->rng)) || + ((arg & TIOCM_DSR) && (cnow.dsr != cprev->dsr)) || + ((arg & TIOCM_CD) && (cnow.dcd != cprev->dcd)) || + ((arg & TIOCM_CTS) && (cnow.cts != cprev->cts)); + + *cprev = cnow; + + return ret; +} + +/* + * This routine allows the tty driver to implement device- + * specific ioctl's. If the ioctl number passed in cmd is + * not recognized by the driver, it should return ENOIOCTLCMD. 
+ */ +static int +cy_ioctl(struct tty_struct *tty, + unsigned int cmd, unsigned long arg) +{ + struct cyclades_port *info = tty->driver_data; + struct cyclades_icount cnow; /* kernel counter temps */ + int ret_val = 0; + unsigned long flags; + void __user *argp = (void __user *)arg; + + if (serial_paranoia_check(info, tty->name, "cy_ioctl")) + return -ENODEV; + +#ifdef CY_DEBUG_OTHER + printk(KERN_DEBUG "cyc:cy_ioctl ttyC%d, cmd = %x arg = %lx\n", + info->line, cmd, arg); +#endif + + switch (cmd) { + case CYGETMON: + if (copy_to_user(argp, &info->mon, sizeof(info->mon))) { + ret_val = -EFAULT; + break; + } + memset(&info->mon, 0, sizeof(info->mon)); + break; + case CYGETTHRESH: + ret_val = get_threshold(info, argp); + break; + case CYSETTHRESH: + ret_val = set_threshold(info, arg); + break; + case CYGETDEFTHRESH: + ret_val = put_user(info->default_threshold, + (unsigned long __user *)argp); + break; + case CYSETDEFTHRESH: + info->default_threshold = arg & 0x0f; + break; + case CYGETTIMEOUT: + ret_val = get_timeout(info, argp); + break; + case CYSETTIMEOUT: + ret_val = set_timeout(info, arg); + break; + case CYGETDEFTIMEOUT: + ret_val = put_user(info->default_timeout, + (unsigned long __user *)argp); + break; + case CYSETDEFTIMEOUT: + info->default_timeout = arg & 0xff; + break; + case CYSETRFLOW: + info->rflow = (int)arg; + break; + case CYGETRFLOW: + ret_val = info->rflow; + break; + case CYSETRTSDTR_INV: + info->rtsdtr_inv = (int)arg; + break; + case CYGETRTSDTR_INV: + ret_val = info->rtsdtr_inv; + break; + case CYGETCD1400VER: + ret_val = info->chip_rev; + break; +#ifndef CONFIG_CYZ_INTR + case CYZSETPOLLCYCLE: + cyz_polling_cycle = (arg * HZ) / 1000; + break; + case CYZGETPOLLCYCLE: + ret_val = (cyz_polling_cycle * 1000) / HZ; + break; +#endif /* CONFIG_CYZ_INTR */ + case CYSETWAIT: + info->port.closing_wait = (unsigned short)arg * HZ / 100; + break; + case CYGETWAIT: + ret_val = info->port.closing_wait / (HZ / 100); + break; + case TIOCGSERIAL: + ret_val = cy_get_serial_info(info, argp); + break; + case TIOCSSERIAL: + ret_val = cy_set_serial_info(info, tty, argp); + break; + case TIOCSERGETLSR: /* Get line status register */ + ret_val = get_lsr_info(info, argp); + break; + /* + * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change + * - mask passed in arg for lines of interest + * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking) + * Caller should use TIOCGICOUNT to see which one it was + */ + case TIOCMIWAIT: + spin_lock_irqsave(&info->card->card_lock, flags); + /* note the counters on entry */ + cnow = info->icount; + spin_unlock_irqrestore(&info->card->card_lock, flags); + ret_val = wait_event_interruptible(info->port.delta_msr_wait, + cy_cflags_changed(info, arg, &cnow)); + break; + + /* + * Get counter of input serial line interrupts (DCD,RI,DSR,CTS) + * Return: write counters to the user passed counter struct + * NB: both 1->0 and 0->1 transitions are counted except for + * RI where only 0->1 is counted. 
+ */ + default: + ret_val = -ENOIOCTLCMD; + } + +#ifdef CY_DEBUG_OTHER + printk(KERN_DEBUG "cyc:cy_ioctl done\n"); +#endif + return ret_val; +} /* cy_ioctl */ + +static int cy_get_icount(struct tty_struct *tty, + struct serial_icounter_struct *sic) +{ + struct cyclades_port *info = tty->driver_data; + struct cyclades_icount cnow; /* Used to snapshot */ + unsigned long flags; + + spin_lock_irqsave(&info->card->card_lock, flags); + cnow = info->icount; + spin_unlock_irqrestore(&info->card->card_lock, flags); + + sic->cts = cnow.cts; + sic->dsr = cnow.dsr; + sic->rng = cnow.rng; + sic->dcd = cnow.dcd; + sic->rx = cnow.rx; + sic->tx = cnow.tx; + sic->frame = cnow.frame; + sic->overrun = cnow.overrun; + sic->parity = cnow.parity; + sic->brk = cnow.brk; + sic->buf_overrun = cnow.buf_overrun; + return 0; +} + +/* + * This routine allows the tty driver to be notified when + * device's termios settings have changed. Note that a + * well-designed tty driver should be prepared to accept the case + * where old == NULL, and try to do something rational. + */ +static void cy_set_termios(struct tty_struct *tty, struct ktermios *old_termios) +{ + struct cyclades_port *info = tty->driver_data; + +#ifdef CY_DEBUG_OTHER + printk(KERN_DEBUG "cyc:cy_set_termios ttyC%d\n", info->line); +#endif + + cy_set_line_char(info, tty); + + if ((old_termios->c_cflag & CRTSCTS) && + !(tty->termios->c_cflag & CRTSCTS)) { + tty->hw_stopped = 0; + cy_start(tty); + } +#if 0 + /* + * No need to wake up processes in open wait, since they + * sample the CLOCAL flag once, and don't recheck it. + * XXX It's not clear whether the current behavior is correct + * or not. Hence, this may change..... + */ + if (!(old_termios->c_cflag & CLOCAL) && + (tty->termios->c_cflag & CLOCAL)) + wake_up_interruptible(&info->port.open_wait); +#endif +} /* cy_set_termios */ + +/* This function is used to send a high-priority XON/XOFF character to + the device. +*/ +static void cy_send_xchar(struct tty_struct *tty, char ch) +{ + struct cyclades_port *info = tty->driver_data; + struct cyclades_card *card; + int channel; + + if (serial_paranoia_check(info, tty->name, "cy_send_xchar")) + return; + + info->x_char = ch; + + if (ch) + cy_start(tty); + + card = info->card; + channel = info->line - card->first_line; + + if (cy_is_Z(card)) { + if (ch == STOP_CHAR(tty)) + cyz_issue_cmd(card, channel, C_CM_SENDXOFF, 0L); + else if (ch == START_CHAR(tty)) + cyz_issue_cmd(card, channel, C_CM_SENDXON, 0L); + } +} + +/* This routine is called by the upper-layer tty layer to signal + that incoming characters should be throttled because the input + buffers are close to full. 
+ */ +static void cy_throttle(struct tty_struct *tty) +{ + struct cyclades_port *info = tty->driver_data; + struct cyclades_card *card; + unsigned long flags; + +#ifdef CY_DEBUG_THROTTLE + char buf[64]; + + printk(KERN_DEBUG "cyc:throttle %s: %ld...ttyC%d\n", tty_name(tty, buf), + tty->ldisc.chars_in_buffer(tty), info->line); +#endif + + if (serial_paranoia_check(info, tty->name, "cy_throttle")) + return; + + card = info->card; + + if (I_IXOFF(tty)) { + if (!cy_is_Z(card)) + cy_send_xchar(tty, STOP_CHAR(tty)); + else + info->throttle = 1; + } + + if (tty->termios->c_cflag & CRTSCTS) { + if (!cy_is_Z(card)) { + spin_lock_irqsave(&card->card_lock, flags); + cyy_change_rts_dtr(info, 0, TIOCM_RTS); + spin_unlock_irqrestore(&card->card_lock, flags); + } else { + info->throttle = 1; + } + } +} /* cy_throttle */ + +/* + * This routine notifies the tty driver that it should signal + * that characters can now be sent to the tty without fear of + * overrunning the input buffers of the line disciplines. + */ +static void cy_unthrottle(struct tty_struct *tty) +{ + struct cyclades_port *info = tty->driver_data; + struct cyclades_card *card; + unsigned long flags; + +#ifdef CY_DEBUG_THROTTLE + char buf[64]; + + printk(KERN_DEBUG "cyc:unthrottle %s: %ld...ttyC%d\n", + tty_name(tty, buf), tty_chars_in_buffer(tty), info->line); +#endif + + if (serial_paranoia_check(info, tty->name, "cy_unthrottle")) + return; + + if (I_IXOFF(tty)) { + if (info->x_char) + info->x_char = 0; + else + cy_send_xchar(tty, START_CHAR(tty)); + } + + if (tty->termios->c_cflag & CRTSCTS) { + card = info->card; + if (!cy_is_Z(card)) { + spin_lock_irqsave(&card->card_lock, flags); + cyy_change_rts_dtr(info, TIOCM_RTS, 0); + spin_unlock_irqrestore(&card->card_lock, flags); + } else { + info->throttle = 0; + } + } +} /* cy_unthrottle */ + +/* cy_start and cy_stop provide software output flow control as a + function of XON/XOFF, software CTS, and other such stuff. +*/ +static void cy_stop(struct tty_struct *tty) +{ + struct cyclades_card *cinfo; + struct cyclades_port *info = tty->driver_data; + int channel; + unsigned long flags; + +#ifdef CY_DEBUG_OTHER + printk(KERN_DEBUG "cyc:cy_stop ttyC%d\n", info->line); +#endif + + if (serial_paranoia_check(info, tty->name, "cy_stop")) + return; + + cinfo = info->card; + channel = info->line - cinfo->first_line; + if (!cy_is_Z(cinfo)) { + spin_lock_irqsave(&cinfo->card_lock, flags); + cyy_writeb(info, CyCAR, channel & 0x03); + cyy_writeb(info, CySRER, cyy_readb(info, CySRER) & ~CyTxRdy); + spin_unlock_irqrestore(&cinfo->card_lock, flags); + } +} /* cy_stop */ + +static void cy_start(struct tty_struct *tty) +{ + struct cyclades_card *cinfo; + struct cyclades_port *info = tty->driver_data; + int channel; + unsigned long flags; + +#ifdef CY_DEBUG_OTHER + printk(KERN_DEBUG "cyc:cy_start ttyC%d\n", info->line); +#endif + + if (serial_paranoia_check(info, tty->name, "cy_start")) + return; + + cinfo = info->card; + channel = info->line - cinfo->first_line; + if (!cy_is_Z(cinfo)) { + spin_lock_irqsave(&cinfo->card_lock, flags); + cyy_writeb(info, CyCAR, channel & 0x03); + cyy_writeb(info, CySRER, cyy_readb(info, CySRER) | CyTxRdy); + spin_unlock_irqrestore(&cinfo->card_lock, flags); + } +} /* cy_start */ + +/* + * cy_hangup() --- called by tty_hangup() when a hangup is signaled. 
+ */ +static void cy_hangup(struct tty_struct *tty) +{ + struct cyclades_port *info = tty->driver_data; + +#ifdef CY_DEBUG_OTHER + printk(KERN_DEBUG "cyc:cy_hangup ttyC%d\n", info->line); +#endif + + if (serial_paranoia_check(info, tty->name, "cy_hangup")) + return; + + cy_flush_buffer(tty); + cy_shutdown(info, tty); + tty_port_hangup(&info->port); +} /* cy_hangup */ + +static int cyy_carrier_raised(struct tty_port *port) +{ + struct cyclades_port *info = container_of(port, struct cyclades_port, + port); + struct cyclades_card *cinfo = info->card; + unsigned long flags; + int channel = info->line - cinfo->first_line; + u32 cd; + + spin_lock_irqsave(&cinfo->card_lock, flags); + cyy_writeb(info, CyCAR, channel & 0x03); + cd = cyy_readb(info, CyMSVR1) & CyDCD; + spin_unlock_irqrestore(&cinfo->card_lock, flags); + + return cd; +} + +static void cyy_dtr_rts(struct tty_port *port, int raise) +{ + struct cyclades_port *info = container_of(port, struct cyclades_port, + port); + struct cyclades_card *cinfo = info->card; + unsigned long flags; + + spin_lock_irqsave(&cinfo->card_lock, flags); + cyy_change_rts_dtr(info, raise ? TIOCM_RTS | TIOCM_DTR : 0, + raise ? 0 : TIOCM_RTS | TIOCM_DTR); + spin_unlock_irqrestore(&cinfo->card_lock, flags); +} + +static int cyz_carrier_raised(struct tty_port *port) +{ + struct cyclades_port *info = container_of(port, struct cyclades_port, + port); + + return readl(&info->u.cyz.ch_ctrl->rs_status) & C_RS_DCD; +} + +static void cyz_dtr_rts(struct tty_port *port, int raise) +{ + struct cyclades_port *info = container_of(port, struct cyclades_port, + port); + struct cyclades_card *cinfo = info->card; + struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl; + int ret, channel = info->line - cinfo->first_line; + u32 rs; + + rs = readl(&ch_ctrl->rs_control); + if (raise) + rs |= C_RS_RTS | C_RS_DTR; + else + rs &= ~(C_RS_RTS | C_RS_DTR); + cy_writel(&ch_ctrl->rs_control, rs); + ret = cyz_issue_cmd(cinfo, channel, C_CM_IOCTLM, 0L); + if (ret != 0) + printk(KERN_ERR "%s: retval on ttyC%d was %x\n", + __func__, info->line, ret); +#ifdef CY_DEBUG_DTR + printk(KERN_DEBUG "%s: raising Z DTR\n", __func__); +#endif +} + +static const struct tty_port_operations cyy_port_ops = { + .carrier_raised = cyy_carrier_raised, + .dtr_rts = cyy_dtr_rts, + .shutdown = cy_do_close, +}; + +static const struct tty_port_operations cyz_port_ops = { + .carrier_raised = cyz_carrier_raised, + .dtr_rts = cyz_dtr_rts, + .shutdown = cy_do_close, +}; + +/* + * --------------------------------------------------------------------- + * cy_init() and friends + * + * cy_init() is called at boot-time to initialize the serial driver. 
+ * --------------------------------------------------------------------- + */ + +static int __devinit cy_init_card(struct cyclades_card *cinfo) +{ + struct cyclades_port *info; + unsigned int channel, port; + + spin_lock_init(&cinfo->card_lock); + cinfo->intr_enabled = 0; + + cinfo->ports = kcalloc(cinfo->nports, sizeof(*cinfo->ports), + GFP_KERNEL); + if (cinfo->ports == NULL) { + printk(KERN_ERR "Cyclades: cannot allocate ports\n"); + return -ENOMEM; + } + + for (channel = 0, port = cinfo->first_line; channel < cinfo->nports; + channel++, port++) { + info = &cinfo->ports[channel]; + tty_port_init(&info->port); + info->magic = CYCLADES_MAGIC; + info->card = cinfo; + info->line = port; + + info->port.closing_wait = CLOSING_WAIT_DELAY; + info->port.close_delay = 5 * HZ / 10; + info->port.flags = STD_COM_FLAGS; + init_completion(&info->shutdown_wait); + + if (cy_is_Z(cinfo)) { + struct FIRM_ID *firm_id = cinfo->base_addr + ID_ADDRESS; + struct ZFW_CTRL *zfw_ctrl; + + info->port.ops = &cyz_port_ops; + info->type = PORT_STARTECH; + + zfw_ctrl = cinfo->base_addr + + (readl(&firm_id->zfwctrl_addr) & 0xfffff); + info->u.cyz.ch_ctrl = &zfw_ctrl->ch_ctrl[channel]; + info->u.cyz.buf_ctrl = &zfw_ctrl->buf_ctrl[channel]; + + if (cinfo->hw_ver == ZO_V1) + info->xmit_fifo_size = CYZ_FIFO_SIZE; + else + info->xmit_fifo_size = 4 * CYZ_FIFO_SIZE; +#ifdef CONFIG_CYZ_INTR + setup_timer(&cyz_rx_full_timer[port], + cyz_rx_restart, (unsigned long)info); +#endif + } else { + unsigned short chip_number; + int index = cinfo->bus_index; + + info->port.ops = &cyy_port_ops; + info->type = PORT_CIRRUS; + info->xmit_fifo_size = CyMAX_CHAR_FIFO; + info->cor1 = CyPARITY_NONE | Cy_1_STOP | Cy_8_BITS; + info->cor2 = CyETC; + info->cor3 = 0x08; /* _very_ small rcv threshold */ + + chip_number = channel / CyPORTS_PER_CHIP; + info->u.cyy.base_addr = cinfo->base_addr + + (cy_chip_offset[chip_number] << index); + info->chip_rev = cyy_readb(info, CyGFRCR); + + if (info->chip_rev >= CD1400_REV_J) { + /* It is a CD1400 rev. 
J or later */ + info->tbpr = baud_bpr_60[13]; /* Tx BPR */ + info->tco = baud_co_60[13]; /* Tx CO */ + info->rbpr = baud_bpr_60[13]; /* Rx BPR */ + info->rco = baud_co_60[13]; /* Rx CO */ + info->rtsdtr_inv = 1; + } else { + info->tbpr = baud_bpr_25[13]; /* Tx BPR */ + info->tco = baud_co_25[13]; /* Tx CO */ + info->rbpr = baud_bpr_25[13]; /* Rx BPR */ + info->rco = baud_co_25[13]; /* Rx CO */ + info->rtsdtr_inv = 0; + } + info->read_status_mask = CyTIMEOUT | CySPECHAR | + CyBREAK | CyPARITY | CyFRAME | CyOVERRUN; + } + + } + +#ifndef CONFIG_CYZ_INTR + if (cy_is_Z(cinfo) && !timer_pending(&cyz_timerlist)) { + mod_timer(&cyz_timerlist, jiffies + 1); +#ifdef CY_PCI_DEBUG + printk(KERN_DEBUG "Cyclades-Z polling initialized\n"); +#endif + } +#endif + return 0; +} + +/* initialize chips on Cyclom-Y card -- return number of valid + chips (which is number of ports/4) */ +static unsigned short __devinit cyy_init_card(void __iomem *true_base_addr, + int index) +{ + unsigned int chip_number; + void __iomem *base_addr; + + cy_writeb(true_base_addr + (Cy_HwReset << index), 0); + /* Cy_HwReset is 0x1400 */ + cy_writeb(true_base_addr + (Cy_ClrIntr << index), 0); + /* Cy_ClrIntr is 0x1800 */ + udelay(500L); + + for (chip_number = 0; chip_number < CyMAX_CHIPS_PER_CARD; + chip_number++) { + base_addr = + true_base_addr + (cy_chip_offset[chip_number] << index); + mdelay(1); + if (readb(base_addr + (CyCCR << index)) != 0x00) { + /************* + printk(" chip #%d at %#6lx is never idle (CCR != 0)\n", + chip_number, (unsigned long)base_addr); + *************/ + return chip_number; + } + + cy_writeb(base_addr + (CyGFRCR << index), 0); + udelay(10L); + + /* The Cyclom-16Y does not decode address bit 9 and therefore + cannot distinguish between references to chip 0 and a non- + existent chip 4. If the preceding clearing of the supposed + chip 4 GFRCR register appears at chip 0, there is no chip 4 + and this must be a Cyclom-16Y, not a Cyclom-32Ye. + */ + if (chip_number == 4 && readb(true_base_addr + + (cy_chip_offset[0] << index) + + (CyGFRCR << index)) == 0) { + return chip_number; + } + + cy_writeb(base_addr + (CyCCR << index), CyCHIP_RESET); + mdelay(1); + + if (readb(base_addr + (CyGFRCR << index)) == 0x00) { + /* + printk(" chip #%d at %#6lx is not responding ", + chip_number, (unsigned long)base_addr); + printk("(GFRCR stayed 0)\n", + */ + return chip_number; + } + if ((0xf0 & (readb(base_addr + (CyGFRCR << index)))) != + 0x40) { + /* + printk(" chip #%d at %#6lx is not valid (GFRCR == " + "%#2x)\n", + chip_number, (unsigned long)base_addr, + base_addr[CyGFRCR<<index]); + */ + return chip_number; + } + cy_writeb(base_addr + (CyGCR << index), CyCH0_SERIAL); + if (readb(base_addr + (CyGFRCR << index)) >= CD1400_REV_J) { + /* It is a CD1400 rev. J or later */ + /* Impossible to reach 5ms with this chip. + Changed to 2ms instead (f = 500 Hz). */ + cy_writeb(base_addr + (CyPPR << index), CyCLOCK_60_2MS); + } else { + /* f = 200 Hz */ + cy_writeb(base_addr + (CyPPR << index), CyCLOCK_25_5MS); + } + + /* + printk(" chip #%d at %#6lx is rev 0x%2x\n", + chip_number, (unsigned long)base_addr, + readb(base_addr+(CyGFRCR<<index))); + */ + } + return chip_number; +} /* cyy_init_card */ + +/* + * --------------------------------------------------------------------- + * cy_detect_isa() - Probe for Cyclom-Y/ISA boards. + * sets global variables and return the number of ISA boards found. 
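The address arithmetic in cyy_init_card() above (and in the cyy_readb()/cyy_writeb() accessors used throughout) shifts every chip window and register offset left by the card's bus_index, since the PCI boards space registers twice as far apart as ISA. A sketch with a shortened stand-in offset table and an example register offset:

#include <stdio.h>

/* shortened stand-in for cy_chip_offset[] */
static const unsigned long chip_offset[] = { 0x0000, 0x0400, 0x0800, 0x0c00 };

static unsigned long reg_addr(unsigned long base, unsigned int chip,
			      unsigned int reg, int bus_index)
{
	return base + (chip_offset[chip] << bus_index) + (reg << bus_index);
}

int main(void)
{
	unsigned int gfrcr = 0x40;	/* example register offset */

	printf("ISA (bus_index 0): chip 1 GFRCR at %#lx\n",
	       reg_addr(0, 1, gfrcr, 0));
	printf("PCI (bus_index 1): chip 1 GFRCR at %#lx\n",
	       reg_addr(0, 1, gfrcr, 1));
	return 0;
}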
+ * --------------------------------------------------------------------- + */ +static int __init cy_detect_isa(void) +{ +#ifdef CONFIG_ISA + unsigned short cy_isa_irq, nboard; + void __iomem *cy_isa_address; + unsigned short i, j, cy_isa_nchan; + int isparam = 0; + + nboard = 0; + + /* Check for module parameters */ + for (i = 0; i < NR_CARDS; i++) { + if (maddr[i] || i) { + isparam = 1; + cy_isa_addresses[i] = maddr[i]; + } + if (!maddr[i]) + break; + } + + /* scan the address table probing for Cyclom-Y/ISA boards */ + for (i = 0; i < NR_ISA_ADDRS; i++) { + unsigned int isa_address = cy_isa_addresses[i]; + if (isa_address == 0x0000) + return nboard; + + /* probe for CD1400... */ + cy_isa_address = ioremap_nocache(isa_address, CyISA_Ywin); + if (cy_isa_address == NULL) { + printk(KERN_ERR "Cyclom-Y/ISA: can't remap base " + "address\n"); + continue; + } + cy_isa_nchan = CyPORTS_PER_CHIP * + cyy_init_card(cy_isa_address, 0); + if (cy_isa_nchan == 0) { + iounmap(cy_isa_address); + continue; + } + + if (isparam && i < NR_CARDS && irq[i]) + cy_isa_irq = irq[i]; + else + /* find out the board's irq by probing */ + cy_isa_irq = detect_isa_irq(cy_isa_address); + if (cy_isa_irq == 0) { + printk(KERN_ERR "Cyclom-Y/ISA found at 0x%lx, but the " + "IRQ could not be detected.\n", + (unsigned long)cy_isa_address); + iounmap(cy_isa_address); + continue; + } + + if ((cy_next_channel + cy_isa_nchan) > NR_PORTS) { + printk(KERN_ERR "Cyclom-Y/ISA found at 0x%lx, but no " + "more channels are available. Change NR_PORTS " + "in cyclades.c and recompile kernel.\n", + (unsigned long)cy_isa_address); + iounmap(cy_isa_address); + return nboard; + } + /* fill the next cy_card structure available */ + for (j = 0; j < NR_CARDS; j++) { + if (cy_card[j].base_addr == NULL) + break; + } + if (j == NR_CARDS) { /* no more cy_cards available */ + printk(KERN_ERR "Cyclom-Y/ISA found at 0x%lx, but no " + "more cards can be used. 
Change NR_CARDS in " + "cyclades.c and recompile kernel.\n", + (unsigned long)cy_isa_address); + iounmap(cy_isa_address); + return nboard; + } + + /* allocate IRQ */ + if (request_irq(cy_isa_irq, cyy_interrupt, + IRQF_DISABLED, "Cyclom-Y", &cy_card[j])) { + printk(KERN_ERR "Cyclom-Y/ISA found at 0x%lx, but " + "could not allocate IRQ#%d.\n", + (unsigned long)cy_isa_address, cy_isa_irq); + iounmap(cy_isa_address); + return nboard; + } + + /* set cy_card */ + cy_card[j].base_addr = cy_isa_address; + cy_card[j].ctl_addr.p9050 = NULL; + cy_card[j].irq = (int)cy_isa_irq; + cy_card[j].bus_index = 0; + cy_card[j].first_line = cy_next_channel; + cy_card[j].num_chips = cy_isa_nchan / CyPORTS_PER_CHIP; + cy_card[j].nports = cy_isa_nchan; + if (cy_init_card(&cy_card[j])) { + cy_card[j].base_addr = NULL; + free_irq(cy_isa_irq, &cy_card[j]); + iounmap(cy_isa_address); + continue; + } + nboard++; + + printk(KERN_INFO "Cyclom-Y/ISA #%d: 0x%lx-0x%lx, IRQ%d found: " + "%d channels starting from port %d\n", + j + 1, (unsigned long)cy_isa_address, + (unsigned long)(cy_isa_address + (CyISA_Ywin - 1)), + cy_isa_irq, cy_isa_nchan, cy_next_channel); + + for (j = cy_next_channel; + j < cy_next_channel + cy_isa_nchan; j++) + tty_register_device(cy_serial_driver, j, NULL); + cy_next_channel += cy_isa_nchan; + } + return nboard; +#else + return 0; +#endif /* CONFIG_ISA */ +} /* cy_detect_isa */ + +#ifdef CONFIG_PCI +static inline int __devinit cyc_isfwstr(const char *str, unsigned int size) +{ + unsigned int a; + + for (a = 0; a < size && *str; a++, str++) + if (*str & 0x80) + return -EINVAL; + + for (; a < size; a++, str++) + if (*str) + return -EINVAL; + + return 0; +} + +static inline void __devinit cyz_fpga_copy(void __iomem *fpga, const u8 *data, + unsigned int size) +{ + for (; size > 0; size--) { + cy_writel(fpga, *data++); + udelay(10); + } +} + +static void __devinit plx_init(struct pci_dev *pdev, int irq, + struct RUNTIME_9060 __iomem *addr) +{ + /* Reset PLX */ + cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) | 0x40000000); + udelay(100L); + cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) & ~0x40000000); + + /* Reload Config. Registers from EEPROM */ + cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) | 0x20000000); + udelay(100L); + cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) & ~0x20000000); + + /* For some yet unknown reason, once the PLX9060 reloads the EEPROM, + * the IRQ is lost and, thus, we have to re-write it to the PCI config. + * registers. This will remain here until we find a permanent fix. 
+ */ + pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, irq); +} + +static int __devinit __cyz_load_fw(const struct firmware *fw, + const char *name, const u32 mailbox, void __iomem *base, + void __iomem *fpga) +{ + const void *ptr = fw->data; + const struct zfile_header *h = ptr; + const struct zfile_config *c, *cs; + const struct zfile_block *b, *bs; + unsigned int a, tmp, len = fw->size; +#define BAD_FW KERN_ERR "Bad firmware: " + if (len < sizeof(*h)) { + printk(BAD_FW "too short: %u<%zu\n", len, sizeof(*h)); + return -EINVAL; + } + + cs = ptr + h->config_offset; + bs = ptr + h->block_offset; + + if ((void *)(cs + h->n_config) > ptr + len || + (void *)(bs + h->n_blocks) > ptr + len) { + printk(BAD_FW "too short"); + return -EINVAL; + } + + if (cyc_isfwstr(h->name, sizeof(h->name)) || + cyc_isfwstr(h->date, sizeof(h->date))) { + printk(BAD_FW "bad formatted header string\n"); + return -EINVAL; + } + + if (strncmp(name, h->name, sizeof(h->name))) { + printk(BAD_FW "bad name '%s' (expected '%s')\n", h->name, name); + return -EINVAL; + } + + tmp = 0; + for (c = cs; c < cs + h->n_config; c++) { + for (a = 0; a < c->n_blocks; a++) + if (c->block_list[a] > h->n_blocks) { + printk(BAD_FW "bad block ref number in cfgs\n"); + return -EINVAL; + } + if (c->mailbox == mailbox && c->function == 0) /* 0 is normal */ + tmp++; + } + if (!tmp) { + printk(BAD_FW "nothing appropriate\n"); + return -EINVAL; + } + + for (b = bs; b < bs + h->n_blocks; b++) + if (b->file_offset + b->size > len) { + printk(BAD_FW "bad block data offset\n"); + return -EINVAL; + } + + /* everything is OK, let's seek'n'load it */ + for (c = cs; c < cs + h->n_config; c++) + if (c->mailbox == mailbox && c->function == 0) + break; + + for (a = 0; a < c->n_blocks; a++) { + b = &bs[c->block_list[a]]; + if (b->type == ZBLOCK_FPGA) { + if (fpga != NULL) + cyz_fpga_copy(fpga, ptr + b->file_offset, + b->size); + } else { + if (base != NULL) + memcpy_toio(base + b->ram_offset, + ptr + b->file_offset, b->size); + } + } +#undef BAD_FW + return 0; +} + +static int __devinit cyz_load_fw(struct pci_dev *pdev, void __iomem *base_addr, + struct RUNTIME_9060 __iomem *ctl_addr, int irq) +{ + const struct firmware *fw; + struct FIRM_ID __iomem *fid = base_addr + ID_ADDRESS; + struct CUSTOM_REG __iomem *cust = base_addr; + struct ZFW_CTRL __iomem *pt_zfwctrl; + void __iomem *tmp; + u32 mailbox, status, nchan; + unsigned int i; + int retval; + + retval = request_firmware(&fw, "cyzfirm.bin", &pdev->dev); + if (retval) { + dev_err(&pdev->dev, "can't get firmware\n"); + goto err; + } + + /* Check whether the firmware is already loaded and running. If + positive, skip this board */ + if (__cyz_fpga_loaded(ctl_addr) && readl(&fid->signature) == ZFIRM_ID) { + u32 cntval = readl(base_addr + 0x190); + + udelay(100); + if (cntval != readl(base_addr + 0x190)) { + /* FW counter is working, FW is running */ + dev_dbg(&pdev->dev, "Cyclades-Z FW already loaded. 
" + "Skipping board.\n"); + retval = 0; + goto err_rel; + } + } + + /* start boot */ + cy_writel(&ctl_addr->intr_ctrl_stat, readl(&ctl_addr->intr_ctrl_stat) & + ~0x00030800UL); + + mailbox = readl(&ctl_addr->mail_box_0); + + if (mailbox == 0 || __cyz_fpga_loaded(ctl_addr)) { + /* stops CPU and set window to beginning of RAM */ + cy_writel(&ctl_addr->loc_addr_base, WIN_CREG); + cy_writel(&cust->cpu_stop, 0); + cy_writel(&ctl_addr->loc_addr_base, WIN_RAM); + udelay(100); + } + + plx_init(pdev, irq, ctl_addr); + + if (mailbox != 0) { + /* load FPGA */ + retval = __cyz_load_fw(fw, "Cyclom-Z", mailbox, NULL, + base_addr); + if (retval) + goto err_rel; + if (!__cyz_fpga_loaded(ctl_addr)) { + dev_err(&pdev->dev, "fw upload successful, but fw is " + "not loaded\n"); + goto err_rel; + } + } + + /* stops CPU and set window to beginning of RAM */ + cy_writel(&ctl_addr->loc_addr_base, WIN_CREG); + cy_writel(&cust->cpu_stop, 0); + cy_writel(&ctl_addr->loc_addr_base, WIN_RAM); + udelay(100); + + /* clear memory */ + for (tmp = base_addr; tmp < base_addr + RAM_SIZE; tmp++) + cy_writeb(tmp, 255); + if (mailbox != 0) { + /* set window to last 512K of RAM */ + cy_writel(&ctl_addr->loc_addr_base, WIN_RAM + RAM_SIZE); + for (tmp = base_addr; tmp < base_addr + RAM_SIZE; tmp++) + cy_writeb(tmp, 255); + /* set window to beginning of RAM */ + cy_writel(&ctl_addr->loc_addr_base, WIN_RAM); + } + + retval = __cyz_load_fw(fw, "Cyclom-Z", mailbox, base_addr, NULL); + release_firmware(fw); + if (retval) + goto err; + + /* finish boot and start boards */ + cy_writel(&ctl_addr->loc_addr_base, WIN_CREG); + cy_writel(&cust->cpu_start, 0); + cy_writel(&ctl_addr->loc_addr_base, WIN_RAM); + i = 0; + while ((status = readl(&fid->signature)) != ZFIRM_ID && i++ < 40) + msleep(100); + if (status != ZFIRM_ID) { + if (status == ZFIRM_HLT) { + dev_err(&pdev->dev, "you need an external power supply " + "for this number of ports. Firmware halted and " + "board reset.\n"); + retval = -EIO; + goto err; + } + dev_warn(&pdev->dev, "fid->signature = 0x%x... Waiting " + "some more time\n", status); + while ((status = readl(&fid->signature)) != ZFIRM_ID && + i++ < 200) + msleep(100); + if (status != ZFIRM_ID) { + dev_err(&pdev->dev, "Board not started in 20 seconds! " + "Giving up. (fid->signature = 0x%x)\n", + status); + dev_info(&pdev->dev, "*** Warning ***: if you are " + "upgrading the FW, please power cycle the " + "system before loading the new FW to the " + "Cyclades-Z.\n"); + + if (__cyz_fpga_loaded(ctl_addr)) + plx_init(pdev, irq, ctl_addr); + + retval = -EIO; + goto err; + } + dev_dbg(&pdev->dev, "Firmware started after %d seconds.\n", + i / 10); + } + pt_zfwctrl = base_addr + readl(&fid->zfwctrl_addr); + + dev_dbg(&pdev->dev, "fid=> %p, zfwctrl_addr=> %x, npt_zfwctrl=> %p\n", + base_addr + ID_ADDRESS, readl(&fid->zfwctrl_addr), + base_addr + readl(&fid->zfwctrl_addr)); + + nchan = readl(&pt_zfwctrl->board_ctrl.n_channel); + dev_info(&pdev->dev, "Cyclades-Z FW loaded: version = %x, ports = %u\n", + readl(&pt_zfwctrl->board_ctrl.fw_version), nchan); + + if (nchan == 0) { + dev_warn(&pdev->dev, "no Cyclades-Z ports were found. Please " + "check the connection between the Z host card and the " + "serial expanders.\n"); + + if (__cyz_fpga_loaded(ctl_addr)) + plx_init(pdev, irq, ctl_addr); + + dev_info(&pdev->dev, "Null number of ports detected. 
Board " + "reset.\n"); + retval = 0; + goto err; + } + + cy_writel(&pt_zfwctrl->board_ctrl.op_system, C_OS_LINUX); + cy_writel(&pt_zfwctrl->board_ctrl.dr_version, DRIVER_VERSION); + + /* + Early firmware failed to start looking for commands. + This enables firmware interrupts for those commands. + */ + cy_writel(&ctl_addr->intr_ctrl_stat, readl(&ctl_addr->intr_ctrl_stat) | + (1 << 17)); + cy_writel(&ctl_addr->intr_ctrl_stat, readl(&ctl_addr->intr_ctrl_stat) | + 0x00030800UL); + + return nchan; +err_rel: + release_firmware(fw); +err: + return retval; +} + +static int __devinit cy_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + void __iomem *addr0 = NULL, *addr2 = NULL; + char *card_name = NULL; + u32 uninitialized_var(mailbox); + unsigned int device_id, nchan = 0, card_no, i; + unsigned char plx_ver; + int retval, irq; + + retval = pci_enable_device(pdev); + if (retval) { + dev_err(&pdev->dev, "cannot enable device\n"); + goto err; + } + + /* read PCI configuration area */ + irq = pdev->irq; + device_id = pdev->device & ~PCI_DEVICE_ID_MASK; + +#if defined(__alpha__) + if (device_id == PCI_DEVICE_ID_CYCLOM_Y_Lo) { /* below 1M? */ + dev_err(&pdev->dev, "Cyclom-Y/PCI not supported for low " + "addresses on Alpha systems.\n"); + retval = -EIO; + goto err_dis; + } +#endif + if (device_id == PCI_DEVICE_ID_CYCLOM_Z_Lo) { + dev_err(&pdev->dev, "Cyclades-Z/PCI not supported for low " + "addresses\n"); + retval = -EIO; + goto err_dis; + } + + if (pci_resource_flags(pdev, 2) & IORESOURCE_IO) { + dev_warn(&pdev->dev, "PCI I/O bit incorrectly set. Ignoring " + "it...\n"); + pdev->resource[2].flags &= ~IORESOURCE_IO; + } + + retval = pci_request_regions(pdev, "cyclades"); + if (retval) { + dev_err(&pdev->dev, "failed to reserve resources\n"); + goto err_dis; + } + + retval = -EIO; + if (device_id == PCI_DEVICE_ID_CYCLOM_Y_Lo || + device_id == PCI_DEVICE_ID_CYCLOM_Y_Hi) { + card_name = "Cyclom-Y"; + + addr0 = ioremap_nocache(pci_resource_start(pdev, 0), + CyPCI_Yctl); + if (addr0 == NULL) { + dev_err(&pdev->dev, "can't remap ctl region\n"); + goto err_reg; + } + addr2 = ioremap_nocache(pci_resource_start(pdev, 2), + CyPCI_Ywin); + if (addr2 == NULL) { + dev_err(&pdev->dev, "can't remap base region\n"); + goto err_unmap; + } + + nchan = CyPORTS_PER_CHIP * cyy_init_card(addr2, 1); + if (nchan == 0) { + dev_err(&pdev->dev, "Cyclom-Y PCI host card with no " + "Serial-Modules\n"); + goto err_unmap; + } + } else if (device_id == PCI_DEVICE_ID_CYCLOM_Z_Hi) { + struct RUNTIME_9060 __iomem *ctl_addr; + + ctl_addr = addr0 = ioremap_nocache(pci_resource_start(pdev, 0), + CyPCI_Zctl); + if (addr0 == NULL) { + dev_err(&pdev->dev, "can't remap ctl region\n"); + goto err_reg; + } + + /* Disable interrupts on the PLX before resetting it */ + cy_writew(&ctl_addr->intr_ctrl_stat, + readw(&ctl_addr->intr_ctrl_stat) & ~0x0900); + + plx_init(pdev, irq, addr0); + + mailbox = readl(&ctl_addr->mail_box_0); + + addr2 = ioremap_nocache(pci_resource_start(pdev, 2), + mailbox == ZE_V1 ? 
CyPCI_Ze_win : CyPCI_Zwin); + if (addr2 == NULL) { + dev_err(&pdev->dev, "can't remap base region\n"); + goto err_unmap; + } + + if (mailbox == ZE_V1) { + card_name = "Cyclades-Ze"; + } else { + card_name = "Cyclades-8Zo"; +#ifdef CY_PCI_DEBUG + if (mailbox == ZO_V1) { + cy_writel(&ctl_addr->loc_addr_base, WIN_CREG); + dev_info(&pdev->dev, "Cyclades-8Zo/PCI: FPGA " + "id %lx, ver %lx\n", (ulong)(0xff & + readl(&((struct CUSTOM_REG *)addr2)-> + fpga_id)), (ulong)(0xff & + readl(&((struct CUSTOM_REG *)addr2)-> + fpga_version))); + cy_writel(&ctl_addr->loc_addr_base, WIN_RAM); + } else { + dev_info(&pdev->dev, "Cyclades-Z/PCI: New " + "Cyclades-Z board. FPGA not loaded\n"); + } +#endif + /* The following clears the firmware id word. This + ensures that the driver will not attempt to talk to + the board until it has been properly initialized. + */ + if ((mailbox == ZO_V1) || (mailbox == ZO_V2)) + cy_writel(addr2 + ID_ADDRESS, 0L); + } + + retval = cyz_load_fw(pdev, addr2, addr0, irq); + if (retval <= 0) + goto err_unmap; + nchan = retval; + } + + if ((cy_next_channel + nchan) > NR_PORTS) { + dev_err(&pdev->dev, "Cyclades-8Zo/PCI found, but no " + "channels are available. Change NR_PORTS in " + "cyclades.c and recompile kernel.\n"); + goto err_unmap; + } + /* fill the next cy_card structure available */ + for (card_no = 0; card_no < NR_CARDS; card_no++) { + if (cy_card[card_no].base_addr == NULL) + break; + } + if (card_no == NR_CARDS) { /* no more cy_cards available */ + dev_err(&pdev->dev, "Cyclades-8Zo/PCI found, but no " + "more cards can be used. Change NR_CARDS in " + "cyclades.c and recompile kernel.\n"); + goto err_unmap; + } + + if (device_id == PCI_DEVICE_ID_CYCLOM_Y_Lo || + device_id == PCI_DEVICE_ID_CYCLOM_Y_Hi) { + /* allocate IRQ */ + retval = request_irq(irq, cyy_interrupt, + IRQF_SHARED, "Cyclom-Y", &cy_card[card_no]); + if (retval) { + dev_err(&pdev->dev, "could not allocate IRQ\n"); + goto err_unmap; + } + cy_card[card_no].num_chips = nchan / CyPORTS_PER_CHIP; + } else { + struct FIRM_ID __iomem *firm_id = addr2 + ID_ADDRESS; + struct ZFW_CTRL __iomem *zfw_ctrl; + + zfw_ctrl = addr2 + (readl(&firm_id->zfwctrl_addr) & 0xfffff); + + cy_card[card_no].hw_ver = mailbox; + cy_card[card_no].num_chips = (unsigned int)-1; + cy_card[card_no].board_ctrl = &zfw_ctrl->board_ctrl; +#ifdef CONFIG_CYZ_INTR + /* allocate IRQ only if board has an IRQ */ + if (irq != 0 && irq != 255) { + retval = request_irq(irq, cyz_interrupt, + IRQF_SHARED, "Cyclades-Z", + &cy_card[card_no]); + if (retval) { + dev_err(&pdev->dev, "could not allocate IRQ\n"); + goto err_unmap; + } + } +#endif /* CONFIG_CYZ_INTR */ + } + + /* set cy_card */ + cy_card[card_no].base_addr = addr2; + cy_card[card_no].ctl_addr.p9050 = addr0; + cy_card[card_no].irq = irq; + cy_card[card_no].bus_index = 1; + cy_card[card_no].first_line = cy_next_channel; + cy_card[card_no].nports = nchan; + retval = cy_init_card(&cy_card[card_no]); + if (retval) + goto err_null; + + pci_set_drvdata(pdev, &cy_card[card_no]); + + if (device_id == PCI_DEVICE_ID_CYCLOM_Y_Lo || + device_id == PCI_DEVICE_ID_CYCLOM_Y_Hi) { + /* enable interrupts in the PCI interface */ + plx_ver = readb(addr2 + CyPLX_VER) & 0x0f; + switch (plx_ver) { + case PLX_9050: + cy_writeb(addr0 + 0x4c, 0x43); + break; + + case PLX_9060: + case PLX_9080: + default: /* Old boards, use PLX_9060 */ + { + struct RUNTIME_9060 __iomem *ctl_addr = addr0; + plx_init(pdev, irq, ctl_addr); + cy_writew(&ctl_addr->intr_ctrl_stat, + readw(&ctl_addr->intr_ctrl_stat) | 0x0900); + break; + } + } + } + 
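cy_pci_probe() recovers from failures through a ladder of goto labels (err_dis, err_reg, err_unmap, err_null): each exit point releases only the resources that were successfully acquired, in reverse order of acquisition. A minimal sketch of that unwinding pattern, not part of the patch, using hypothetical names (example_probe, "exampledev") and only PCI and ioremap helpers already used in this file; it keeps just the first three acquisition steps:

#include <linux/pci.h>
#include <linux/io.h>

static int __devinit example_probe(struct pci_dev *pdev,
		const struct pci_device_id *ent)
{
	void __iomem *regs;
	int retval;

	retval = pci_enable_device(pdev);
	if (retval)
		goto err;		/* nothing to undo yet */

	retval = pci_request_regions(pdev, "exampledev");
	if (retval)
		goto err_dis;		/* undo the enable only */

	regs = ioremap_nocache(pci_resource_start(pdev, 0),
			pci_resource_len(pdev, 0));
	if (regs == NULL) {
		retval = -ENOMEM;
		goto err_reg;		/* undo regions, then enable */
	}

	pci_set_drvdata(pdev, regs);
	return 0;

err_reg:
	pci_release_regions(pdev);
err_dis:
	pci_disable_device(pdev);
err:
	return retval;
}

The driver's own probe adds err_unmap and err_null on top of this shape because it may also have mapped a second BAR and requested an IRQ before a later step fails.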
+ dev_info(&pdev->dev, "%s/PCI #%d found: %d channels starting from " + "port %d.\n", card_name, card_no + 1, nchan, cy_next_channel); + for (i = cy_next_channel; i < cy_next_channel + nchan; i++) + tty_register_device(cy_serial_driver, i, &pdev->dev); + cy_next_channel += nchan; + + return 0; +err_null: + cy_card[card_no].base_addr = NULL; + free_irq(irq, &cy_card[card_no]); +err_unmap: + iounmap(addr0); + if (addr2) + iounmap(addr2); +err_reg: + pci_release_regions(pdev); +err_dis: + pci_disable_device(pdev); +err: + return retval; +} + +static void __devexit cy_pci_remove(struct pci_dev *pdev) +{ + struct cyclades_card *cinfo = pci_get_drvdata(pdev); + unsigned int i; + + /* non-Z with old PLX */ + if (!cy_is_Z(cinfo) && (readb(cinfo->base_addr + CyPLX_VER) & 0x0f) == + PLX_9050) + cy_writeb(cinfo->ctl_addr.p9050 + 0x4c, 0); + else +#ifndef CONFIG_CYZ_INTR + if (!cy_is_Z(cinfo)) +#endif + cy_writew(&cinfo->ctl_addr.p9060->intr_ctrl_stat, + readw(&cinfo->ctl_addr.p9060->intr_ctrl_stat) & + ~0x0900); + + iounmap(cinfo->base_addr); + if (cinfo->ctl_addr.p9050) + iounmap(cinfo->ctl_addr.p9050); + if (cinfo->irq +#ifndef CONFIG_CYZ_INTR + && !cy_is_Z(cinfo) +#endif /* CONFIG_CYZ_INTR */ + ) + free_irq(cinfo->irq, cinfo); + pci_release_regions(pdev); + + cinfo->base_addr = NULL; + for (i = cinfo->first_line; i < cinfo->first_line + + cinfo->nports; i++) + tty_unregister_device(cy_serial_driver, i); + cinfo->nports = 0; + kfree(cinfo->ports); +} + +static struct pci_driver cy_pci_driver = { + .name = "cyclades", + .id_table = cy_pci_dev_id, + .probe = cy_pci_probe, + .remove = __devexit_p(cy_pci_remove) +}; +#endif + +static int cyclades_proc_show(struct seq_file *m, void *v) +{ + struct cyclades_port *info; + unsigned int i, j; + __u32 cur_jifs = jiffies; + + seq_puts(m, "Dev TimeOpen BytesOut IdleOut BytesIn " + "IdleIn Overruns Ldisc\n"); + + /* Output one line for each known port */ + for (i = 0; i < NR_CARDS; i++) + for (j = 0; j < cy_card[i].nports; j++) { + info = &cy_card[i].ports[j]; + + if (info->port.count) { + /* XXX is the ldisc num worth this? */ + struct tty_struct *tty; + struct tty_ldisc *ld; + int num = 0; + tty = tty_port_tty_get(&info->port); + if (tty) { + ld = tty_ldisc_ref(tty); + if (ld) { + num = ld->ops->num; + tty_ldisc_deref(ld); + } + tty_kref_put(tty); + } + seq_printf(m, "%3d %8lu %10lu %8lu " + "%10lu %8lu %9lu %6d\n", info->line, + (cur_jifs - info->idle_stats.in_use) / + HZ, info->idle_stats.xmit_bytes, + (cur_jifs - info->idle_stats.xmit_idle)/ + HZ, info->idle_stats.recv_bytes, + (cur_jifs - info->idle_stats.recv_idle)/ + HZ, info->idle_stats.overruns, + num); + } else + seq_printf(m, "%3d %8lu %10lu %8lu " + "%10lu %8lu %9lu %6ld\n", + info->line, 0L, 0L, 0L, 0L, 0L, 0L, 0L); + } + return 0; +} + +static int cyclades_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, cyclades_proc_show, NULL); +} + +static const struct file_operations cyclades_proc_fops = { + .owner = THIS_MODULE, + .open = cyclades_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/* The serial driver boot-time initialization code! + Hardware I/O ports are mapped to character special devices on a + first found, first allocated manner. That is, this code searches + for Cyclom cards in the system. As each is found, it is probed + to discover how many chips (and thus how many ports) are present. + These ports are mapped to the tty ports 32 and upward in monotonic + fashion. 
If an 8-port card is replaced with a 16-port card, the + port mapping on a following card will shift. + + This approach is different from what is used in the other serial + device driver because the Cyclom is more properly a multiplexer, + not just an aggregation of serial ports on one card. + + If there are more cards with more ports than have been + statically allocated above, a warning is printed and the + extra ports are ignored. + */ + +static const struct tty_operations cy_ops = { + .open = cy_open, + .close = cy_close, + .write = cy_write, + .put_char = cy_put_char, + .flush_chars = cy_flush_chars, + .write_room = cy_write_room, + .chars_in_buffer = cy_chars_in_buffer, + .flush_buffer = cy_flush_buffer, + .ioctl = cy_ioctl, + .throttle = cy_throttle, + .unthrottle = cy_unthrottle, + .set_termios = cy_set_termios, + .stop = cy_stop, + .start = cy_start, + .hangup = cy_hangup, + .break_ctl = cy_break, + .wait_until_sent = cy_wait_until_sent, + .tiocmget = cy_tiocmget, + .tiocmset = cy_tiocmset, + .get_icount = cy_get_icount, + .proc_fops = &cyclades_proc_fops, +}; + +static int __init cy_init(void) +{ + unsigned int nboards; + int retval = -ENOMEM; + + cy_serial_driver = alloc_tty_driver(NR_PORTS); + if (!cy_serial_driver) + goto err; + + printk(KERN_INFO "Cyclades driver " CY_VERSION " (built %s %s)\n", + __DATE__, __TIME__); + + /* Initialize the tty_driver structure */ + + cy_serial_driver->owner = THIS_MODULE; + cy_serial_driver->driver_name = "cyclades"; + cy_serial_driver->name = "ttyC"; + cy_serial_driver->major = CYCLADES_MAJOR; + cy_serial_driver->minor_start = 0; + cy_serial_driver->type = TTY_DRIVER_TYPE_SERIAL; + cy_serial_driver->subtype = SERIAL_TYPE_NORMAL; + cy_serial_driver->init_termios = tty_std_termios; + cy_serial_driver->init_termios.c_cflag = + B9600 | CS8 | CREAD | HUPCL | CLOCAL; + cy_serial_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV; + tty_set_operations(cy_serial_driver, &cy_ops); + + retval = tty_register_driver(cy_serial_driver); + if (retval) { + printk(KERN_ERR "Couldn't register Cyclades serial driver\n"); + goto err_frtty; + } + + /* the code below is responsible to find the boards. Each different + type of board has its own detection routine. If a board is found, + the next cy_card structure available is set by the detection + routine. These functions are responsible for checking the + availability of cy_card and cy_port data structures and updating + the cy_next_channel. 
*/ + + /* look for isa boards */ + nboards = cy_detect_isa(); + +#ifdef CONFIG_PCI + /* look for pci boards */ + retval = pci_register_driver(&cy_pci_driver); + if (retval && !nboards) { + tty_unregister_driver(cy_serial_driver); + goto err_frtty; + } +#endif + + return 0; +err_frtty: + put_tty_driver(cy_serial_driver); +err: + return retval; +} /* cy_init */ + +static void __exit cy_cleanup_module(void) +{ + struct cyclades_card *card; + unsigned int i, e1; + +#ifndef CONFIG_CYZ_INTR + del_timer_sync(&cyz_timerlist); +#endif /* CONFIG_CYZ_INTR */ + + e1 = tty_unregister_driver(cy_serial_driver); + if (e1) + printk(KERN_ERR "failed to unregister Cyclades serial " + "driver(%d)\n", e1); + +#ifdef CONFIG_PCI + pci_unregister_driver(&cy_pci_driver); +#endif + + for (i = 0; i < NR_CARDS; i++) { + card = &cy_card[i]; + if (card->base_addr) { + /* clear interrupt */ + cy_writeb(card->base_addr + Cy_ClrIntr, 0); + iounmap(card->base_addr); + if (card->ctl_addr.p9050) + iounmap(card->ctl_addr.p9050); + if (card->irq +#ifndef CONFIG_CYZ_INTR + && !cy_is_Z(card) +#endif /* CONFIG_CYZ_INTR */ + ) + free_irq(card->irq, card); + for (e1 = card->first_line; e1 < card->first_line + + card->nports; e1++) + tty_unregister_device(cy_serial_driver, e1); + kfree(card->ports); + } + } + + put_tty_driver(cy_serial_driver); +} /* cy_cleanup_module */ + +module_init(cy_init); +module_exit(cy_cleanup_module); + +MODULE_LICENSE("GPL"); +MODULE_VERSION(CY_VERSION); +MODULE_ALIAS_CHARDEV_MAJOR(CYCLADES_MAJOR); +MODULE_FIRMWARE("cyzfirm.bin"); diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig new file mode 100644 index 00000000000..6f2c9809f1f --- /dev/null +++ b/drivers/tty/hvc/Kconfig @@ -0,0 +1,105 @@ +config HVC_DRIVER + bool + help + Generic "hypervisor virtual console" infrastructure for various + hypervisors (pSeries, iSeries, Xen, lguest). + It will automatically be selected if one of the back-end console drivers + is selected. + +config HVC_IRQ + bool + +config HVC_CONSOLE + bool "pSeries Hypervisor Virtual Console support" + depends on PPC_PSERIES + select HVC_DRIVER + select HVC_IRQ + help + pSeries machines when partitioned support a hypervisor virtual + console. This driver allows each pSeries partition to have a console + which is accessed via the HMC. + +config HVC_ISERIES + bool "iSeries Hypervisor Virtual Console support" + depends on PPC_ISERIES + default y + select HVC_DRIVER + select HVC_IRQ + select VIOPATH + help + iSeries machines support a hypervisor virtual console. + +config HVC_RTAS + bool "IBM RTAS Console support" + depends on PPC_RTAS + select HVC_DRIVER + help + IBM Console device driver which makes use of RTAS + +config HVC_BEAT + bool "Toshiba's Beat Hypervisor Console support" + depends on PPC_CELLEB + select HVC_DRIVER + help + Toshiba's Cell Reference Set Beat Console device driver + +config HVC_IUCV + bool "z/VM IUCV Hypervisor console support (VM only)" + depends on S390 + select HVC_DRIVER + select IUCV + default y + help + This driver provides a Hypervisor console (HVC) back-end to access + a Linux (console) terminal via a z/VM IUCV communication path. 
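The HVC_DRIVER entry above is only infrastructure; each of the console options here selects it and supplies a thin back-end. A back-end provides get_chars()/put_chars() callbacks in a struct hv_ops and registers them twice: with hvc_instantiate() very early so the boot console works, and with hvc_alloc() later for the full tty-backed console (hvc_bfin_jtag.c further down in this patch does exactly this). A minimal sketch, not part of the patch, with hypothetical "example" names and an assumed 256-byte output buffer:

/* illustrative back-end stub; the transport behind the callbacks is omitted */
#include <linux/init.h>
#include <linux/types.h>

#include "hvc_console.h"

static int example_put_chars(uint32_t vtermno, const char *buf, int count)
{
	/* hand up to 'count' bytes to the hypervisor/transport; claim all sent */
	return count;
}

static int example_get_chars(uint32_t vtermno, char *buf, int count)
{
	/* no input pending in this stub */
	return 0;
}

static const struct hv_ops example_hv_ops = {
	.get_chars = example_get_chars,
	.put_chars = example_put_chars,
};

/* early boot console: make vterm 0 usable before the tty layer is up */
static int __init example_console_init(void)
{
	hvc_instantiate(0, 0, &example_hv_ops);
	return 0;
}
console_initcall(example_console_init);

/* full console device once drivers can be registered */
static int __init example_init(void)
{
	hvc_alloc(0, 0, &example_hv_ops, 256);
	return 0;
}
device_initcall(example_init);

Real back-ends keep the struct hvc_struct pointer returned by hvc_alloc(), checking it with IS_ERR() as hvc_xen.c does, so that hvc_remove() can be called on teardown; the stub drops it only for brevity.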
+ +config HVC_XEN + bool "Xen Hypervisor Console support" + depends on XEN + select HVC_DRIVER + select HVC_IRQ + default y + help + Xen virtual console device driver + +config HVC_UDBG + bool "udbg based fake hypervisor console" + depends on PPC && EXPERIMENTAL + select HVC_DRIVER + default n + +config HVC_DCC + bool "ARM JTAG DCC console" + depends on ARM + select HVC_DRIVER + help + This console uses the JTAG DCC on ARM to create a console under the HVC + driver. This console is used through a JTAG only on ARM. If you don't have + a JTAG then you probably don't want this option. + +config HVC_BFIN_JTAG + bool "Blackfin JTAG console" + depends on BLACKFIN + select HVC_DRIVER + help + This console uses the Blackfin JTAG to create a console under the + the HVC driver. If you don't have JTAG, then you probably don't + want this option. + +config HVCS + tristate "IBM Hypervisor Virtual Console Server support" + depends on PPC_PSERIES && HVC_CONSOLE + help + Partitionable IBM Power5 ppc64 machines allow hosting of + firmware virtual consoles from one Linux partition by + another Linux partition. This driver allows console data + from Linux partitions to be accessed through TTY device + interfaces in the device tree of a Linux partition running + this driver. + + To compile this driver as a module, choose M here: the + module will be called hvcs. Additionally, this module + will depend on arch specific APIs exported from hvcserver.ko + which will also be compiled when this driver is built as a + module. + diff --git a/drivers/tty/hvc/Makefile b/drivers/tty/hvc/Makefile index e6bed5f177f..40a25d93fe5 100644 --- a/drivers/tty/hvc/Makefile +++ b/drivers/tty/hvc/Makefile @@ -9,5 +9,5 @@ obj-$(CONFIG_HVC_IRQ) += hvc_irq.o obj-$(CONFIG_HVC_XEN) += hvc_xen.o obj-$(CONFIG_HVC_IUCV) += hvc_iucv.o obj-$(CONFIG_HVC_UDBG) += hvc_udbg.o +obj-$(CONFIG_HVC_BFIN_JTAG) += hvc_bfin_jtag.o obj-$(CONFIG_HVCS) += hvcs.o -obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o diff --git a/drivers/tty/hvc/hvc_bfin_jtag.c b/drivers/tty/hvc/hvc_bfin_jtag.c new file mode 100644 index 00000000000..31d6cc6a77a --- /dev/null +++ b/drivers/tty/hvc/hvc_bfin_jtag.c @@ -0,0 +1,105 @@ +/* + * Console via Blackfin JTAG Communication + * + * Copyright 2008-2011 Analog Devices Inc. + * + * Enter bugs at http://blackfin.uclinux.org/ + * + * Licensed under the GPL-2 or later. 
+ */ + +#include <linux/console.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/moduleparam.h> +#include <linux/types.h> + +#include "hvc_console.h" + +/* See the Debug/Emulation chapter in the HRM */ +#define EMUDOF 0x00000001 /* EMUDAT_OUT full & valid */ +#define EMUDIF 0x00000002 /* EMUDAT_IN full & valid */ +#define EMUDOOVF 0x00000004 /* EMUDAT_OUT overflow */ +#define EMUDIOVF 0x00000008 /* EMUDAT_IN overflow */ + +/* Helper functions to glue the register API to simple C operations */ +static inline uint32_t bfin_write_emudat(uint32_t emudat) +{ + __asm__ __volatile__("emudat = %0;" : : "d"(emudat)); + return emudat; +} + +static inline uint32_t bfin_read_emudat(void) +{ + uint32_t emudat; + __asm__ __volatile__("%0 = emudat;" : "=d"(emudat)); + return emudat; +} + +/* Send data to the host */ +static int hvc_bfin_put_chars(uint32_t vt, const char *buf, int count) +{ + static uint32_t outbound_len; + uint32_t emudat; + int ret; + + if (bfin_read_DBGSTAT() & EMUDOF) + return 0; + + if (!outbound_len) { + outbound_len = count; + bfin_write_emudat(outbound_len); + return 0; + } + + ret = min(outbound_len, (uint32_t)4); + memcpy(&emudat, buf, ret); + bfin_write_emudat(emudat); + outbound_len -= ret; + + return ret; +} + +/* Receive data from the host */ +static int hvc_bfin_get_chars(uint32_t vt, char *buf, int count) +{ + static uint32_t inbound_len; + uint32_t emudat; + int ret; + + if (!(bfin_read_DBGSTAT() & EMUDIF)) + return 0; + emudat = bfin_read_emudat(); + + if (!inbound_len) { + inbound_len = emudat; + return 0; + } + + ret = min(inbound_len, (uint32_t)4); + memcpy(buf, &emudat, ret); + inbound_len -= ret; + + return ret; +} + +/* Glue the HVC layers to the Blackfin layers */ +static const struct hv_ops hvc_bfin_get_put_ops = { + .get_chars = hvc_bfin_get_chars, + .put_chars = hvc_bfin_put_chars, +}; + +static int __init hvc_bfin_console_init(void) +{ + hvc_instantiate(0, 0, &hvc_bfin_get_put_ops); + return 0; +} +console_initcall(hvc_bfin_console_init); + +static int __init hvc_bfin_init(void) +{ + hvc_alloc(0, 0, &hvc_bfin_get_put_ops, 128); + return 0; +} +device_initcall(hvc_bfin_init); diff --git a/drivers/tty/hvc/hvc_dcc.c b/drivers/tty/hvc/hvc_dcc.c index 6470f63deb4..435f6facbc2 100644 --- a/drivers/tty/hvc/hvc_dcc.c +++ b/drivers/tty/hvc/hvc_dcc.c @@ -33,54 +33,29 @@ static inline u32 __dcc_getstatus(void) { u32 __ret; - - asm("mrc p14, 0, %0, c0, c1, 0 @ read comms ctrl reg" + asm volatile("mrc p14, 0, %0, c0, c1, 0 @ read comms ctrl reg" : "=r" (__ret) : : "cc"); return __ret; } -#if defined(CONFIG_CPU_V7) static inline char __dcc_getchar(void) { char __c; - asm("get_wait: mrc p14, 0, pc, c0, c1, 0 \n\ - bne get_wait \n\ - mrc p14, 0, %0, c0, c5, 0 @ read comms data reg" - : "=r" (__c) : : "cc"); - - return __c; -} -#else -static inline char __dcc_getchar(void) -{ - char __c; - - asm("mrc p14, 0, %0, c0, c5, 0 @ read comms data reg" + asm volatile("mrc p14, 0, %0, c0, c5, 0 @ read comms data reg" : "=r" (__c)); return __c; } -#endif -#if defined(CONFIG_CPU_V7) -static inline void __dcc_putchar(char c) -{ - asm("put_wait: mrc p14, 0, pc, c0, c1, 0 \n\ - bcs put_wait \n\ - mcr p14, 0, %0, c0, c5, 0 " - : : "r" (c) : "cc"); -} -#else static inline void __dcc_putchar(char c) { - asm("mcr p14, 0, %0, c0, c5, 0 @ write a char" + asm volatile("mcr p14, 0, %0, c0, c5, 0 @ write a char" : /* no output register */ : "r" (c)); } -#endif static int hvc_dcc_put_chars(uint32_t vt, const char *buf, int count) { @@ -90,7 +65,7 @@ 
static int hvc_dcc_put_chars(uint32_t vt, const char *buf, int count) while (__dcc_getstatus() & DCC_STATUS_TX) cpu_relax(); - __dcc_putchar((char)(buf[i] & 0xFF)); + __dcc_putchar(buf[i]); } return count; @@ -100,15 +75,11 @@ static int hvc_dcc_get_chars(uint32_t vt, char *buf, int count) { int i; - for (i = 0; i < count; ++i) { - int c = -1; - + for (i = 0; i < count; ++i) if (__dcc_getstatus() & DCC_STATUS_RX) - c = __dcc_getchar(); - if (c < 0) + buf[i] = __dcc_getchar(); + else break; - buf[i] = c; - } return i; } diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c index 3740e327f18..c35f1a73bc8 100644 --- a/drivers/tty/hvc/hvc_xen.c +++ b/drivers/tty/hvc/hvc_xen.c @@ -177,6 +177,8 @@ static int __init xen_hvc_init(void) } if (xencons_irq < 0) xencons_irq = 0; /* NO_IRQ */ + else + set_irq_noprobe(xencons_irq); hp = hvc_alloc(HVC_COOKIE, xencons_irq, ops, 256); if (IS_ERR(hp)) diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c index 67a75a502c0..8a8d6373f16 100644 --- a/drivers/tty/hvc/hvsi.c +++ b/drivers/tty/hvc/hvsi.c @@ -1095,7 +1095,7 @@ static void hvsi_unthrottle(struct tty_struct *tty) h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE); } -static int hvsi_tiocmget(struct tty_struct *tty, struct file *file) +static int hvsi_tiocmget(struct tty_struct *tty) { struct hvsi_struct *hp = tty->driver_data; @@ -1103,8 +1103,8 @@ static int hvsi_tiocmget(struct tty_struct *tty, struct file *file) return hp->mctrl; } -static int hvsi_tiocmset(struct tty_struct *tty, struct file *file, - unsigned int set, unsigned int clear) +static int hvsi_tiocmset(struct tty_struct *tty, + unsigned int set, unsigned int clear) { struct hvsi_struct *hp = tty->driver_data; unsigned long flags; diff --git a/drivers/tty/hvc/virtio_console.c b/drivers/tty/hvc/virtio_console.c deleted file mode 100644 index 896a2ced1d2..00000000000 --- a/drivers/tty/hvc/virtio_console.c +++ /dev/null @@ -1,1838 +0,0 @@ -/* - * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation - * Copyright (C) 2009, 2010 Red Hat, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ -#include <linux/cdev.h> -#include <linux/debugfs.h> -#include <linux/device.h> -#include <linux/err.h> -#include <linux/fs.h> -#include <linux/init.h> -#include <linux/list.h> -#include <linux/poll.h> -#include <linux/sched.h> -#include <linux/slab.h> -#include <linux/spinlock.h> -#include <linux/virtio.h> -#include <linux/virtio_console.h> -#include <linux/wait.h> -#include <linux/workqueue.h> -#include "hvc_console.h" - -/* - * This is a global struct for storing common data for all the devices - * this driver handles. - * - * Mainly, it has a linked list for all the consoles in one place so - * that callbacks from hvc for get_chars(), put_chars() work properly - * across multiple devices and multiple ports per device. 
- */ -struct ports_driver_data { - /* Used for registering chardevs */ - struct class *class; - - /* Used for exporting per-port information to debugfs */ - struct dentry *debugfs_dir; - - /* List of all the devices we're handling */ - struct list_head portdevs; - - /* Number of devices this driver is handling */ - unsigned int index; - - /* - * This is used to keep track of the number of hvc consoles - * spawned by this driver. This number is given as the first - * argument to hvc_alloc(). To correctly map an initial - * console spawned via hvc_instantiate to the console being - * hooked up via hvc_alloc, we need to pass the same vtermno. - * - * We also just assume the first console being initialised was - * the first one that got used as the initial console. - */ - unsigned int next_vtermno; - - /* All the console devices handled by this driver */ - struct list_head consoles; -}; -static struct ports_driver_data pdrvdata; - -DEFINE_SPINLOCK(pdrvdata_lock); - -/* This struct holds information that's relevant only for console ports */ -struct console { - /* We'll place all consoles in a list in the pdrvdata struct */ - struct list_head list; - - /* The hvc device associated with this console port */ - struct hvc_struct *hvc; - - /* The size of the console */ - struct winsize ws; - - /* - * This number identifies the number that we used to register - * with hvc in hvc_instantiate() and hvc_alloc(); this is the - * number passed on by the hvc callbacks to us to - * differentiate between the other console ports handled by - * this driver - */ - u32 vtermno; -}; - -struct port_buffer { - char *buf; - - /* size of the buffer in *buf above */ - size_t size; - - /* used length of the buffer */ - size_t len; - /* offset in the buf from which to consume data */ - size_t offset; -}; - -/* - * This is a per-device struct that stores data common to all the - * ports for that device (vdev->priv). - */ -struct ports_device { - /* Next portdev in the list, head is in the pdrvdata struct */ - struct list_head list; - - /* - * Workqueue handlers where we process deferred work after - * notification - */ - struct work_struct control_work; - - struct list_head ports; - - /* To protect the list of ports */ - spinlock_t ports_lock; - - /* To protect the vq operations for the control channel */ - spinlock_t cvq_lock; - - /* The current config space is stored here */ - struct virtio_console_config config; - - /* The virtio device we're associated with */ - struct virtio_device *vdev; - - /* - * A couple of virtqueues for the control channel: one for - * guest->host transfers, one for host->guest transfers - */ - struct virtqueue *c_ivq, *c_ovq; - - /* Array of per-port IO virtqueues */ - struct virtqueue **in_vqs, **out_vqs; - - /* Used for numbering devices for sysfs and debugfs */ - unsigned int drv_index; - - /* Major number for this device. Ports will be created as minors. */ - int chr_major; -}; - -/* This struct holds the per-port data */ -struct port { - /* Next port in the list, head is in the ports_device */ - struct list_head list; - - /* Pointer to the parent virtio_console device */ - struct ports_device *portdev; - - /* The current buffer from which data has to be fed to readers */ - struct port_buffer *inbuf; - - /* - * To protect the operations on the in_vq associated with this - * port. Has to be a spinlock because it can be called from - * interrupt context (get_char()). - */ - spinlock_t inbuf_lock; - - /* Protect the operations on the out_vq. 
*/ - spinlock_t outvq_lock; - - /* The IO vqs for this port */ - struct virtqueue *in_vq, *out_vq; - - /* File in the debugfs directory that exposes this port's information */ - struct dentry *debugfs_file; - - /* - * The entries in this struct will be valid if this port is - * hooked up to an hvc console - */ - struct console cons; - - /* Each port associates with a separate char device */ - struct cdev *cdev; - struct device *dev; - - /* Reference-counting to handle port hot-unplugs and file operations */ - struct kref kref; - - /* A waitqueue for poll() or blocking read operations */ - wait_queue_head_t waitqueue; - - /* The 'name' of the port that we expose via sysfs properties */ - char *name; - - /* We can notify apps of host connect / disconnect events via SIGIO */ - struct fasync_struct *async_queue; - - /* The 'id' to identify the port with the Host */ - u32 id; - - bool outvq_full; - - /* Is the host device open */ - bool host_connected; - - /* We should allow only one process to open a port */ - bool guest_connected; -}; - -/* This is the very early arch-specified put chars function. */ -static int (*early_put_chars)(u32, const char *, int); - -static struct port *find_port_by_vtermno(u32 vtermno) -{ - struct port *port; - struct console *cons; - unsigned long flags; - - spin_lock_irqsave(&pdrvdata_lock, flags); - list_for_each_entry(cons, &pdrvdata.consoles, list) { - if (cons->vtermno == vtermno) { - port = container_of(cons, struct port, cons); - goto out; - } - } - port = NULL; -out: - spin_unlock_irqrestore(&pdrvdata_lock, flags); - return port; -} - -static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev, - dev_t dev) -{ - struct port *port; - unsigned long flags; - - spin_lock_irqsave(&portdev->ports_lock, flags); - list_for_each_entry(port, &portdev->ports, list) - if (port->cdev->dev == dev) - goto out; - port = NULL; -out: - spin_unlock_irqrestore(&portdev->ports_lock, flags); - - return port; -} - -static struct port *find_port_by_devt(dev_t dev) -{ - struct ports_device *portdev; - struct port *port; - unsigned long flags; - - spin_lock_irqsave(&pdrvdata_lock, flags); - list_for_each_entry(portdev, &pdrvdata.portdevs, list) { - port = find_port_by_devt_in_portdev(portdev, dev); - if (port) - goto out; - } - port = NULL; -out: - spin_unlock_irqrestore(&pdrvdata_lock, flags); - return port; -} - -static struct port *find_port_by_id(struct ports_device *portdev, u32 id) -{ - struct port *port; - unsigned long flags; - - spin_lock_irqsave(&portdev->ports_lock, flags); - list_for_each_entry(port, &portdev->ports, list) - if (port->id == id) - goto out; - port = NULL; -out: - spin_unlock_irqrestore(&portdev->ports_lock, flags); - - return port; -} - -static struct port *find_port_by_vq(struct ports_device *portdev, - struct virtqueue *vq) -{ - struct port *port; - unsigned long flags; - - spin_lock_irqsave(&portdev->ports_lock, flags); - list_for_each_entry(port, &portdev->ports, list) - if (port->in_vq == vq || port->out_vq == vq) - goto out; - port = NULL; -out: - spin_unlock_irqrestore(&portdev->ports_lock, flags); - return port; -} - -static bool is_console_port(struct port *port) -{ - if (port->cons.hvc) - return true; - return false; -} - -static inline bool use_multiport(struct ports_device *portdev) -{ - /* - * This condition can be true when put_chars is called from - * early_init - */ - if (!portdev->vdev) - return 0; - return portdev->vdev->features[0] & (1 << VIRTIO_CONSOLE_F_MULTIPORT); -} - -static void free_buf(struct port_buffer *buf) 
-{ - kfree(buf->buf); - kfree(buf); -} - -static struct port_buffer *alloc_buf(size_t buf_size) -{ - struct port_buffer *buf; - - buf = kmalloc(sizeof(*buf), GFP_KERNEL); - if (!buf) - goto fail; - buf->buf = kzalloc(buf_size, GFP_KERNEL); - if (!buf->buf) - goto free_buf; - buf->len = 0; - buf->offset = 0; - buf->size = buf_size; - return buf; - -free_buf: - kfree(buf); -fail: - return NULL; -} - -/* Callers should take appropriate locks */ -static void *get_inbuf(struct port *port) -{ - struct port_buffer *buf; - struct virtqueue *vq; - unsigned int len; - - vq = port->in_vq; - buf = virtqueue_get_buf(vq, &len); - if (buf) { - buf->len = len; - buf->offset = 0; - } - return buf; -} - -/* - * Create a scatter-gather list representing our input buffer and put - * it in the queue. - * - * Callers should take appropriate locks. - */ -static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf) -{ - struct scatterlist sg[1]; - int ret; - - sg_init_one(sg, buf->buf, buf->size); - - ret = virtqueue_add_buf(vq, sg, 0, 1, buf); - virtqueue_kick(vq); - return ret; -} - -/* Discard any unread data this port has. Callers lockers. */ -static void discard_port_data(struct port *port) -{ - struct port_buffer *buf; - struct virtqueue *vq; - unsigned int len; - int ret; - - vq = port->in_vq; - if (port->inbuf) - buf = port->inbuf; - else - buf = virtqueue_get_buf(vq, &len); - - ret = 0; - while (buf) { - if (add_inbuf(vq, buf) < 0) { - ret++; - free_buf(buf); - } - buf = virtqueue_get_buf(vq, &len); - } - port->inbuf = NULL; - if (ret) - dev_warn(port->dev, "Errors adding %d buffers back to vq\n", - ret); -} - -static bool port_has_data(struct port *port) -{ - unsigned long flags; - bool ret; - - spin_lock_irqsave(&port->inbuf_lock, flags); - if (port->inbuf) { - ret = true; - goto out; - } - port->inbuf = get_inbuf(port); - if (port->inbuf) { - ret = true; - goto out; - } - ret = false; -out: - spin_unlock_irqrestore(&port->inbuf_lock, flags); - return ret; -} - -static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id, - unsigned int event, unsigned int value) -{ - struct scatterlist sg[1]; - struct virtio_console_control cpkt; - struct virtqueue *vq; - unsigned int len; - - if (!use_multiport(portdev)) - return 0; - - cpkt.id = port_id; - cpkt.event = event; - cpkt.value = value; - - vq = portdev->c_ovq; - - sg_init_one(sg, &cpkt, sizeof(cpkt)); - if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt) >= 0) { - virtqueue_kick(vq); - while (!virtqueue_get_buf(vq, &len)) - cpu_relax(); - } - return 0; -} - -static ssize_t send_control_msg(struct port *port, unsigned int event, - unsigned int value) -{ - /* Did the port get unplugged before userspace closed it? */ - if (port->portdev) - return __send_control_msg(port->portdev, port->id, event, value); - return 0; -} - -/* Callers must take the port->outvq_lock */ -static void reclaim_consumed_buffers(struct port *port) -{ - void *buf; - unsigned int len; - - while ((buf = virtqueue_get_buf(port->out_vq, &len))) { - kfree(buf); - port->outvq_full = false; - } -} - -static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count, - bool nonblock) -{ - struct scatterlist sg[1]; - struct virtqueue *out_vq; - ssize_t ret; - unsigned long flags; - unsigned int len; - - out_vq = port->out_vq; - - spin_lock_irqsave(&port->outvq_lock, flags); - - reclaim_consumed_buffers(port); - - sg_init_one(sg, in_buf, in_count); - ret = virtqueue_add_buf(out_vq, sg, 1, 0, in_buf); - - /* Tell Host to go! 
*/ - virtqueue_kick(out_vq); - - if (ret < 0) { - in_count = 0; - goto done; - } - - if (ret == 0) - port->outvq_full = true; - - if (nonblock) - goto done; - - /* - * Wait till the host acknowledges it pushed out the data we - * sent. This is done for data from the hvc_console; the tty - * operations are performed with spinlocks held so we can't - * sleep here. An alternative would be to copy the data to a - * buffer and relax the spinning requirement. The downside is - * we need to kmalloc a GFP_ATOMIC buffer each time the - * console driver writes something out. - */ - while (!virtqueue_get_buf(out_vq, &len)) - cpu_relax(); -done: - spin_unlock_irqrestore(&port->outvq_lock, flags); - /* - * We're expected to return the amount of data we wrote -- all - * of it - */ - return in_count; -} - -/* - * Give out the data that's requested from the buffer that we have - * queued up. - */ -static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count, - bool to_user) -{ - struct port_buffer *buf; - unsigned long flags; - - if (!out_count || !port_has_data(port)) - return 0; - - buf = port->inbuf; - out_count = min(out_count, buf->len - buf->offset); - - if (to_user) { - ssize_t ret; - - ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count); - if (ret) - return -EFAULT; - } else { - memcpy(out_buf, buf->buf + buf->offset, out_count); - } - - buf->offset += out_count; - - if (buf->offset == buf->len) { - /* - * We're done using all the data in this buffer. - * Re-queue so that the Host can send us more data. - */ - spin_lock_irqsave(&port->inbuf_lock, flags); - port->inbuf = NULL; - - if (add_inbuf(port->in_vq, buf) < 0) - dev_warn(port->dev, "failed add_buf\n"); - - spin_unlock_irqrestore(&port->inbuf_lock, flags); - } - /* Return the number of bytes actually copied */ - return out_count; -} - -/* The condition that must be true for polling to end */ -static bool will_read_block(struct port *port) -{ - if (!port->guest_connected) { - /* Port got hot-unplugged. Let's exit. */ - return false; - } - return !port_has_data(port) && port->host_connected; -} - -static bool will_write_block(struct port *port) -{ - bool ret; - - if (!port->guest_connected) { - /* Port got hot-unplugged. Let's exit. */ - return false; - } - if (!port->host_connected) - return true; - - spin_lock_irq(&port->outvq_lock); - /* - * Check if the Host has consumed any buffers since we last - * sent data (this is only applicable for nonblocking ports). - */ - reclaim_consumed_buffers(port); - ret = port->outvq_full; - spin_unlock_irq(&port->outvq_lock); - - return ret; -} - -static ssize_t port_fops_read(struct file *filp, char __user *ubuf, - size_t count, loff_t *offp) -{ - struct port *port; - ssize_t ret; - - port = filp->private_data; - - if (!port_has_data(port)) { - /* - * If nothing's connected on the host just return 0 in - * case of list_empty; this tells the userspace app - * that there's no connection - */ - if (!port->host_connected) - return 0; - if (filp->f_flags & O_NONBLOCK) - return -EAGAIN; - - ret = wait_event_interruptible(port->waitqueue, - !will_read_block(port)); - if (ret < 0) - return ret; - } - /* Port got hot-unplugged. */ - if (!port->guest_connected) - return -ENODEV; - /* - * We could've received a disconnection message while we were - * waiting for more data. - * - * This check is not clubbed in the if() statement above as we - * might receive some data as well as the host could get - * disconnected after we got woken up from our wait. 
So we - * really want to give off whatever data we have and only then - * check for host_connected. - */ - if (!port_has_data(port) && !port->host_connected) - return 0; - - return fill_readbuf(port, ubuf, count, true); -} - -static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, - size_t count, loff_t *offp) -{ - struct port *port; - char *buf; - ssize_t ret; - bool nonblock; - - /* Userspace could be out to fool us */ - if (!count) - return 0; - - port = filp->private_data; - - nonblock = filp->f_flags & O_NONBLOCK; - - if (will_write_block(port)) { - if (nonblock) - return -EAGAIN; - - ret = wait_event_interruptible(port->waitqueue, - !will_write_block(port)); - if (ret < 0) - return ret; - } - /* Port got hot-unplugged. */ - if (!port->guest_connected) - return -ENODEV; - - count = min((size_t)(32 * 1024), count); - - buf = kmalloc(count, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - ret = copy_from_user(buf, ubuf, count); - if (ret) { - ret = -EFAULT; - goto free_buf; - } - - /* - * We now ask send_buf() to not spin for generic ports -- we - * can re-use the same code path that non-blocking file - * descriptors take for blocking file descriptors since the - * wait is already done and we're certain the write will go - * through to the host. - */ - nonblock = true; - ret = send_buf(port, buf, count, nonblock); - - if (nonblock && ret > 0) - goto out; - -free_buf: - kfree(buf); -out: - return ret; -} - -static unsigned int port_fops_poll(struct file *filp, poll_table *wait) -{ - struct port *port; - unsigned int ret; - - port = filp->private_data; - poll_wait(filp, &port->waitqueue, wait); - - if (!port->guest_connected) { - /* Port got unplugged */ - return POLLHUP; - } - ret = 0; - if (!will_read_block(port)) - ret |= POLLIN | POLLRDNORM; - if (!will_write_block(port)) - ret |= POLLOUT; - if (!port->host_connected) - ret |= POLLHUP; - - return ret; -} - -static void remove_port(struct kref *kref); - -static int port_fops_release(struct inode *inode, struct file *filp) -{ - struct port *port; - - port = filp->private_data; - - /* Notify host of port being closed */ - send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0); - - spin_lock_irq(&port->inbuf_lock); - port->guest_connected = false; - - discard_port_data(port); - - spin_unlock_irq(&port->inbuf_lock); - - spin_lock_irq(&port->outvq_lock); - reclaim_consumed_buffers(port); - spin_unlock_irq(&port->outvq_lock); - - /* - * Locks aren't necessary here as a port can't be opened after - * unplug, and if a port isn't unplugged, a kref would already - * exist for the port. Plus, taking ports_lock here would - * create a dependency on other locks taken by functions - * inside remove_port if we're the last holder of the port, - * creating many problems. 
- */ - kref_put(&port->kref, remove_port); - - return 0; -} - -static int port_fops_open(struct inode *inode, struct file *filp) -{ - struct cdev *cdev = inode->i_cdev; - struct port *port; - int ret; - - port = find_port_by_devt(cdev->dev); - filp->private_data = port; - - /* Prevent against a port getting hot-unplugged at the same time */ - spin_lock_irq(&port->portdev->ports_lock); - kref_get(&port->kref); - spin_unlock_irq(&port->portdev->ports_lock); - - /* - * Don't allow opening of console port devices -- that's done - * via /dev/hvc - */ - if (is_console_port(port)) { - ret = -ENXIO; - goto out; - } - - /* Allow only one process to open a particular port at a time */ - spin_lock_irq(&port->inbuf_lock); - if (port->guest_connected) { - spin_unlock_irq(&port->inbuf_lock); - ret = -EMFILE; - goto out; - } - - port->guest_connected = true; - spin_unlock_irq(&port->inbuf_lock); - - spin_lock_irq(&port->outvq_lock); - /* - * There might be a chance that we missed reclaiming a few - * buffers in the window of the port getting previously closed - * and opening now. - */ - reclaim_consumed_buffers(port); - spin_unlock_irq(&port->outvq_lock); - - nonseekable_open(inode, filp); - - /* Notify host of port being opened */ - send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1); - - return 0; -out: - kref_put(&port->kref, remove_port); - return ret; -} - -static int port_fops_fasync(int fd, struct file *filp, int mode) -{ - struct port *port; - - port = filp->private_data; - return fasync_helper(fd, filp, mode, &port->async_queue); -} - -/* - * The file operations that we support: programs in the guest can open - * a console device, read from it, write to it, poll for data and - * close it. The devices are at - * /dev/vport<device number>p<port number> - */ -static const struct file_operations port_fops = { - .owner = THIS_MODULE, - .open = port_fops_open, - .read = port_fops_read, - .write = port_fops_write, - .poll = port_fops_poll, - .release = port_fops_release, - .fasync = port_fops_fasync, - .llseek = no_llseek, -}; - -/* - * The put_chars() callback is pretty straightforward. - * - * We turn the characters into a scatter-gather list, add it to the - * output queue and then kick the Host. Then we sit here waiting for - * it to finish: inefficient in theory, but in practice - * implementations will do it immediately (lguest's Launcher does). - */ -static int put_chars(u32 vtermno, const char *buf, int count) -{ - struct port *port; - - if (unlikely(early_put_chars)) - return early_put_chars(vtermno, buf, count); - - port = find_port_by_vtermno(vtermno); - if (!port) - return -EPIPE; - - return send_buf(port, (void *)buf, count, false); -} - -/* - * get_chars() is the callback from the hvc_console infrastructure - * when an interrupt is received. - * - * We call out to fill_readbuf that gets us the required data from the - * buffers that are queued up. - */ -static int get_chars(u32 vtermno, char *buf, int count) -{ - struct port *port; - - /* If we've not set up the port yet, we have no input to give. */ - if (unlikely(early_put_chars)) - return 0; - - port = find_port_by_vtermno(vtermno); - if (!port) - return -EPIPE; - - /* If we don't have an input queue yet, we can't get input. 
*/ - BUG_ON(!port->in_vq); - - return fill_readbuf(port, buf, count, false); -} - -static void resize_console(struct port *port) -{ - struct virtio_device *vdev; - - /* The port could have been hot-unplugged */ - if (!port || !is_console_port(port)) - return; - - vdev = port->portdev->vdev; - if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)) - hvc_resize(port->cons.hvc, port->cons.ws); -} - -/* We set the configuration at this point, since we now have a tty */ -static int notifier_add_vio(struct hvc_struct *hp, int data) -{ - struct port *port; - - port = find_port_by_vtermno(hp->vtermno); - if (!port) - return -EINVAL; - - hp->irq_requested = 1; - resize_console(port); - - return 0; -} - -static void notifier_del_vio(struct hvc_struct *hp, int data) -{ - hp->irq_requested = 0; -} - -/* The operations for console ports. */ -static const struct hv_ops hv_ops = { - .get_chars = get_chars, - .put_chars = put_chars, - .notifier_add = notifier_add_vio, - .notifier_del = notifier_del_vio, - .notifier_hangup = notifier_del_vio, -}; - -/* - * Console drivers are initialized very early so boot messages can go - * out, so we do things slightly differently from the generic virtio - * initialization of the net and block drivers. - * - * At this stage, the console is output-only. It's too early to set - * up a virtqueue, so we let the drivers do some boutique early-output - * thing. - */ -int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int)) -{ - early_put_chars = put_chars; - return hvc_instantiate(0, 0, &hv_ops); -} - -int init_port_console(struct port *port) -{ - int ret; - - /* - * The Host's telling us this port is a console port. Hook it - * up with an hvc console. - * - * To set up and manage our virtual console, we call - * hvc_alloc(). - * - * The first argument of hvc_alloc() is the virtual console - * number. The second argument is the parameter for the - * notification mechanism (like irq number). We currently - * leave this as zero, virtqueues have implicit notifications. - * - * The third argument is a "struct hv_ops" containing the - * put_chars() get_chars(), notifier_add() and notifier_del() - * pointers. The final argument is the output buffer size: we - * can do any size, so we put PAGE_SIZE here. - */ - port->cons.vtermno = pdrvdata.next_vtermno; - - port->cons.hvc = hvc_alloc(port->cons.vtermno, 0, &hv_ops, PAGE_SIZE); - if (IS_ERR(port->cons.hvc)) { - ret = PTR_ERR(port->cons.hvc); - dev_err(port->dev, - "error %d allocating hvc for port\n", ret); - port->cons.hvc = NULL; - return ret; - } - spin_lock_irq(&pdrvdata_lock); - pdrvdata.next_vtermno++; - list_add_tail(&port->cons.list, &pdrvdata.consoles); - spin_unlock_irq(&pdrvdata_lock); - port->guest_connected = true; - - /* - * Start using the new console output if this is the first - * console to come up. 
- */ - if (early_put_chars) - early_put_chars = NULL; - - /* Notify host of port being opened */ - send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1); - - return 0; -} - -static ssize_t show_port_name(struct device *dev, - struct device_attribute *attr, char *buffer) -{ - struct port *port; - - port = dev_get_drvdata(dev); - - return sprintf(buffer, "%s\n", port->name); -} - -static DEVICE_ATTR(name, S_IRUGO, show_port_name, NULL); - -static struct attribute *port_sysfs_entries[] = { - &dev_attr_name.attr, - NULL -}; - -static struct attribute_group port_attribute_group = { - .name = NULL, /* put in device directory */ - .attrs = port_sysfs_entries, -}; - -static int debugfs_open(struct inode *inode, struct file *filp) -{ - filp->private_data = inode->i_private; - return 0; -} - -static ssize_t debugfs_read(struct file *filp, char __user *ubuf, - size_t count, loff_t *offp) -{ - struct port *port; - char *buf; - ssize_t ret, out_offset, out_count; - - out_count = 1024; - buf = kmalloc(out_count, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - port = filp->private_data; - out_offset = 0; - out_offset += snprintf(buf + out_offset, out_count, - "name: %s\n", port->name ? port->name : ""); - out_offset += snprintf(buf + out_offset, out_count - out_offset, - "guest_connected: %d\n", port->guest_connected); - out_offset += snprintf(buf + out_offset, out_count - out_offset, - "host_connected: %d\n", port->host_connected); - out_offset += snprintf(buf + out_offset, out_count - out_offset, - "outvq_full: %d\n", port->outvq_full); - out_offset += snprintf(buf + out_offset, out_count - out_offset, - "is_console: %s\n", - is_console_port(port) ? "yes" : "no"); - out_offset += snprintf(buf + out_offset, out_count - out_offset, - "console_vtermno: %u\n", port->cons.vtermno); - - ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset); - kfree(buf); - return ret; -} - -static const struct file_operations port_debugfs_ops = { - .owner = THIS_MODULE, - .open = debugfs_open, - .read = debugfs_read, -}; - -static void set_console_size(struct port *port, u16 rows, u16 cols) -{ - if (!port || !is_console_port(port)) - return; - - port->cons.ws.ws_row = rows; - port->cons.ws.ws_col = cols; -} - -static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock) -{ - struct port_buffer *buf; - unsigned int nr_added_bufs; - int ret; - - nr_added_bufs = 0; - do { - buf = alloc_buf(PAGE_SIZE); - if (!buf) - break; - - spin_lock_irq(lock); - ret = add_inbuf(vq, buf); - if (ret < 0) { - spin_unlock_irq(lock); - free_buf(buf); - break; - } - nr_added_bufs++; - spin_unlock_irq(lock); - } while (ret > 0); - - return nr_added_bufs; -} - -static void send_sigio_to_port(struct port *port) -{ - if (port->async_queue && port->guest_connected) - kill_fasync(&port->async_queue, SIGIO, POLL_OUT); -} - -static int add_port(struct ports_device *portdev, u32 id) -{ - char debugfs_name[16]; - struct port *port; - struct port_buffer *buf; - dev_t devt; - unsigned int nr_added_bufs; - int err; - - port = kmalloc(sizeof(*port), GFP_KERNEL); - if (!port) { - err = -ENOMEM; - goto fail; - } - kref_init(&port->kref); - - port->portdev = portdev; - port->id = id; - - port->name = NULL; - port->inbuf = NULL; - port->cons.hvc = NULL; - port->async_queue = NULL; - - port->cons.ws.ws_row = port->cons.ws.ws_col = 0; - - port->host_connected = port->guest_connected = false; - - port->outvq_full = false; - - port->in_vq = portdev->in_vqs[port->id]; - port->out_vq = portdev->out_vqs[port->id]; - - port->cdev = cdev_alloc(); - if 
(!port->cdev) { - dev_err(&port->portdev->vdev->dev, "Error allocating cdev\n"); - err = -ENOMEM; - goto free_port; - } - port->cdev->ops = &port_fops; - - devt = MKDEV(portdev->chr_major, id); - err = cdev_add(port->cdev, devt, 1); - if (err < 0) { - dev_err(&port->portdev->vdev->dev, - "Error %d adding cdev for port %u\n", err, id); - goto free_cdev; - } - port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev, - devt, port, "vport%up%u", - port->portdev->drv_index, id); - if (IS_ERR(port->dev)) { - err = PTR_ERR(port->dev); - dev_err(&port->portdev->vdev->dev, - "Error %d creating device for port %u\n", - err, id); - goto free_cdev; - } - - spin_lock_init(&port->inbuf_lock); - spin_lock_init(&port->outvq_lock); - init_waitqueue_head(&port->waitqueue); - - /* Fill the in_vq with buffers so the host can send us data. */ - nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock); - if (!nr_added_bufs) { - dev_err(port->dev, "Error allocating inbufs\n"); - err = -ENOMEM; - goto free_device; - } - - /* - * If we're not using multiport support, this has to be a console port - */ - if (!use_multiport(port->portdev)) { - err = init_port_console(port); - if (err) - goto free_inbufs; - } - - spin_lock_irq(&portdev->ports_lock); - list_add_tail(&port->list, &port->portdev->ports); - spin_unlock_irq(&portdev->ports_lock); - - /* - * Tell the Host we're set so that it can send us various - * configuration parameters for this port (eg, port name, - * caching, whether this is a console port, etc.) - */ - send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); - - if (pdrvdata.debugfs_dir) { - /* - * Finally, create the debugfs file that we can use to - * inspect a port's state at any time - */ - sprintf(debugfs_name, "vport%up%u", - port->portdev->drv_index, id); - port->debugfs_file = debugfs_create_file(debugfs_name, 0444, - pdrvdata.debugfs_dir, - port, - &port_debugfs_ops); - } - return 0; - -free_inbufs: - while ((buf = virtqueue_detach_unused_buf(port->in_vq))) - free_buf(buf); -free_device: - device_destroy(pdrvdata.class, port->dev->devt); -free_cdev: - cdev_del(port->cdev); -free_port: - kfree(port); -fail: - /* The host might want to notify management sw about port add failure */ - __send_control_msg(portdev, id, VIRTIO_CONSOLE_PORT_READY, 0); - return err; -} - -/* No users remain, remove all port-specific data. */ -static void remove_port(struct kref *kref) -{ - struct port *port; - - port = container_of(kref, struct port, kref); - - sysfs_remove_group(&port->dev->kobj, &port_attribute_group); - device_destroy(pdrvdata.class, port->dev->devt); - cdev_del(port->cdev); - - kfree(port->name); - - debugfs_remove(port->debugfs_file); - - kfree(port); -} - -/* - * Port got unplugged. Remove port from portdev's list and drop the - * kref reference. If no userspace has this port opened, it will - * result in immediate removal the port. - */ -static void unplug_port(struct port *port) -{ - struct port_buffer *buf; - - spin_lock_irq(&port->portdev->ports_lock); - list_del(&port->list); - spin_unlock_irq(&port->portdev->ports_lock); - - if (port->guest_connected) { - port->guest_connected = false; - port->host_connected = false; - wake_up_interruptible(&port->waitqueue); - - /* Let the app know the port is going down. 
*/ - send_sigio_to_port(port); - } - - if (is_console_port(port)) { - spin_lock_irq(&pdrvdata_lock); - list_del(&port->cons.list); - spin_unlock_irq(&pdrvdata_lock); -#if 0 - /* - * hvc_remove() not called as removing one hvc port - * results in other hvc ports getting frozen. - * - * Once this is resolved in hvc, this functionality - * will be enabled. Till that is done, the -EPIPE - * return from get_chars() above will help - * hvc_console.c to clean up on ports we remove here. - */ - hvc_remove(port->cons.hvc); -#endif - } - - /* Remove unused data this port might have received. */ - discard_port_data(port); - - reclaim_consumed_buffers(port); - - /* Remove buffers we queued up for the Host to send us data in. */ - while ((buf = virtqueue_detach_unused_buf(port->in_vq))) - free_buf(buf); - - /* - * We should just assume the device itself has gone off -- - * else a close on an open port later will try to send out a - * control message. - */ - port->portdev = NULL; - - /* - * Locks around here are not necessary - a port can't be - * opened after we removed the port struct from ports_list - * above. - */ - kref_put(&port->kref, remove_port); -} - -/* Any private messages that the Host and Guest want to share */ -static void handle_control_message(struct ports_device *portdev, - struct port_buffer *buf) -{ - struct virtio_console_control *cpkt; - struct port *port; - size_t name_size; - int err; - - cpkt = (struct virtio_console_control *)(buf->buf + buf->offset); - - port = find_port_by_id(portdev, cpkt->id); - if (!port && cpkt->event != VIRTIO_CONSOLE_PORT_ADD) { - /* No valid header at start of buffer. Drop it. */ - dev_dbg(&portdev->vdev->dev, - "Invalid index %u in control packet\n", cpkt->id); - return; - } - - switch (cpkt->event) { - case VIRTIO_CONSOLE_PORT_ADD: - if (port) { - dev_dbg(&portdev->vdev->dev, - "Port %u already added\n", port->id); - send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); - break; - } - if (cpkt->id >= portdev->config.max_nr_ports) { - dev_warn(&portdev->vdev->dev, - "Request for adding port with out-of-bound id %u, max. supported id: %u\n", - cpkt->id, portdev->config.max_nr_ports - 1); - break; - } - add_port(portdev, cpkt->id); - break; - case VIRTIO_CONSOLE_PORT_REMOVE: - unplug_port(port); - break; - case VIRTIO_CONSOLE_CONSOLE_PORT: - if (!cpkt->value) - break; - if (is_console_port(port)) - break; - - init_port_console(port); - /* - * Could remove the port here in case init fails - but - * have to notify the host first. - */ - break; - case VIRTIO_CONSOLE_RESIZE: { - struct { - __u16 rows; - __u16 cols; - } size; - - if (!is_console_port(port)) - break; - - memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt), - sizeof(size)); - set_console_size(port, size.rows, size.cols); - - port->cons.hvc->irq_requested = 1; - resize_console(port); - break; - } - case VIRTIO_CONSOLE_PORT_OPEN: - port->host_connected = cpkt->value; - wake_up_interruptible(&port->waitqueue); - /* - * If the host port got closed and the host had any - * unconsumed buffers, we'll be able to reclaim them - * now. - */ - spin_lock_irq(&port->outvq_lock); - reclaim_consumed_buffers(port); - spin_unlock_irq(&port->outvq_lock); - - /* - * If the guest is connected, it'll be interested in - * knowing the host connection state changed. 
- */ - send_sigio_to_port(port); - break; - case VIRTIO_CONSOLE_PORT_NAME: - /* - * Skip the size of the header and the cpkt to get the size - * of the name that was sent - */ - name_size = buf->len - buf->offset - sizeof(*cpkt) + 1; - - port->name = kmalloc(name_size, GFP_KERNEL); - if (!port->name) { - dev_err(port->dev, - "Not enough space to store port name\n"); - break; - } - strncpy(port->name, buf->buf + buf->offset + sizeof(*cpkt), - name_size - 1); - port->name[name_size - 1] = 0; - - /* - * Since we only have one sysfs attribute, 'name', - * create it only if we have a name for the port. - */ - err = sysfs_create_group(&port->dev->kobj, - &port_attribute_group); - if (err) { - dev_err(port->dev, - "Error %d creating sysfs device attributes\n", - err); - } else { - /* - * Generate a udev event so that appropriate - * symlinks can be created based on udev - * rules. - */ - kobject_uevent(&port->dev->kobj, KOBJ_CHANGE); - } - break; - } -} - -static void control_work_handler(struct work_struct *work) -{ - struct ports_device *portdev; - struct virtqueue *vq; - struct port_buffer *buf; - unsigned int len; - - portdev = container_of(work, struct ports_device, control_work); - vq = portdev->c_ivq; - - spin_lock(&portdev->cvq_lock); - while ((buf = virtqueue_get_buf(vq, &len))) { - spin_unlock(&portdev->cvq_lock); - - buf->len = len; - buf->offset = 0; - - handle_control_message(portdev, buf); - - spin_lock(&portdev->cvq_lock); - if (add_inbuf(portdev->c_ivq, buf) < 0) { - dev_warn(&portdev->vdev->dev, - "Error adding buffer to queue\n"); - free_buf(buf); - } - } - spin_unlock(&portdev->cvq_lock); -} - -static void in_intr(struct virtqueue *vq) -{ - struct port *port; - unsigned long flags; - - port = find_port_by_vq(vq->vdev->priv, vq); - if (!port) - return; - - spin_lock_irqsave(&port->inbuf_lock, flags); - if (!port->inbuf) - port->inbuf = get_inbuf(port); - - /* - * Don't queue up data when port is closed. This condition - * can be reached when a console port is not yet connected (no - * tty is spawned) and the host sends out data to console - * ports. For generic serial ports, the host won't - * (shouldn't) send data till the guest is connected. - */ - if (!port->guest_connected) - discard_port_data(port); - - spin_unlock_irqrestore(&port->inbuf_lock, flags); - - wake_up_interruptible(&port->waitqueue); - - /* Send a SIGIO indicating new data in case the process asked for it */ - send_sigio_to_port(port); - - if (is_console_port(port) && hvc_poll(port->cons.hvc)) - hvc_kick(); -} - -static void control_intr(struct virtqueue *vq) -{ - struct ports_device *portdev; - - portdev = vq->vdev->priv; - schedule_work(&portdev->control_work); -} - -static void config_intr(struct virtio_device *vdev) -{ - struct ports_device *portdev; - - portdev = vdev->priv; - - if (!use_multiport(portdev)) { - struct port *port; - u16 rows, cols; - - vdev->config->get(vdev, - offsetof(struct virtio_console_config, cols), - &cols, sizeof(u16)); - vdev->config->get(vdev, - offsetof(struct virtio_console_config, rows), - &rows, sizeof(u16)); - - port = find_port_by_id(portdev, 0); - set_console_size(port, rows, cols); - - /* - * We'll use this way of resizing only for legacy - * support. For newer userspace - * (VIRTIO_CONSOLE_F_MULTPORT+), use control messages - * to indicate console size changes so that it can be - * done per-port. 
- */ - resize_console(port); - } -} - -static int init_vqs(struct ports_device *portdev) -{ - vq_callback_t **io_callbacks; - char **io_names; - struct virtqueue **vqs; - u32 i, j, nr_ports, nr_queues; - int err; - - nr_ports = portdev->config.max_nr_ports; - nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2; - - vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL); - io_callbacks = kmalloc(nr_queues * sizeof(vq_callback_t *), GFP_KERNEL); - io_names = kmalloc(nr_queues * sizeof(char *), GFP_KERNEL); - portdev->in_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *), - GFP_KERNEL); - portdev->out_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *), - GFP_KERNEL); - if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs || - !portdev->out_vqs) { - err = -ENOMEM; - goto free; - } - - /* - * For backward compat (newer host but older guest), the host - * spawns a console port first and also inits the vqs for port - * 0 before others. - */ - j = 0; - io_callbacks[j] = in_intr; - io_callbacks[j + 1] = NULL; - io_names[j] = "input"; - io_names[j + 1] = "output"; - j += 2; - - if (use_multiport(portdev)) { - io_callbacks[j] = control_intr; - io_callbacks[j + 1] = NULL; - io_names[j] = "control-i"; - io_names[j + 1] = "control-o"; - - for (i = 1; i < nr_ports; i++) { - j += 2; - io_callbacks[j] = in_intr; - io_callbacks[j + 1] = NULL; - io_names[j] = "input"; - io_names[j + 1] = "output"; - } - } - /* Find the queues. */ - err = portdev->vdev->config->find_vqs(portdev->vdev, nr_queues, vqs, - io_callbacks, - (const char **)io_names); - if (err) - goto free; - - j = 0; - portdev->in_vqs[0] = vqs[0]; - portdev->out_vqs[0] = vqs[1]; - j += 2; - if (use_multiport(portdev)) { - portdev->c_ivq = vqs[j]; - portdev->c_ovq = vqs[j + 1]; - - for (i = 1; i < nr_ports; i++) { - j += 2; - portdev->in_vqs[i] = vqs[j]; - portdev->out_vqs[i] = vqs[j + 1]; - } - } - kfree(io_names); - kfree(io_callbacks); - kfree(vqs); - - return 0; - -free: - kfree(portdev->out_vqs); - kfree(portdev->in_vqs); - kfree(io_names); - kfree(io_callbacks); - kfree(vqs); - - return err; -} - -static const struct file_operations portdev_fops = { - .owner = THIS_MODULE, -}; - -/* - * Once we're further in boot, we get probed like any other virtio - * device. - * - * If the host also supports multiple console ports, we check the - * config space to see how many ports the host has spawned. We - * initialize each port found. - */ -static int __devinit virtcons_probe(struct virtio_device *vdev) -{ - struct ports_device *portdev; - int err; - bool multiport; - - portdev = kmalloc(sizeof(*portdev), GFP_KERNEL); - if (!portdev) { - err = -ENOMEM; - goto fail; - } - - /* Attach this portdev to this virtio_device, and vice-versa. 
*/ - portdev->vdev = vdev; - vdev->priv = portdev; - - spin_lock_irq(&pdrvdata_lock); - portdev->drv_index = pdrvdata.index++; - spin_unlock_irq(&pdrvdata_lock); - - portdev->chr_major = register_chrdev(0, "virtio-portsdev", - &portdev_fops); - if (portdev->chr_major < 0) { - dev_err(&vdev->dev, - "Error %d registering chrdev for device %u\n", - portdev->chr_major, portdev->drv_index); - err = portdev->chr_major; - goto free; - } - - multiport = false; - portdev->config.max_nr_ports = 1; - if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) { - multiport = true; - vdev->features[0] |= 1 << VIRTIO_CONSOLE_F_MULTIPORT; - - vdev->config->get(vdev, offsetof(struct virtio_console_config, - max_nr_ports), - &portdev->config.max_nr_ports, - sizeof(portdev->config.max_nr_ports)); - } - - /* Let the Host know we support multiple ports.*/ - vdev->config->finalize_features(vdev); - - err = init_vqs(portdev); - if (err < 0) { - dev_err(&vdev->dev, "Error %d initializing vqs\n", err); - goto free_chrdev; - } - - spin_lock_init(&portdev->ports_lock); - INIT_LIST_HEAD(&portdev->ports); - - if (multiport) { - unsigned int nr_added_bufs; - - spin_lock_init(&portdev->cvq_lock); - INIT_WORK(&portdev->control_work, &control_work_handler); - - nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock); - if (!nr_added_bufs) { - dev_err(&vdev->dev, - "Error allocating buffers for control queue\n"); - err = -ENOMEM; - goto free_vqs; - } - } else { - /* - * For backward compatibility: Create a console port - * if we're running on older host. - */ - add_port(portdev, 0); - } - - spin_lock_irq(&pdrvdata_lock); - list_add_tail(&portdev->list, &pdrvdata.portdevs); - spin_unlock_irq(&pdrvdata_lock); - - __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, - VIRTIO_CONSOLE_DEVICE_READY, 1); - return 0; - -free_vqs: - /* The host might want to notify mgmt sw about device add failure */ - __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, - VIRTIO_CONSOLE_DEVICE_READY, 0); - vdev->config->del_vqs(vdev); - kfree(portdev->in_vqs); - kfree(portdev->out_vqs); -free_chrdev: - unregister_chrdev(portdev->chr_major, "virtio-portsdev"); -free: - kfree(portdev); -fail: - return err; -} - -static void virtcons_remove(struct virtio_device *vdev) -{ - struct ports_device *portdev; - struct port *port, *port2; - - portdev = vdev->priv; - - spin_lock_irq(&pdrvdata_lock); - list_del(&portdev->list); - spin_unlock_irq(&pdrvdata_lock); - - /* Disable interrupts for vqs */ - vdev->config->reset(vdev); - /* Finish up work that's lined up */ - cancel_work_sync(&portdev->control_work); - - list_for_each_entry_safe(port, port2, &portdev->ports, list) - unplug_port(port); - - unregister_chrdev(portdev->chr_major, "virtio-portsdev"); - - /* - * When yanking out a device, we immediately lose the - * (device-side) queues. So there's no point in keeping the - * guest side around till we drop our final reference. This - * also means that any ports which are in an open state will - * have to just stop using the port, as the vqs are going - * away. 
- */ - if (use_multiport(portdev)) { - struct port_buffer *buf; - unsigned int len; - - while ((buf = virtqueue_get_buf(portdev->c_ivq, &len))) - free_buf(buf); - - while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq))) - free_buf(buf); - } - - vdev->config->del_vqs(vdev); - kfree(portdev->in_vqs); - kfree(portdev->out_vqs); - - kfree(portdev); -} - -static struct virtio_device_id id_table[] = { - { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID }, - { 0 }, -}; - -static unsigned int features[] = { - VIRTIO_CONSOLE_F_SIZE, - VIRTIO_CONSOLE_F_MULTIPORT, -}; - -static struct virtio_driver virtio_console = { - .feature_table = features, - .feature_table_size = ARRAY_SIZE(features), - .driver.name = KBUILD_MODNAME, - .driver.owner = THIS_MODULE, - .id_table = id_table, - .probe = virtcons_probe, - .remove = virtcons_remove, - .config_changed = config_intr, -}; - -static int __init init(void) -{ - int err; - - pdrvdata.class = class_create(THIS_MODULE, "virtio-ports"); - if (IS_ERR(pdrvdata.class)) { - err = PTR_ERR(pdrvdata.class); - pr_err("Error %d creating virtio-ports class\n", err); - return err; - } - - pdrvdata.debugfs_dir = debugfs_create_dir("virtio-ports", NULL); - if (!pdrvdata.debugfs_dir) { - pr_warning("Error %ld creating debugfs dir for virtio-ports\n", - PTR_ERR(pdrvdata.debugfs_dir)); - } - INIT_LIST_HEAD(&pdrvdata.consoles); - INIT_LIST_HEAD(&pdrvdata.portdevs); - - return register_virtio_driver(&virtio_console); -} - -static void __exit fini(void) -{ - unregister_virtio_driver(&virtio_console); - - class_destroy(pdrvdata.class); - if (pdrvdata.debugfs_dir) - debugfs_remove_recursive(pdrvdata.debugfs_dir); -} -module_init(init); -module_exit(fini); - -MODULE_DEVICE_TABLE(virtio, id_table); -MODULE_DESCRIPTION("Virtio console driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/tty/ipwireless/Makefile b/drivers/tty/ipwireless/Makefile new file mode 100644 index 00000000000..db80873d7f2 --- /dev/null +++ b/drivers/tty/ipwireless/Makefile @@ -0,0 +1,10 @@ +# +# drivers/char/pcmcia/ipwireless/Makefile +# +# Makefile for the IPWireless driver +# + +obj-$(CONFIG_IPWIRELESS) += ipwireless.o + +ipwireless-y := hardware.o main.o network.o tty.o + diff --git a/drivers/tty/ipwireless/hardware.c b/drivers/tty/ipwireless/hardware.c new file mode 100644 index 00000000000..0aeb5a38d29 --- /dev/null +++ b/drivers/tty/ipwireless/hardware.c @@ -0,0 +1,1764 @@ +/* + * IPWireless 3G PCMCIA Network Driver + * + * Original code + * by Stephen Blackheath <stephen@blacksapphire.com>, + * Ben Martel <benm@symmetric.co.nz> + * + * Copyrighted as follows: + * Copyright (C) 2004 by Symmetric Systems Ltd (NZ) + * + * Various driver changes and rewrites, port to new kernels + * Copyright (C) 2006-2007 Jiri Kosina + * + * Misc code cleanups and updates + * Copyright (C) 2007 David Sterba + */ + +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/slab.h> + +#include "hardware.h" +#include "setup_protocol.h" +#include "network.h" +#include "main.h" + +static void ipw_send_setup_packet(struct ipw_hardware *hw); +static void handle_received_SETUP_packet(struct ipw_hardware *ipw, + unsigned int address, + const unsigned char *data, int len, + int is_last); +static void ipwireless_setup_timer(unsigned long data); +static void handle_received_CTRL_packet(struct ipw_hardware *hw, + unsigned int channel_idx, const unsigned char *data, int len); + +/*#define TIMING_DIAGNOSTICS*/ + +#ifdef TIMING_DIAGNOSTICS + +static struct 
timing_stats { + unsigned long last_report_time; + unsigned long read_time; + unsigned long write_time; + unsigned long read_bytes; + unsigned long write_bytes; + unsigned long start_time; +}; + +static void start_timing(void) +{ + timing_stats.start_time = jiffies; +} + +static void end_read_timing(unsigned length) +{ + timing_stats.read_time += (jiffies - start_time); + timing_stats.read_bytes += length + 2; + report_timing(); +} + +static void end_write_timing(unsigned length) +{ + timing_stats.write_time += (jiffies - start_time); + timing_stats.write_bytes += length + 2; + report_timing(); +} + +static void report_timing(void) +{ + unsigned long since = jiffies - timing_stats.last_report_time; + + /* If it's been more than one second... */ + if (since >= HZ) { + int first = (timing_stats.last_report_time == 0); + + timing_stats.last_report_time = jiffies; + if (!first) + printk(KERN_INFO IPWIRELESS_PCCARD_NAME + ": %u us elapsed - read %lu bytes in %u us, wrote %lu bytes in %u us\n", + jiffies_to_usecs(since), + timing_stats.read_bytes, + jiffies_to_usecs(timing_stats.read_time), + timing_stats.write_bytes, + jiffies_to_usecs(timing_stats.write_time)); + + timing_stats.read_time = 0; + timing_stats.write_time = 0; + timing_stats.read_bytes = 0; + timing_stats.write_bytes = 0; + } +} +#else +static void start_timing(void) { } +static void end_read_timing(unsigned length) { } +static void end_write_timing(unsigned length) { } +#endif + +/* Imported IPW definitions */ + +#define LL_MTU_V1 318 +#define LL_MTU_V2 250 +#define LL_MTU_MAX (LL_MTU_V1 > LL_MTU_V2 ? LL_MTU_V1 : LL_MTU_V2) + +#define PRIO_DATA 2 +#define PRIO_CTRL 1 +#define PRIO_SETUP 0 + +/* Addresses */ +#define ADDR_SETUP_PROT 0 + +/* Protocol ids */ +enum { + /* Identifier for the Com Data protocol */ + TL_PROTOCOLID_COM_DATA = 0, + + /* Identifier for the Com Control protocol */ + TL_PROTOCOLID_COM_CTRL = 1, + + /* Identifier for the Setup protocol */ + TL_PROTOCOLID_SETUP = 2 +}; + +/* Number of bytes in NL packet header (cannot do + * sizeof(nl_packet_header) since it's a bitfield) */ +#define NL_FIRST_PACKET_HEADER_SIZE 3 + +/* Number of bytes in NL packet header (cannot do + * sizeof(nl_packet_header) since it's a bitfield) */ +#define NL_FOLLOWING_PACKET_HEADER_SIZE 1 + +struct nl_first_packet_header { + unsigned char protocol:3; + unsigned char address:3; + unsigned char packet_rank:2; + unsigned char length_lsb; + unsigned char length_msb; +}; + +struct nl_packet_header { + unsigned char protocol:3; + unsigned char address:3; + unsigned char packet_rank:2; +}; + +/* Value of 'packet_rank' above */ +#define NL_INTERMEDIATE_PACKET 0x0 +#define NL_LAST_PACKET 0x1 +#define NL_FIRST_PACKET 0x2 + +union nl_packet { + /* Network packet header of the first packet (a special case) */ + struct nl_first_packet_header hdr_first; + /* Network packet header of the following packets (if any) */ + struct nl_packet_header hdr; + /* Complete network packet (header + data) */ + unsigned char rawpkt[LL_MTU_MAX]; +} __attribute__ ((__packed__)); + +#define HW_VERSION_UNKNOWN -1 +#define HW_VERSION_1 1 +#define HW_VERSION_2 2 + +/* IPW I/O ports */ +#define IOIER 0x00 /* Interrupt Enable Register */ +#define IOIR 0x02 /* Interrupt Source/ACK register */ +#define IODCR 0x04 /* Data Control Register */ +#define IODRR 0x06 /* Data Read Register */ +#define IODWR 0x08 /* Data Write Register */ +#define IOESR 0x0A /* Embedded Driver Status Register */ +#define IORXR 0x0C /* Rx Fifo Register (Host to Embedded) */ +#define IOTXR 0x0E /* Tx Fifo 
Register (Embedded to Host) */ + +/* I/O ports and bit definitions for version 1 of the hardware */ + +/* IER bits*/ +#define IER_RXENABLED 0x1 +#define IER_TXENABLED 0x2 + +/* ISR bits */ +#define IR_RXINTR 0x1 +#define IR_TXINTR 0x2 + +/* DCR bits */ +#define DCR_RXDONE 0x1 +#define DCR_TXDONE 0x2 +#define DCR_RXRESET 0x4 +#define DCR_TXRESET 0x8 + +/* I/O ports and bit definitions for version 2 of the hardware */ + +struct MEMCCR { + unsigned short reg_config_option; /* PCCOR: Configuration Option Register */ + unsigned short reg_config_and_status; /* PCCSR: Configuration and Status Register */ + unsigned short reg_pin_replacement; /* PCPRR: Pin Replacemant Register */ + unsigned short reg_socket_and_copy; /* PCSCR: Socket and Copy Register */ + unsigned short reg_ext_status; /* PCESR: Extendend Status Register */ + unsigned short reg_io_base; /* PCIOB: I/O Base Register */ +}; + +struct MEMINFREG { + unsigned short memreg_tx_old; /* TX Register (R/W) */ + unsigned short pad1; + unsigned short memreg_rx_done; /* RXDone Register (R/W) */ + unsigned short pad2; + unsigned short memreg_rx; /* RX Register (R/W) */ + unsigned short pad3; + unsigned short memreg_pc_interrupt_ack; /* PC intr Ack Register (W) */ + unsigned short pad4; + unsigned long memreg_card_present;/* Mask for Host to check (R) for + * CARD_PRESENT_VALUE */ + unsigned short memreg_tx_new; /* TX2 (new) Register (R/W) */ +}; + +#define CARD_PRESENT_VALUE (0xBEEFCAFEUL) + +#define MEMTX_TX 0x0001 +#define MEMRX_RX 0x0001 +#define MEMRX_RX_DONE 0x0001 +#define MEMRX_PCINTACKK 0x0001 + +#define NL_NUM_OF_PRIORITIES 3 +#define NL_NUM_OF_PROTOCOLS 3 +#define NL_NUM_OF_ADDRESSES NO_OF_IPW_CHANNELS + +struct ipw_hardware { + unsigned int base_port; + short hw_version; + unsigned short ll_mtu; + spinlock_t lock; + + int initializing; + int init_loops; + struct timer_list setup_timer; + + /* Flag if hw is ready to send next packet */ + int tx_ready; + /* Count of pending packets to be sent */ + int tx_queued; + struct list_head tx_queue[NL_NUM_OF_PRIORITIES]; + + int rx_bytes_queued; + struct list_head rx_queue; + /* Pool of rx_packet structures that are not currently used. */ + struct list_head rx_pool; + int rx_pool_size; + /* True if reception of data is blocked while userspace processes it. */ + int blocking_rx; + /* True if there is RX data ready on the hardware. */ + int rx_ready; + unsigned short last_memtx_serial; + /* + * Newer versions of the V2 card firmware send serial numbers in the + * MemTX register. 'serial_number_detected' is set true when we detect + * a non-zero serial number (indicating the new firmware). Thereafter, + * the driver can safely ignore the Timer Recovery re-sends to avoid + * out-of-sync problems. + */ + int serial_number_detected; + struct work_struct work_rx; + + /* True if we are to send the set-up data to the hardware. */ + int to_setup; + + /* Card has been removed */ + int removed; + /* Saved irq value when we disable the interrupt. */ + int irq; + /* True if this driver is shutting down. */ + int shutting_down; + /* Modem control lines */ + unsigned int control_lines[NL_NUM_OF_ADDRESSES]; + struct ipw_rx_packet *packet_assembler[NL_NUM_OF_ADDRESSES]; + + struct tasklet_struct tasklet; + + /* The handle for the network layer, for the sending of events to it. 
*/ + struct ipw_network *network; + struct MEMINFREG __iomem *memory_info_regs; + struct MEMCCR __iomem *memregs_CCR; + void (*reboot_callback) (void *data); + void *reboot_callback_data; + + unsigned short __iomem *memreg_tx; +}; + +/* + * Packet info structure for tx packets. + * Note: not all the fields defined here are required for all protocols + */ +struct ipw_tx_packet { + struct list_head queue; + /* channel idx + 1 */ + unsigned char dest_addr; + /* SETUP, CTRL or DATA */ + unsigned char protocol; + /* Length of data block, which starts at the end of this structure */ + unsigned short length; + /* Sending state */ + /* Offset of where we've sent up to so far */ + unsigned long offset; + /* Count of packet fragments, starting at 0 */ + int fragment_count; + + /* Called after packet is sent and before is freed */ + void (*packet_callback) (void *cb_data, unsigned int packet_length); + void *callback_data; +}; + +/* Signals from DTE */ +#define COMCTRL_RTS 0 +#define COMCTRL_DTR 1 + +/* Signals from DCE */ +#define COMCTRL_CTS 2 +#define COMCTRL_DCD 3 +#define COMCTRL_DSR 4 +#define COMCTRL_RI 5 + +struct ipw_control_packet_body { + /* DTE signal or DCE signal */ + unsigned char sig_no; + /* 0: set signal, 1: clear signal */ + unsigned char value; +} __attribute__ ((__packed__)); + +struct ipw_control_packet { + struct ipw_tx_packet header; + struct ipw_control_packet_body body; +}; + +struct ipw_rx_packet { + struct list_head queue; + unsigned int capacity; + unsigned int length; + unsigned int protocol; + unsigned int channel_idx; +}; + +static char *data_type(const unsigned char *buf, unsigned length) +{ + struct nl_packet_header *hdr = (struct nl_packet_header *) buf; + + if (length == 0) + return " "; + + if (hdr->packet_rank & NL_FIRST_PACKET) { + switch (hdr->protocol) { + case TL_PROTOCOLID_COM_DATA: return "DATA "; + case TL_PROTOCOLID_COM_CTRL: return "CTRL "; + case TL_PROTOCOLID_SETUP: return "SETUP"; + default: return "???? "; + } + } else + return " "; +} + +#define DUMP_MAX_BYTES 64 + +static void dump_data_bytes(const char *type, const unsigned char *data, + unsigned length) +{ + char prefix[56]; + + sprintf(prefix, IPWIRELESS_PCCARD_NAME ": %s %s ", + type, data_type(data, length)); + print_hex_dump_bytes(prefix, 0, (void *)data, + length < DUMP_MAX_BYTES ? 
length : DUMP_MAX_BYTES); +} + +static void swap_packet_bitfield_to_le(unsigned char *data) +{ +#ifdef __BIG_ENDIAN_BITFIELD + unsigned char tmp = *data, ret = 0; + + /* + * transform bits from aa.bbb.ccc to ccc.bbb.aa + */ + ret |= tmp & 0xc0 >> 6; + ret |= tmp & 0x38 >> 1; + ret |= tmp & 0x07 << 5; + *data = ret & 0xff; +#endif +} + +static void swap_packet_bitfield_from_le(unsigned char *data) +{ +#ifdef __BIG_ENDIAN_BITFIELD + unsigned char tmp = *data, ret = 0; + + /* + * transform bits from ccc.bbb.aa to aa.bbb.ccc + */ + ret |= tmp & 0xe0 >> 5; + ret |= tmp & 0x1c << 1; + ret |= tmp & 0x03 << 6; + *data = ret & 0xff; +#endif +} + +static void do_send_fragment(struct ipw_hardware *hw, unsigned char *data, + unsigned length) +{ + unsigned i; + unsigned long flags; + + start_timing(); + BUG_ON(length > hw->ll_mtu); + + if (ipwireless_debug) + dump_data_bytes("send", data, length); + + spin_lock_irqsave(&hw->lock, flags); + + hw->tx_ready = 0; + swap_packet_bitfield_to_le(data); + + if (hw->hw_version == HW_VERSION_1) { + outw((unsigned short) length, hw->base_port + IODWR); + + for (i = 0; i < length; i += 2) { + unsigned short d = data[i]; + __le16 raw_data; + + if (i + 1 < length) + d |= data[i + 1] << 8; + raw_data = cpu_to_le16(d); + outw(raw_data, hw->base_port + IODWR); + } + + outw(DCR_TXDONE, hw->base_port + IODCR); + } else if (hw->hw_version == HW_VERSION_2) { + outw((unsigned short) length, hw->base_port); + + for (i = 0; i < length; i += 2) { + unsigned short d = data[i]; + __le16 raw_data; + + if (i + 1 < length) + d |= data[i + 1] << 8; + raw_data = cpu_to_le16(d); + outw(raw_data, hw->base_port); + } + while ((i & 3) != 2) { + outw((unsigned short) 0xDEAD, hw->base_port); + i += 2; + } + writew(MEMRX_RX, &hw->memory_info_regs->memreg_rx); + } + + spin_unlock_irqrestore(&hw->lock, flags); + + end_write_timing(length); +} + +static void do_send_packet(struct ipw_hardware *hw, struct ipw_tx_packet *packet) +{ + unsigned short fragment_data_len; + unsigned short data_left = packet->length - packet->offset; + unsigned short header_size; + union nl_packet pkt; + + header_size = + (packet->fragment_count == 0) + ? NL_FIRST_PACKET_HEADER_SIZE + : NL_FOLLOWING_PACKET_HEADER_SIZE; + fragment_data_len = hw->ll_mtu - header_size; + if (data_left < fragment_data_len) + fragment_data_len = data_left; + + /* + * hdr_first is now in machine bitfield order, which will be swapped + * to le just before it goes to hw + */ + pkt.hdr_first.protocol = packet->protocol; + pkt.hdr_first.address = packet->dest_addr; + pkt.hdr_first.packet_rank = 0; + + /* First packet? */ + if (packet->fragment_count == 0) { + pkt.hdr_first.packet_rank |= NL_FIRST_PACKET; + pkt.hdr_first.length_lsb = (unsigned char) packet->length; + pkt.hdr_first.length_msb = + (unsigned char) (packet->length >> 8); + } + + memcpy(pkt.rawpkt + header_size, + ((unsigned char *) packet) + sizeof(struct ipw_tx_packet) + + packet->offset, fragment_data_len); + packet->offset += fragment_data_len; + packet->fragment_count++; + + /* Last packet? (May also be first packet.) */ + if (packet->offset == packet->length) + pkt.hdr_first.packet_rank |= NL_LAST_PACKET; + do_send_fragment(hw, pkt.rawpkt, header_size + fragment_data_len); + + /* If this packet has unsent data, then re-queue it. 
*/ + if (packet->offset < packet->length) { + /* + * Re-queue it at the head of the highest priority queue so + * it goes before all other packets + */ + unsigned long flags; + + spin_lock_irqsave(&hw->lock, flags); + list_add(&packet->queue, &hw->tx_queue[0]); + hw->tx_queued++; + spin_unlock_irqrestore(&hw->lock, flags); + } else { + if (packet->packet_callback) + packet->packet_callback(packet->callback_data, + packet->length); + kfree(packet); + } +} + +static void ipw_setup_hardware(struct ipw_hardware *hw) +{ + unsigned long flags; + + spin_lock_irqsave(&hw->lock, flags); + if (hw->hw_version == HW_VERSION_1) { + /* Reset RX FIFO */ + outw(DCR_RXRESET, hw->base_port + IODCR); + /* SB: Reset TX FIFO */ + outw(DCR_TXRESET, hw->base_port + IODCR); + + /* Enable TX and RX interrupts. */ + outw(IER_TXENABLED | IER_RXENABLED, hw->base_port + IOIER); + } else { + /* + * Set INTRACK bit (bit 0), which means we must explicitly + * acknowledge interrupts by clearing bit 2 of reg_config_and_status. + */ + unsigned short csr = readw(&hw->memregs_CCR->reg_config_and_status); + + csr |= 1; + writew(csr, &hw->memregs_CCR->reg_config_and_status); + } + spin_unlock_irqrestore(&hw->lock, flags); +} + +/* + * If 'packet' is NULL, then this function allocates a new packet, setting its + * length to 0 and ensuring it has the specified minimum amount of free space. + * + * If 'packet' is not NULL, then this function enlarges it if it doesn't + * have the specified minimum amount of free space. + * + */ +static struct ipw_rx_packet *pool_allocate(struct ipw_hardware *hw, + struct ipw_rx_packet *packet, + int minimum_free_space) +{ + + if (!packet) { + unsigned long flags; + + spin_lock_irqsave(&hw->lock, flags); + if (!list_empty(&hw->rx_pool)) { + packet = list_first_entry(&hw->rx_pool, + struct ipw_rx_packet, queue); + hw->rx_pool_size--; + spin_unlock_irqrestore(&hw->lock, flags); + list_del(&packet->queue); + } else { + const int min_capacity = + ipwireless_ppp_mru(hw->network) + 2; + int new_capacity; + + spin_unlock_irqrestore(&hw->lock, flags); + new_capacity = + (minimum_free_space > min_capacity + ? minimum_free_space + : min_capacity); + packet = kmalloc(sizeof(struct ipw_rx_packet) + + new_capacity, GFP_ATOMIC); + if (!packet) + return NULL; + packet->capacity = new_capacity; + } + packet->length = 0; + } + + if (packet->length + minimum_free_space > packet->capacity) { + struct ipw_rx_packet *old_packet = packet; + + packet = kmalloc(sizeof(struct ipw_rx_packet) + + old_packet->length + minimum_free_space, + GFP_ATOMIC); + if (!packet) { + kfree(old_packet); + return NULL; + } + memcpy(packet, old_packet, + sizeof(struct ipw_rx_packet) + + old_packet->length); + packet->capacity = old_packet->length + minimum_free_space; + kfree(old_packet); + } + + return packet; +} + +static void pool_free(struct ipw_hardware *hw, struct ipw_rx_packet *packet) +{ + if (hw->rx_pool_size > 6) + kfree(packet); + else { + hw->rx_pool_size++; + list_add(&packet->queue, &hw->rx_pool); + } +} + +static void queue_received_packet(struct ipw_hardware *hw, + unsigned int protocol, + unsigned int address, + const unsigned char *data, int length, + int is_last) +{ + unsigned int channel_idx = address - 1; + struct ipw_rx_packet *packet = NULL; + unsigned long flags; + + /* Discard packet if channel index is out of range. 
*/ + if (channel_idx >= NL_NUM_OF_ADDRESSES) { + printk(KERN_INFO IPWIRELESS_PCCARD_NAME + ": data packet has bad address %u\n", address); + return; + } + + /* + * ->packet_assembler is safe to touch unlocked, this is the only place + */ + if (protocol == TL_PROTOCOLID_COM_DATA) { + struct ipw_rx_packet **assem = + &hw->packet_assembler[channel_idx]; + + /* + * Create a new packet, or assembler already contains one + * enlarge it by 'length' bytes. + */ + (*assem) = pool_allocate(hw, *assem, length); + if (!(*assem)) { + printk(KERN_ERR IPWIRELESS_PCCARD_NAME + ": no memory for incomming data packet, dropped!\n"); + return; + } + (*assem)->protocol = protocol; + (*assem)->channel_idx = channel_idx; + + /* Append this packet data onto existing data. */ + memcpy((unsigned char *)(*assem) + + sizeof(struct ipw_rx_packet) + + (*assem)->length, data, length); + (*assem)->length += length; + if (is_last) { + packet = *assem; + *assem = NULL; + /* Count queued DATA bytes only */ + spin_lock_irqsave(&hw->lock, flags); + hw->rx_bytes_queued += packet->length; + spin_unlock_irqrestore(&hw->lock, flags); + } + } else { + /* If it's a CTRL packet, don't assemble, just queue it. */ + packet = pool_allocate(hw, NULL, length); + if (!packet) { + printk(KERN_ERR IPWIRELESS_PCCARD_NAME + ": no memory for incomming ctrl packet, dropped!\n"); + return; + } + packet->protocol = protocol; + packet->channel_idx = channel_idx; + memcpy((unsigned char *)packet + sizeof(struct ipw_rx_packet), + data, length); + packet->length = length; + } + + /* + * If this is the last packet, then send the assembled packet on to the + * network layer. + */ + if (packet) { + spin_lock_irqsave(&hw->lock, flags); + list_add_tail(&packet->queue, &hw->rx_queue); + /* Block reception of incoming packets if queue is full. */ + hw->blocking_rx = + (hw->rx_bytes_queued >= IPWIRELESS_RX_QUEUE_SIZE); + + spin_unlock_irqrestore(&hw->lock, flags); + schedule_work(&hw->work_rx); + } +} + +/* + * Workqueue callback + */ +static void ipw_receive_data_work(struct work_struct *work_rx) +{ + struct ipw_hardware *hw = + container_of(work_rx, struct ipw_hardware, work_rx); + unsigned long flags; + + spin_lock_irqsave(&hw->lock, flags); + while (!list_empty(&hw->rx_queue)) { + struct ipw_rx_packet *packet = + list_first_entry(&hw->rx_queue, + struct ipw_rx_packet, queue); + + if (hw->shutting_down) + break; + list_del(&packet->queue); + + /* + * Note: ipwireless_network_packet_received must be called in a + * process context (i.e. via schedule_work) because the tty + * output code can sleep in the tty_flip_buffer_push call. + */ + if (packet->protocol == TL_PROTOCOLID_COM_DATA) { + if (hw->network != NULL) { + /* If the network hasn't been disconnected. */ + spin_unlock_irqrestore(&hw->lock, flags); + /* + * This must run unlocked due to tty processing + * and mutex locking + */ + ipwireless_network_packet_received( + hw->network, + packet->channel_idx, + (unsigned char *)packet + + sizeof(struct ipw_rx_packet), + packet->length); + spin_lock_irqsave(&hw->lock, flags); + } + /* Count queued DATA bytes only */ + hw->rx_bytes_queued -= packet->length; + } else { + /* + * This is safe to be called locked, callchain does + * not block + */ + handle_received_CTRL_packet(hw, packet->channel_idx, + (unsigned char *)packet + + sizeof(struct ipw_rx_packet), + packet->length); + } + pool_free(hw, packet); + /* + * Unblock reception of incoming packets if queue is no longer + * full. 
+ */ + hw->blocking_rx = + hw->rx_bytes_queued >= IPWIRELESS_RX_QUEUE_SIZE; + if (hw->shutting_down) + break; + } + spin_unlock_irqrestore(&hw->lock, flags); +} + +static void handle_received_CTRL_packet(struct ipw_hardware *hw, + unsigned int channel_idx, + const unsigned char *data, int len) +{ + const struct ipw_control_packet_body *body = + (const struct ipw_control_packet_body *) data; + unsigned int changed_mask; + + if (len != sizeof(struct ipw_control_packet_body)) { + printk(KERN_INFO IPWIRELESS_PCCARD_NAME + ": control packet was %d bytes - wrong size!\n", + len); + return; + } + + switch (body->sig_no) { + case COMCTRL_CTS: + changed_mask = IPW_CONTROL_LINE_CTS; + break; + case COMCTRL_DCD: + changed_mask = IPW_CONTROL_LINE_DCD; + break; + case COMCTRL_DSR: + changed_mask = IPW_CONTROL_LINE_DSR; + break; + case COMCTRL_RI: + changed_mask = IPW_CONTROL_LINE_RI; + break; + default: + changed_mask = 0; + } + + if (changed_mask != 0) { + if (body->value) + hw->control_lines[channel_idx] |= changed_mask; + else + hw->control_lines[channel_idx] &= ~changed_mask; + if (hw->network) + ipwireless_network_notify_control_line_change( + hw->network, + channel_idx, + hw->control_lines[channel_idx], + changed_mask); + } +} + +static void handle_received_packet(struct ipw_hardware *hw, + const union nl_packet *packet, + unsigned short len) +{ + unsigned int protocol = packet->hdr.protocol; + unsigned int address = packet->hdr.address; + unsigned int header_length; + const unsigned char *data; + unsigned int data_len; + int is_last = packet->hdr.packet_rank & NL_LAST_PACKET; + + if (packet->hdr.packet_rank & NL_FIRST_PACKET) + header_length = NL_FIRST_PACKET_HEADER_SIZE; + else + header_length = NL_FOLLOWING_PACKET_HEADER_SIZE; + + data = packet->rawpkt + header_length; + data_len = len - header_length; + switch (protocol) { + case TL_PROTOCOLID_COM_DATA: + case TL_PROTOCOLID_COM_CTRL: + queue_received_packet(hw, protocol, address, data, data_len, + is_last); + break; + case TL_PROTOCOLID_SETUP: + handle_received_SETUP_packet(hw, address, data, data_len, + is_last); + break; + } +} + +static void acknowledge_data_read(struct ipw_hardware *hw) +{ + if (hw->hw_version == HW_VERSION_1) + outw(DCR_RXDONE, hw->base_port + IODCR); + else + writew(MEMRX_PCINTACKK, + &hw->memory_info_regs->memreg_pc_interrupt_ack); +} + +/* + * Retrieve a packet from the IPW hardware. 
+ */ +static void do_receive_packet(struct ipw_hardware *hw) +{ + unsigned len; + unsigned i; + unsigned char pkt[LL_MTU_MAX]; + + start_timing(); + + if (hw->hw_version == HW_VERSION_1) { + len = inw(hw->base_port + IODRR); + if (len > hw->ll_mtu) { + printk(KERN_INFO IPWIRELESS_PCCARD_NAME + ": received a packet of %u bytes - longer than the MTU!\n", len); + outw(DCR_RXDONE | DCR_RXRESET, hw->base_port + IODCR); + return; + } + + for (i = 0; i < len; i += 2) { + __le16 raw_data = inw(hw->base_port + IODRR); + unsigned short data = le16_to_cpu(raw_data); + + pkt[i] = (unsigned char) data; + pkt[i + 1] = (unsigned char) (data >> 8); + } + } else { + len = inw(hw->base_port); + if (len > hw->ll_mtu) { + printk(KERN_INFO IPWIRELESS_PCCARD_NAME + ": received a packet of %u bytes - longer than the MTU!\n", len); + writew(MEMRX_PCINTACKK, + &hw->memory_info_regs->memreg_pc_interrupt_ack); + return; + } + + for (i = 0; i < len; i += 2) { + __le16 raw_data = inw(hw->base_port); + unsigned short data = le16_to_cpu(raw_data); + + pkt[i] = (unsigned char) data; + pkt[i + 1] = (unsigned char) (data >> 8); + } + + while ((i & 3) != 2) { + inw(hw->base_port); + i += 2; + } + } + + acknowledge_data_read(hw); + + swap_packet_bitfield_from_le(pkt); + + if (ipwireless_debug) + dump_data_bytes("recv", pkt, len); + + handle_received_packet(hw, (union nl_packet *) pkt, len); + + end_read_timing(len); +} + +static int get_current_packet_priority(struct ipw_hardware *hw) +{ + /* + * If we're initializing, don't send anything of higher priority than + * PRIO_SETUP. The network layer therefore need not care about + * hardware initialization - any of its stuff will simply be queued + * until setup is complete. + */ + return (hw->to_setup || hw->initializing + ? PRIO_SETUP + 1 : NL_NUM_OF_PRIORITIES); +} + +/* + * return 1 if something has been received from hw + */ +static int get_packets_from_hw(struct ipw_hardware *hw) +{ + int received = 0; + unsigned long flags; + + spin_lock_irqsave(&hw->lock, flags); + while (hw->rx_ready && !hw->blocking_rx) { + received = 1; + hw->rx_ready--; + spin_unlock_irqrestore(&hw->lock, flags); + + do_receive_packet(hw); + + spin_lock_irqsave(&hw->lock, flags); + } + spin_unlock_irqrestore(&hw->lock, flags); + + return received; +} + +/* + * Send pending packet up to given priority, prioritize SETUP data until + * hardware is fully setup. 
+ * + * return 1 if more packets can be sent + */ +static int send_pending_packet(struct ipw_hardware *hw, int priority_limit) +{ + int more_to_send = 0; + unsigned long flags; + + spin_lock_irqsave(&hw->lock, flags); + if (hw->tx_queued && hw->tx_ready) { + int priority; + struct ipw_tx_packet *packet = NULL; + + /* Pick a packet */ + for (priority = 0; priority < priority_limit; priority++) { + if (!list_empty(&hw->tx_queue[priority])) { + packet = list_first_entry( + &hw->tx_queue[priority], + struct ipw_tx_packet, + queue); + + hw->tx_queued--; + list_del(&packet->queue); + + break; + } + } + if (!packet) { + hw->tx_queued = 0; + spin_unlock_irqrestore(&hw->lock, flags); + return 0; + } + + spin_unlock_irqrestore(&hw->lock, flags); + + /* Send */ + do_send_packet(hw, packet); + + /* Check if more to send */ + spin_lock_irqsave(&hw->lock, flags); + for (priority = 0; priority < priority_limit; priority++) + if (!list_empty(&hw->tx_queue[priority])) { + more_to_send = 1; + break; + } + + if (!more_to_send) + hw->tx_queued = 0; + } + spin_unlock_irqrestore(&hw->lock, flags); + + return more_to_send; +} + +/* + * Send and receive all queued packets. + */ +static void ipwireless_do_tasklet(unsigned long hw_) +{ + struct ipw_hardware *hw = (struct ipw_hardware *) hw_; + unsigned long flags; + + spin_lock_irqsave(&hw->lock, flags); + if (hw->shutting_down) { + spin_unlock_irqrestore(&hw->lock, flags); + return; + } + + if (hw->to_setup == 1) { + /* + * Initial setup data sent to hardware + */ + hw->to_setup = 2; + spin_unlock_irqrestore(&hw->lock, flags); + + ipw_setup_hardware(hw); + ipw_send_setup_packet(hw); + + send_pending_packet(hw, PRIO_SETUP + 1); + get_packets_from_hw(hw); + } else { + int priority_limit = get_current_packet_priority(hw); + int again; + + spin_unlock_irqrestore(&hw->lock, flags); + + do { + again = send_pending_packet(hw, priority_limit); + again |= get_packets_from_hw(hw); + } while (again); + } +} + +/* + * return true if the card is physically present. + */ +static int is_card_present(struct ipw_hardware *hw) +{ + if (hw->hw_version == HW_VERSION_1) + return inw(hw->base_port + IOIR) != 0xFFFF; + else + return readl(&hw->memory_info_regs->memreg_card_present) == + CARD_PRESENT_VALUE; +} + +static irqreturn_t ipwireless_handle_v1_interrupt(int irq, + struct ipw_hardware *hw) +{ + unsigned short irqn; + + irqn = inw(hw->base_port + IOIR); + + /* Check if card is present */ + if (irqn == 0xFFFF) + return IRQ_NONE; + else if (irqn != 0) { + unsigned short ack = 0; + unsigned long flags; + + /* Transmit complete. 
*/ + if (irqn & IR_TXINTR) { + ack |= IR_TXINTR; + spin_lock_irqsave(&hw->lock, flags); + hw->tx_ready = 1; + spin_unlock_irqrestore(&hw->lock, flags); + } + /* Received data */ + if (irqn & IR_RXINTR) { + ack |= IR_RXINTR; + spin_lock_irqsave(&hw->lock, flags); + hw->rx_ready++; + spin_unlock_irqrestore(&hw->lock, flags); + } + if (ack != 0) { + outw(ack, hw->base_port + IOIR); + tasklet_schedule(&hw->tasklet); + } + return IRQ_HANDLED; + } + return IRQ_NONE; +} + +static void acknowledge_pcmcia_interrupt(struct ipw_hardware *hw) +{ + unsigned short csr = readw(&hw->memregs_CCR->reg_config_and_status); + + csr &= 0xfffd; + writew(csr, &hw->memregs_CCR->reg_config_and_status); +} + +static irqreturn_t ipwireless_handle_v2_v3_interrupt(int irq, + struct ipw_hardware *hw) +{ + int tx = 0; + int rx = 0; + int rx_repeat = 0; + int try_mem_tx_old; + unsigned long flags; + + do { + + unsigned short memtx = readw(hw->memreg_tx); + unsigned short memtx_serial; + unsigned short memrxdone = + readw(&hw->memory_info_regs->memreg_rx_done); + + try_mem_tx_old = 0; + + /* check whether the interrupt was generated by ipwireless card */ + if (!(memtx & MEMTX_TX) && !(memrxdone & MEMRX_RX_DONE)) { + + /* check if the card uses memreg_tx_old register */ + if (hw->memreg_tx == &hw->memory_info_regs->memreg_tx_new) { + memtx = readw(&hw->memory_info_regs->memreg_tx_old); + if (memtx & MEMTX_TX) { + printk(KERN_INFO IPWIRELESS_PCCARD_NAME + ": Using memreg_tx_old\n"); + hw->memreg_tx = + &hw->memory_info_regs->memreg_tx_old; + } else { + return IRQ_NONE; + } + } else + return IRQ_NONE; + } + + /* + * See if the card is physically present. Note that while it is + * powering up, it appears not to be present. + */ + if (!is_card_present(hw)) { + acknowledge_pcmcia_interrupt(hw); + return IRQ_HANDLED; + } + + memtx_serial = memtx & (unsigned short) 0xff00; + if (memtx & MEMTX_TX) { + writew(memtx_serial, hw->memreg_tx); + + if (hw->serial_number_detected) { + if (memtx_serial != hw->last_memtx_serial) { + hw->last_memtx_serial = memtx_serial; + spin_lock_irqsave(&hw->lock, flags); + hw->rx_ready++; + spin_unlock_irqrestore(&hw->lock, flags); + rx = 1; + } else + /* Ignore 'Timer Recovery' duplicates. */ + rx_repeat = 1; + } else { + /* + * If a non-zero serial number is seen, then enable + * serial number checking. 
+ */ + if (memtx_serial != 0) { + hw->serial_number_detected = 1; + printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME + ": memreg_tx serial num detected\n"); + + spin_lock_irqsave(&hw->lock, flags); + hw->rx_ready++; + spin_unlock_irqrestore(&hw->lock, flags); + } + rx = 1; + } + } + if (memrxdone & MEMRX_RX_DONE) { + writew(0, &hw->memory_info_regs->memreg_rx_done); + spin_lock_irqsave(&hw->lock, flags); + hw->tx_ready = 1; + spin_unlock_irqrestore(&hw->lock, flags); + tx = 1; + } + if (tx) + writew(MEMRX_PCINTACKK, + &hw->memory_info_regs->memreg_pc_interrupt_ack); + + acknowledge_pcmcia_interrupt(hw); + + if (tx || rx) + tasklet_schedule(&hw->tasklet); + else if (!rx_repeat) { + if (hw->memreg_tx == &hw->memory_info_regs->memreg_tx_new) { + if (hw->serial_number_detected) + printk(KERN_WARNING IPWIRELESS_PCCARD_NAME + ": spurious interrupt - new_tx mode\n"); + else { + printk(KERN_WARNING IPWIRELESS_PCCARD_NAME + ": no valid memreg_tx value - switching to the old memreg_tx\n"); + hw->memreg_tx = + &hw->memory_info_regs->memreg_tx_old; + try_mem_tx_old = 1; + } + } else + printk(KERN_WARNING IPWIRELESS_PCCARD_NAME + ": spurious interrupt - old_tx mode\n"); + } + + } while (try_mem_tx_old == 1); + + return IRQ_HANDLED; +} + +irqreturn_t ipwireless_interrupt(int irq, void *dev_id) +{ + struct ipw_dev *ipw = dev_id; + + if (ipw->hardware->hw_version == HW_VERSION_1) + return ipwireless_handle_v1_interrupt(irq, ipw->hardware); + else + return ipwireless_handle_v2_v3_interrupt(irq, ipw->hardware); +} + +static void flush_packets_to_hw(struct ipw_hardware *hw) +{ + int priority_limit; + unsigned long flags; + + spin_lock_irqsave(&hw->lock, flags); + priority_limit = get_current_packet_priority(hw); + spin_unlock_irqrestore(&hw->lock, flags); + + while (send_pending_packet(hw, priority_limit)); +} + +static void send_packet(struct ipw_hardware *hw, int priority, + struct ipw_tx_packet *packet) +{ + unsigned long flags; + + spin_lock_irqsave(&hw->lock, flags); + list_add_tail(&packet->queue, &hw->tx_queue[priority]); + hw->tx_queued++; + spin_unlock_irqrestore(&hw->lock, flags); + + flush_packets_to_hw(hw); +} + +/* Create data packet, non-atomic allocation */ +static void *alloc_data_packet(int data_size, + unsigned char dest_addr, + unsigned char protocol) +{ + struct ipw_tx_packet *packet = kzalloc( + sizeof(struct ipw_tx_packet) + data_size, + GFP_ATOMIC); + + if (!packet) + return NULL; + + INIT_LIST_HEAD(&packet->queue); + packet->dest_addr = dest_addr; + packet->protocol = protocol; + packet->length = data_size; + + return packet; +} + +static void *alloc_ctrl_packet(int header_size, + unsigned char dest_addr, + unsigned char protocol, + unsigned char sig_no) +{ + /* + * sig_no is located right after ipw_tx_packet struct in every + * CTRL or SETUP packets, we can use ipw_control_packet as a + * common struct + */ + struct ipw_control_packet *packet = kzalloc(header_size, GFP_ATOMIC); + + if (!packet) + return NULL; + + INIT_LIST_HEAD(&packet->header.queue); + packet->header.dest_addr = dest_addr; + packet->header.protocol = protocol; + packet->header.length = header_size - sizeof(struct ipw_tx_packet); + packet->body.sig_no = sig_no; + + return packet; +} + +int ipwireless_send_packet(struct ipw_hardware *hw, unsigned int channel_idx, + const unsigned char *data, unsigned int length, + void (*callback) (void *cb, unsigned int length), + void *callback_data) +{ + struct ipw_tx_packet *packet; + + packet = alloc_data_packet(length, (channel_idx + 1), + TL_PROTOCOLID_COM_DATA); + if (!packet) + 
return -ENOMEM; + packet->packet_callback = callback; + packet->callback_data = callback_data; + memcpy((unsigned char *) packet + sizeof(struct ipw_tx_packet), data, + length); + + send_packet(hw, PRIO_DATA, packet); + return 0; +} + +static int set_control_line(struct ipw_hardware *hw, int prio, + unsigned int channel_idx, int line, int state) +{ + struct ipw_control_packet *packet; + int protocolid = TL_PROTOCOLID_COM_CTRL; + + if (prio == PRIO_SETUP) + protocolid = TL_PROTOCOLID_SETUP; + + packet = alloc_ctrl_packet(sizeof(struct ipw_control_packet), + (channel_idx + 1), protocolid, line); + if (!packet) + return -ENOMEM; + packet->header.length = sizeof(struct ipw_control_packet_body); + packet->body.value = (state == 0 ? 0 : 1); + send_packet(hw, prio, &packet->header); + return 0; +} + + +static int set_DTR(struct ipw_hardware *hw, int priority, + unsigned int channel_idx, int state) +{ + if (state != 0) + hw->control_lines[channel_idx] |= IPW_CONTROL_LINE_DTR; + else + hw->control_lines[channel_idx] &= ~IPW_CONTROL_LINE_DTR; + + return set_control_line(hw, priority, channel_idx, COMCTRL_DTR, state); +} + +static int set_RTS(struct ipw_hardware *hw, int priority, + unsigned int channel_idx, int state) +{ + if (state != 0) + hw->control_lines[channel_idx] |= IPW_CONTROL_LINE_RTS; + else + hw->control_lines[channel_idx] &= ~IPW_CONTROL_LINE_RTS; + + return set_control_line(hw, priority, channel_idx, COMCTRL_RTS, state); +} + +int ipwireless_set_DTR(struct ipw_hardware *hw, unsigned int channel_idx, + int state) +{ + return set_DTR(hw, PRIO_CTRL, channel_idx, state); +} + +int ipwireless_set_RTS(struct ipw_hardware *hw, unsigned int channel_idx, + int state) +{ + return set_RTS(hw, PRIO_CTRL, channel_idx, state); +} + +struct ipw_setup_get_version_query_packet { + struct ipw_tx_packet header; + struct tl_setup_get_version_qry body; +}; + +struct ipw_setup_config_packet { + struct ipw_tx_packet header; + struct tl_setup_config_msg body; +}; + +struct ipw_setup_config_done_packet { + struct ipw_tx_packet header; + struct tl_setup_config_done_msg body; +}; + +struct ipw_setup_open_packet { + struct ipw_tx_packet header; + struct tl_setup_open_msg body; +}; + +struct ipw_setup_info_packet { + struct ipw_tx_packet header; + struct tl_setup_info_msg body; +}; + +struct ipw_setup_reboot_msg_ack { + struct ipw_tx_packet header; + struct TlSetupRebootMsgAck body; +}; + +/* This handles the actual initialization of the card */ +static void __handle_setup_get_version_rsp(struct ipw_hardware *hw) +{ + struct ipw_setup_config_packet *config_packet; + struct ipw_setup_config_done_packet *config_done_packet; + struct ipw_setup_open_packet *open_packet; + struct ipw_setup_info_packet *info_packet; + int port; + unsigned int channel_idx; + + /* generate config packet */ + for (port = 1; port <= NL_NUM_OF_ADDRESSES; port++) { + config_packet = alloc_ctrl_packet( + sizeof(struct ipw_setup_config_packet), + ADDR_SETUP_PROT, + TL_PROTOCOLID_SETUP, + TL_SETUP_SIGNO_CONFIG_MSG); + if (!config_packet) + goto exit_nomem; + config_packet->header.length = sizeof(struct tl_setup_config_msg); + config_packet->body.port_no = port; + config_packet->body.prio_data = PRIO_DATA; + config_packet->body.prio_ctrl = PRIO_CTRL; + send_packet(hw, PRIO_SETUP, &config_packet->header); + } + config_done_packet = alloc_ctrl_packet( + sizeof(struct ipw_setup_config_done_packet), + ADDR_SETUP_PROT, + TL_PROTOCOLID_SETUP, + TL_SETUP_SIGNO_CONFIG_DONE_MSG); + if (!config_done_packet) + goto exit_nomem; + 
config_done_packet->header.length = sizeof(struct tl_setup_config_done_msg); + send_packet(hw, PRIO_SETUP, &config_done_packet->header); + + /* generate open packet */ + for (port = 1; port <= NL_NUM_OF_ADDRESSES; port++) { + open_packet = alloc_ctrl_packet( + sizeof(struct ipw_setup_open_packet), + ADDR_SETUP_PROT, + TL_PROTOCOLID_SETUP, + TL_SETUP_SIGNO_OPEN_MSG); + if (!open_packet) + goto exit_nomem; + open_packet->header.length = sizeof(struct tl_setup_open_msg); + open_packet->body.port_no = port; + send_packet(hw, PRIO_SETUP, &open_packet->header); + } + for (channel_idx = 0; + channel_idx < NL_NUM_OF_ADDRESSES; channel_idx++) { + int ret; + + ret = set_DTR(hw, PRIO_SETUP, channel_idx, + (hw->control_lines[channel_idx] & + IPW_CONTROL_LINE_DTR) != 0); + if (ret) { + printk(KERN_ERR IPWIRELESS_PCCARD_NAME + ": error setting DTR (%d)\n", ret); + return; + } + + set_RTS(hw, PRIO_SETUP, channel_idx, + (hw->control_lines [channel_idx] & + IPW_CONTROL_LINE_RTS) != 0); + if (ret) { + printk(KERN_ERR IPWIRELESS_PCCARD_NAME + ": error setting RTS (%d)\n", ret); + return; + } + } + /* + * For NDIS we assume that we are using sync PPP frames, for COM async. + * This driver uses NDIS mode too. We don't bother with translation + * from async -> sync PPP. + */ + info_packet = alloc_ctrl_packet(sizeof(struct ipw_setup_info_packet), + ADDR_SETUP_PROT, + TL_PROTOCOLID_SETUP, + TL_SETUP_SIGNO_INFO_MSG); + if (!info_packet) + goto exit_nomem; + info_packet->header.length = sizeof(struct tl_setup_info_msg); + info_packet->body.driver_type = NDISWAN_DRIVER; + info_packet->body.major_version = NDISWAN_DRIVER_MAJOR_VERSION; + info_packet->body.minor_version = NDISWAN_DRIVER_MINOR_VERSION; + send_packet(hw, PRIO_SETUP, &info_packet->header); + + /* Initialization is now complete, so we clear the 'to_setup' flag */ + hw->to_setup = 0; + + return; + +exit_nomem: + printk(KERN_ERR IPWIRELESS_PCCARD_NAME + ": not enough memory to alloc control packet\n"); + hw->to_setup = -1; +} + +static void handle_setup_get_version_rsp(struct ipw_hardware *hw, + unsigned char vers_no) +{ + del_timer(&hw->setup_timer); + hw->initializing = 0; + printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": card is ready.\n"); + + if (vers_no == TL_SETUP_VERSION) + __handle_setup_get_version_rsp(hw); + else + printk(KERN_ERR IPWIRELESS_PCCARD_NAME + ": invalid hardware version no %u\n", + (unsigned int) vers_no); +} + +static void ipw_send_setup_packet(struct ipw_hardware *hw) +{ + struct ipw_setup_get_version_query_packet *ver_packet; + + ver_packet = alloc_ctrl_packet( + sizeof(struct ipw_setup_get_version_query_packet), + ADDR_SETUP_PROT, TL_PROTOCOLID_SETUP, + TL_SETUP_SIGNO_GET_VERSION_QRY); + ver_packet->header.length = sizeof(struct tl_setup_get_version_qry); + + /* + * Response is handled in handle_received_SETUP_packet + */ + send_packet(hw, PRIO_SETUP, &ver_packet->header); +} + +static void handle_received_SETUP_packet(struct ipw_hardware *hw, + unsigned int address, + const unsigned char *data, int len, + int is_last) +{ + const union ipw_setup_rx_msg *rx_msg = (const union ipw_setup_rx_msg *) data; + + if (address != ADDR_SETUP_PROT) { + printk(KERN_INFO IPWIRELESS_PCCARD_NAME + ": setup packet has bad address %d\n", address); + return; + } + + switch (rx_msg->sig_no) { + case TL_SETUP_SIGNO_GET_VERSION_RSP: + if (hw->to_setup) + handle_setup_get_version_rsp(hw, + rx_msg->version_rsp_msg.version); + break; + + case TL_SETUP_SIGNO_OPEN_MSG: + if (ipwireless_debug) { + unsigned int channel_idx = rx_msg->open_msg.port_no - 1; + + 
printk(KERN_INFO IPWIRELESS_PCCARD_NAME + ": OPEN_MSG [channel %u] reply received\n", + channel_idx); + } + break; + + case TL_SETUP_SIGNO_INFO_MSG_ACK: + if (ipwireless_debug) + printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME + ": card successfully configured as NDISWAN\n"); + break; + + case TL_SETUP_SIGNO_REBOOT_MSG: + if (hw->to_setup) + printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME + ": Setup not completed - ignoring reboot msg\n"); + else { + struct ipw_setup_reboot_msg_ack *packet; + + printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME + ": Acknowledging REBOOT message\n"); + packet = alloc_ctrl_packet( + sizeof(struct ipw_setup_reboot_msg_ack), + ADDR_SETUP_PROT, TL_PROTOCOLID_SETUP, + TL_SETUP_SIGNO_REBOOT_MSG_ACK); + packet->header.length = + sizeof(struct TlSetupRebootMsgAck); + send_packet(hw, PRIO_SETUP, &packet->header); + if (hw->reboot_callback) + hw->reboot_callback(hw->reboot_callback_data); + } + break; + + default: + printk(KERN_INFO IPWIRELESS_PCCARD_NAME + ": unknown setup message %u received\n", + (unsigned int) rx_msg->sig_no); + } +} + +static void do_close_hardware(struct ipw_hardware *hw) +{ + unsigned int irqn; + + if (hw->hw_version == HW_VERSION_1) { + /* Disable TX and RX interrupts. */ + outw(0, hw->base_port + IOIER); + + /* Acknowledge any outstanding interrupt requests */ + irqn = inw(hw->base_port + IOIR); + if (irqn & IR_TXINTR) + outw(IR_TXINTR, hw->base_port + IOIR); + if (irqn & IR_RXINTR) + outw(IR_RXINTR, hw->base_port + IOIR); + + synchronize_irq(hw->irq); + } +} + +struct ipw_hardware *ipwireless_hardware_create(void) +{ + int i; + struct ipw_hardware *hw = + kzalloc(sizeof(struct ipw_hardware), GFP_KERNEL); + + if (!hw) + return NULL; + + hw->irq = -1; + hw->initializing = 1; + hw->tx_ready = 1; + hw->rx_bytes_queued = 0; + hw->rx_pool_size = 0; + hw->last_memtx_serial = (unsigned short) 0xffff; + for (i = 0; i < NL_NUM_OF_PRIORITIES; i++) + INIT_LIST_HEAD(&hw->tx_queue[i]); + + INIT_LIST_HEAD(&hw->rx_queue); + INIT_LIST_HEAD(&hw->rx_pool); + spin_lock_init(&hw->lock); + tasklet_init(&hw->tasklet, ipwireless_do_tasklet, (unsigned long) hw); + INIT_WORK(&hw->work_rx, ipw_receive_data_work); + setup_timer(&hw->setup_timer, ipwireless_setup_timer, + (unsigned long) hw); + + return hw; +} + +void ipwireless_init_hardware_v1(struct ipw_hardware *hw, + unsigned int base_port, + void __iomem *attr_memory, + void __iomem *common_memory, + int is_v2_card, + void (*reboot_callback) (void *data), + void *reboot_callback_data) +{ + if (hw->removed) { + hw->removed = 0; + enable_irq(hw->irq); + } + hw->base_port = base_port; + hw->hw_version = (is_v2_card ? HW_VERSION_2 : HW_VERSION_1); + hw->ll_mtu = (hw->hw_version == HW_VERSION_1 ? 
LL_MTU_V1 : LL_MTU_V2); + hw->memregs_CCR = (struct MEMCCR __iomem *) + ((unsigned short __iomem *) attr_memory + 0x200); + hw->memory_info_regs = (struct MEMINFREG __iomem *) common_memory; + hw->memreg_tx = &hw->memory_info_regs->memreg_tx_new; + hw->reboot_callback = reboot_callback; + hw->reboot_callback_data = reboot_callback_data; +} + +void ipwireless_init_hardware_v2_v3(struct ipw_hardware *hw) +{ + hw->initializing = 1; + hw->init_loops = 0; + printk(KERN_INFO IPWIRELESS_PCCARD_NAME + ": waiting for card to start up...\n"); + ipwireless_setup_timer((unsigned long) hw); +} + +static void ipwireless_setup_timer(unsigned long data) +{ + struct ipw_hardware *hw = (struct ipw_hardware *) data; + + hw->init_loops++; + + if (hw->init_loops == TL_SETUP_MAX_VERSION_QRY && + hw->hw_version == HW_VERSION_2 && + hw->memreg_tx == &hw->memory_info_regs->memreg_tx_new) { + printk(KERN_INFO IPWIRELESS_PCCARD_NAME + ": failed to startup using TX2, trying TX\n"); + + hw->memreg_tx = &hw->memory_info_regs->memreg_tx_old; + hw->init_loops = 0; + } + /* Give up after a certain number of retries */ + if (hw->init_loops == TL_SETUP_MAX_VERSION_QRY) { + printk(KERN_INFO IPWIRELESS_PCCARD_NAME + ": card failed to start up!\n"); + hw->initializing = 0; + } else { + /* Do not attempt to write to the board if it is not present. */ + if (is_card_present(hw)) { + unsigned long flags; + + spin_lock_irqsave(&hw->lock, flags); + hw->to_setup = 1; + hw->tx_ready = 1; + spin_unlock_irqrestore(&hw->lock, flags); + tasklet_schedule(&hw->tasklet); + } + + mod_timer(&hw->setup_timer, + jiffies + msecs_to_jiffies(TL_SETUP_VERSION_QRY_TMO)); + } +} + +/* + * Stop any interrupts from executing so that, once this function returns, + * other layers of the driver can be sure they won't get any more callbacks. + * Thus must be called on a proper process context. + */ +void ipwireless_stop_interrupts(struct ipw_hardware *hw) +{ + if (!hw->shutting_down) { + /* Tell everyone we are going down. */ + hw->shutting_down = 1; + del_timer(&hw->setup_timer); + + /* Prevent the hardware from sending any more interrupts */ + do_close_hardware(hw); + } +} + +void ipwireless_hardware_free(struct ipw_hardware *hw) +{ + int i; + struct ipw_rx_packet *rp, *rq; + struct ipw_tx_packet *tp, *tq; + + ipwireless_stop_interrupts(hw); + + flush_work_sync(&hw->work_rx); + + for (i = 0; i < NL_NUM_OF_ADDRESSES; i++) + if (hw->packet_assembler[i] != NULL) + kfree(hw->packet_assembler[i]); + + for (i = 0; i < NL_NUM_OF_PRIORITIES; i++) + list_for_each_entry_safe(tp, tq, &hw->tx_queue[i], queue) { + list_del(&tp->queue); + kfree(tp); + } + + list_for_each_entry_safe(rp, rq, &hw->rx_queue, queue) { + list_del(&rp->queue); + kfree(rp); + } + + list_for_each_entry_safe(rp, rq, &hw->rx_pool, queue) { + list_del(&rp->queue); + kfree(rp); + } + kfree(hw); +} + +/* + * Associate the specified network with this hardware, so it will receive events + * from it. 
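+ * Passing a NULL network, as ipwireless_network_free() does, detaches the
+ * hardware from the network layer before the network context is freed.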
+ */ +void ipwireless_associate_network(struct ipw_hardware *hw, + struct ipw_network *network) +{ + hw->network = network; +} diff --git a/drivers/tty/ipwireless/hardware.h b/drivers/tty/ipwireless/hardware.h new file mode 100644 index 00000000000..90a8590e43b --- /dev/null +++ b/drivers/tty/ipwireless/hardware.h @@ -0,0 +1,62 @@ +/* + * IPWireless 3G PCMCIA Network Driver + * + * Original code + * by Stephen Blackheath <stephen@blacksapphire.com>, + * Ben Martel <benm@symmetric.co.nz> + * + * Copyrighted as follows: + * Copyright (C) 2004 by Symmetric Systems Ltd (NZ) + * + * Various driver changes and rewrites, port to new kernels + * Copyright (C) 2006-2007 Jiri Kosina + * + * Misc code cleanups and updates + * Copyright (C) 2007 David Sterba + */ + +#ifndef _IPWIRELESS_CS_HARDWARE_H_ +#define _IPWIRELESS_CS_HARDWARE_H_ + +#include <linux/types.h> +#include <linux/sched.h> +#include <linux/interrupt.h> + +#define IPW_CONTROL_LINE_CTS 0x0001 +#define IPW_CONTROL_LINE_DCD 0x0002 +#define IPW_CONTROL_LINE_DSR 0x0004 +#define IPW_CONTROL_LINE_RI 0x0008 +#define IPW_CONTROL_LINE_DTR 0x0010 +#define IPW_CONTROL_LINE_RTS 0x0020 + +struct ipw_hardware; +struct ipw_network; + +struct ipw_hardware *ipwireless_hardware_create(void); +void ipwireless_hardware_free(struct ipw_hardware *hw); +irqreturn_t ipwireless_interrupt(int irq, void *dev_id); +int ipwireless_set_DTR(struct ipw_hardware *hw, unsigned int channel_idx, + int state); +int ipwireless_set_RTS(struct ipw_hardware *hw, unsigned int channel_idx, + int state); +int ipwireless_send_packet(struct ipw_hardware *hw, + unsigned int channel_idx, + const unsigned char *data, + unsigned int length, + void (*packet_sent_callback) (void *cb, + unsigned int length), + void *sent_cb_data); +void ipwireless_associate_network(struct ipw_hardware *hw, + struct ipw_network *net); +void ipwireless_stop_interrupts(struct ipw_hardware *hw); +void ipwireless_init_hardware_v1(struct ipw_hardware *hw, + unsigned int base_port, + void __iomem *attr_memory, + void __iomem *common_memory, + int is_v2_card, + void (*reboot_cb) (void *data), + void *reboot_cb_data); +void ipwireless_init_hardware_v2_v3(struct ipw_hardware *hw); +void ipwireless_sleep(unsigned int tenths); + +#endif diff --git a/drivers/tty/ipwireless/main.c b/drivers/tty/ipwireless/main.c new file mode 100644 index 00000000000..444155a305a --- /dev/null +++ b/drivers/tty/ipwireless/main.c @@ -0,0 +1,347 @@ +/* + * IPWireless 3G PCMCIA Network Driver + * + * Original code + * by Stephen Blackheath <stephen@blacksapphire.com>, + * Ben Martel <benm@symmetric.co.nz> + * + * Copyrighted as follows: + * Copyright (C) 2004 by Symmetric Systems Ltd (NZ) + * + * Various driver changes and rewrites, port to new kernels + * Copyright (C) 2006-2007 Jiri Kosina + * + * Misc code cleanups and updates + * Copyright (C) 2007 David Sterba + */ + +#include "hardware.h" +#include "network.h" +#include "main.h" +#include "tty.h" + +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/slab.h> + +#include <pcmcia/cisreg.h> +#include <pcmcia/device_id.h> +#include <pcmcia/ss.h> +#include <pcmcia/ds.h> + +static struct pcmcia_device_id ipw_ids[] = { + PCMCIA_DEVICE_MANF_CARD(0x02f2, 0x0100), + PCMCIA_DEVICE_MANF_CARD(0x02f2, 0x0200), + PCMCIA_DEVICE_NULL +}; +MODULE_DEVICE_TABLE(pcmcia, ipw_ids); + +static void ipwireless_detach(struct pcmcia_device *link); + +/* + * Module params + */ +/* Debug mode: 
more verbose, print sent/recv bytes */ +int ipwireless_debug; +int ipwireless_loopback; +int ipwireless_out_queue = 10; + +module_param_named(debug, ipwireless_debug, int, 0); +module_param_named(loopback, ipwireless_loopback, int, 0); +module_param_named(out_queue, ipwireless_out_queue, int, 0); +MODULE_PARM_DESC(debug, "switch on debug messages [0]"); +MODULE_PARM_DESC(loopback, + "debug: enable ras_raw channel [0]"); +MODULE_PARM_DESC(out_queue, "debug: set size of outgoing PPP queue [10]"); + +/* Executes in process context. */ +static void signalled_reboot_work(struct work_struct *work_reboot) +{ + struct ipw_dev *ipw = container_of(work_reboot, struct ipw_dev, + work_reboot); + struct pcmcia_device *link = ipw->link; + pcmcia_reset_card(link->socket); +} + +static void signalled_reboot_callback(void *callback_data) +{ + struct ipw_dev *ipw = (struct ipw_dev *) callback_data; + + /* Delegate to process context. */ + schedule_work(&ipw->work_reboot); +} + +static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data) +{ + struct ipw_dev *ipw = priv_data; + int ret; + + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; + + /* 0x40 causes it to generate level mode interrupts. */ + /* 0x04 enables IREQ pin. */ + p_dev->config_index |= 0x44; + p_dev->io_lines = 16; + ret = pcmcia_request_io(p_dev); + if (ret) + return ret; + + if (!request_region(p_dev->resource[0]->start, + resource_size(p_dev->resource[0]), + IPWIRELESS_PCCARD_NAME)) { + ret = -EBUSY; + goto exit; + } + + p_dev->resource[2]->flags |= + WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE; + + ret = pcmcia_request_window(p_dev, p_dev->resource[2], 0); + if (ret != 0) + goto exit1; + + ret = pcmcia_map_mem_page(p_dev, p_dev->resource[2], p_dev->card_addr); + if (ret != 0) + goto exit1; + + ipw->is_v2_card = resource_size(p_dev->resource[2]) == 0x100; + + ipw->common_memory = ioremap(p_dev->resource[2]->start, + resource_size(p_dev->resource[2])); + if (!request_mem_region(p_dev->resource[2]->start, + resource_size(p_dev->resource[2]), + IPWIRELESS_PCCARD_NAME)) { + ret = -EBUSY; + goto exit2; + } + + p_dev->resource[3]->flags |= WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM | + WIN_ENABLE; + p_dev->resource[3]->end = 0; /* this used to be 0x1000 */ + ret = pcmcia_request_window(p_dev, p_dev->resource[3], 0); + if (ret != 0) + goto exit3; + + ret = pcmcia_map_mem_page(p_dev, p_dev->resource[3], 0); + if (ret != 0) + goto exit3; + + ipw->attr_memory = ioremap(p_dev->resource[3]->start, + resource_size(p_dev->resource[3])); + if (!request_mem_region(p_dev->resource[3]->start, + resource_size(p_dev->resource[3]), + IPWIRELESS_PCCARD_NAME)) { + ret = -EBUSY; + goto exit4; + } + + return 0; + +exit4: + iounmap(ipw->attr_memory); +exit3: + release_mem_region(p_dev->resource[2]->start, + resource_size(p_dev->resource[2])); +exit2: + iounmap(ipw->common_memory); +exit1: + release_region(p_dev->resource[0]->start, + resource_size(p_dev->resource[0])); +exit: + pcmcia_disable_device(p_dev); + return ret; +} + +static int config_ipwireless(struct ipw_dev *ipw) +{ + struct pcmcia_device *link = ipw->link; + int ret = 0; + + ipw->is_v2_card = 0; + link->config_flags |= CONF_AUTO_SET_IO | CONF_AUTO_SET_IOMEM | + CONF_ENABLE_IRQ; + + ret = pcmcia_loop_config(link, ipwireless_probe, ipw); + if (ret != 0) + return ret; + + INIT_WORK(&ipw->work_reboot, signalled_reboot_work); + + ipwireless_init_hardware_v1(ipw->hardware, link->resource[0]->start, + ipw->attr_memory, 
ipw->common_memory, + ipw->is_v2_card, signalled_reboot_callback, + ipw); + + ret = pcmcia_request_irq(link, ipwireless_interrupt); + if (ret != 0) + goto exit; + + printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": Card type %s\n", + ipw->is_v2_card ? "V2/V3" : "V1"); + printk(KERN_INFO IPWIRELESS_PCCARD_NAME + ": I/O ports %pR, irq %d\n", link->resource[0], + (unsigned int) link->irq); + if (ipw->attr_memory && ipw->common_memory) + printk(KERN_INFO IPWIRELESS_PCCARD_NAME + ": attr memory %pR, common memory %pR\n", + link->resource[3], + link->resource[2]); + + ipw->network = ipwireless_network_create(ipw->hardware); + if (!ipw->network) + goto exit; + + ipw->tty = ipwireless_tty_create(ipw->hardware, ipw->network); + if (!ipw->tty) + goto exit; + + ipwireless_init_hardware_v2_v3(ipw->hardware); + + /* + * Do the RequestConfiguration last, because it enables interrupts. + * Then we don't get any interrupts before we're ready for them. + */ + ret = pcmcia_enable_device(link); + if (ret != 0) + goto exit; + + return 0; + +exit: + if (ipw->common_memory) { + release_mem_region(link->resource[2]->start, + resource_size(link->resource[2])); + iounmap(ipw->common_memory); + } + if (ipw->attr_memory) { + release_mem_region(link->resource[3]->start, + resource_size(link->resource[3])); + iounmap(ipw->attr_memory); + } + pcmcia_disable_device(link); + return -1; +} + +static void release_ipwireless(struct ipw_dev *ipw) +{ + release_region(ipw->link->resource[0]->start, + resource_size(ipw->link->resource[0])); + if (ipw->common_memory) { + release_mem_region(ipw->link->resource[2]->start, + resource_size(ipw->link->resource[2])); + iounmap(ipw->common_memory); + } + if (ipw->attr_memory) { + release_mem_region(ipw->link->resource[3]->start, + resource_size(ipw->link->resource[3])); + iounmap(ipw->attr_memory); + } + pcmcia_disable_device(ipw->link); +} + +/* + * ipwireless_attach() creates an "instance" of the driver, allocating + * local data structures for one device (one interface). The device + * is registered with Card Services. + * + * The pcmcia_device structure is initialized, but we don't actually + * configure the card at this point -- we wait until we receive a + * card insertion event. + */ +static int ipwireless_attach(struct pcmcia_device *link) +{ + struct ipw_dev *ipw; + int ret; + + ipw = kzalloc(sizeof(struct ipw_dev), GFP_KERNEL); + if (!ipw) + return -ENOMEM; + + ipw->link = link; + link->priv = ipw; + + ipw->hardware = ipwireless_hardware_create(); + if (!ipw->hardware) { + kfree(ipw); + return -ENOMEM; + } + /* RegisterClient will call config_ipwireless */ + + ret = config_ipwireless(ipw); + + if (ret != 0) { + ipwireless_detach(link); + return ret; + } + + return 0; +} + +/* + * This deletes a driver "instance". The device is de-registered with + * Card Services. If it has been released, all local data structures + * are freed. Otherwise, the structures will be freed when the device + * is released. + */ +static void ipwireless_detach(struct pcmcia_device *link) +{ + struct ipw_dev *ipw = link->priv; + + release_ipwireless(ipw); + + if (ipw->tty != NULL) + ipwireless_tty_free(ipw->tty); + if (ipw->network != NULL) + ipwireless_network_free(ipw->network); + if (ipw->hardware != NULL) + ipwireless_hardware_free(ipw->hardware); + kfree(ipw); +} + +static struct pcmcia_driver me = { + .owner = THIS_MODULE, + .probe = ipwireless_attach, + .remove = ipwireless_detach, + .name = IPWIRELESS_PCCARD_NAME, + .id_table = ipw_ids +}; + +/* + * Module insertion : initialisation of the module. 
+ * Register the card with cardmgr... + */ +static int __init init_ipwireless(void) +{ + int ret; + + ret = ipwireless_tty_init(); + if (ret != 0) + return ret; + + ret = pcmcia_register_driver(&me); + if (ret != 0) + ipwireless_tty_release(); + + return ret; +} + +/* + * Module removal + */ +static void __exit exit_ipwireless(void) +{ + pcmcia_unregister_driver(&me); + ipwireless_tty_release(); +} + +module_init(init_ipwireless); +module_exit(exit_ipwireless); + +MODULE_AUTHOR(IPWIRELESS_PCMCIA_AUTHOR); +MODULE_DESCRIPTION(IPWIRELESS_PCCARD_NAME " " IPWIRELESS_PCMCIA_VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/ipwireless/main.h b/drivers/tty/ipwireless/main.h new file mode 100644 index 00000000000..f2cbb116bcc --- /dev/null +++ b/drivers/tty/ipwireless/main.h @@ -0,0 +1,68 @@ +/* + * IPWireless 3G PCMCIA Network Driver + * + * Original code + * by Stephen Blackheath <stephen@blacksapphire.com>, + * Ben Martel <benm@symmetric.co.nz> + * + * Copyrighted as follows: + * Copyright (C) 2004 by Symmetric Systems Ltd (NZ) + * + * Various driver changes and rewrites, port to new kernels + * Copyright (C) 2006-2007 Jiri Kosina + * + * Misc code cleanups and updates + * Copyright (C) 2007 David Sterba + */ + +#ifndef _IPWIRELESS_CS_H_ +#define _IPWIRELESS_CS_H_ + +#include <linux/sched.h> +#include <linux/types.h> + +#include <pcmcia/cistpl.h> +#include <pcmcia/ds.h> + +#include "hardware.h" + +#define IPWIRELESS_PCCARD_NAME "ipwireless" +#define IPWIRELESS_PCMCIA_VERSION "1.1" +#define IPWIRELESS_PCMCIA_AUTHOR \ + "Stephen Blackheath, Ben Martel, Jiri Kosina and David Sterba" + +#define IPWIRELESS_TX_QUEUE_SIZE 262144 +#define IPWIRELESS_RX_QUEUE_SIZE 262144 + +#define IPWIRELESS_STATE_DEBUG + +struct ipw_hardware; +struct ipw_network; +struct ipw_tty; + +struct ipw_dev { + struct pcmcia_device *link; + int is_v2_card; + + void __iomem *attr_memory; + + void __iomem *common_memory; + + /* Reference to attribute memory, containing CIS data */ + void *attribute_memory; + + /* Hardware context */ + struct ipw_hardware *hardware; + /* Network layer context */ + struct ipw_network *network; + /* TTY device context */ + struct ipw_tty *tty; + struct work_struct work_reboot; +}; + +/* Module parametres */ +extern int ipwireless_debug; +extern int ipwireless_loopback; +extern int ipwireless_out_queue; + +#endif diff --git a/drivers/tty/ipwireless/network.c b/drivers/tty/ipwireless/network.c new file mode 100644 index 00000000000..f7daeea598e --- /dev/null +++ b/drivers/tty/ipwireless/network.c @@ -0,0 +1,508 @@ +/* + * IPWireless 3G PCMCIA Network Driver + * + * Original code + * by Stephen Blackheath <stephen@blacksapphire.com>, + * Ben Martel <benm@symmetric.co.nz> + * + * Copyrighted as follows: + * Copyright (C) 2004 by Symmetric Systems Ltd (NZ) + * + * Various driver changes and rewrites, port to new kernels + * Copyright (C) 2006-2007 Jiri Kosina + * + * Misc code cleanups and updates + * Copyright (C) 2007 David Sterba + */ + +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/mutex.h> +#include <linux/netdevice.h> +#include <linux/ppp_channel.h> +#include <linux/ppp_defs.h> +#include <linux/slab.h> +#include <linux/if_ppp.h> +#include <linux/skbuff.h> + +#include "network.h" +#include "hardware.h" +#include "main.h" +#include "tty.h" + +#define MAX_ASSOCIATED_TTYS 2 + +#define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP) + +struct ipw_network { + /* Hardware context, used for calls to hardware layer. 
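+	 * (e.g. ipwireless_send_packet() and ipwireless_stop_interrupts()).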
*/ + struct ipw_hardware *hardware; + /* Context for kernel 'generic_ppp' functionality */ + struct ppp_channel *ppp_channel; + /* tty context connected with IPW console */ + struct ipw_tty *associated_ttys[NO_OF_IPW_CHANNELS][MAX_ASSOCIATED_TTYS]; + /* True if ppp needs waking up once we're ready to xmit */ + int ppp_blocked; + /* Number of packets queued up in hardware module. */ + int outgoing_packets_queued; + /* Spinlock to avoid interrupts during shutdown */ + spinlock_t lock; + struct mutex close_lock; + + /* PPP ioctl data, not actually used anywere */ + unsigned int flags; + unsigned int rbits; + u32 xaccm[8]; + u32 raccm; + int mru; + + int shutting_down; + unsigned int ras_control_lines; + + struct work_struct work_go_online; + struct work_struct work_go_offline; +}; + +static void notify_packet_sent(void *callback_data, unsigned int packet_length) +{ + struct ipw_network *network = callback_data; + unsigned long flags; + + spin_lock_irqsave(&network->lock, flags); + network->outgoing_packets_queued--; + if (network->ppp_channel != NULL) { + if (network->ppp_blocked) { + network->ppp_blocked = 0; + spin_unlock_irqrestore(&network->lock, flags); + ppp_output_wakeup(network->ppp_channel); + if (ipwireless_debug) + printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME + ": ppp unblocked\n"); + } else + spin_unlock_irqrestore(&network->lock, flags); + } else + spin_unlock_irqrestore(&network->lock, flags); +} + +/* + * Called by the ppp system when it has a packet to send to the hardware. + */ +static int ipwireless_ppp_start_xmit(struct ppp_channel *ppp_channel, + struct sk_buff *skb) +{ + struct ipw_network *network = ppp_channel->private; + unsigned long flags; + + spin_lock_irqsave(&network->lock, flags); + if (network->outgoing_packets_queued < ipwireless_out_queue) { + unsigned char *buf; + static unsigned char header[] = { + PPP_ALLSTATIONS, /* 0xff */ + PPP_UI, /* 0x03 */ + }; + int ret; + + network->outgoing_packets_queued++; + spin_unlock_irqrestore(&network->lock, flags); + + /* + * If we have the requested amount of headroom in the skb we + * were handed, then we can add the header efficiently. + */ + if (skb_headroom(skb) >= 2) { + memcpy(skb_push(skb, 2), header, 2); + ret = ipwireless_send_packet(network->hardware, + IPW_CHANNEL_RAS, skb->data, + skb->len, + notify_packet_sent, + network); + if (ret == -1) { + skb_pull(skb, 2); + return 0; + } + } else { + /* Otherwise (rarely) we do it inefficiently. */ + buf = kmalloc(skb->len + 2, GFP_ATOMIC); + if (!buf) + return 0; + memcpy(buf + 2, skb->data, skb->len); + memcpy(buf, header, 2); + ret = ipwireless_send_packet(network->hardware, + IPW_CHANNEL_RAS, buf, + skb->len + 2, + notify_packet_sent, + network); + kfree(buf); + if (ret == -1) + return 0; + } + kfree_skb(skb); + return 1; + } else { + /* + * Otherwise reject the packet, and flag that the ppp system + * needs to be unblocked once we are ready to send. + */ + network->ppp_blocked = 1; + spin_unlock_irqrestore(&network->lock, flags); + if (ipwireless_debug) + printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME ": ppp blocked\n"); + return 0; + } +} + +/* Handle an ioctl call that has come in via ppp. 
(copy of ppp_async_ioctl() */ +static int ipwireless_ppp_ioctl(struct ppp_channel *ppp_channel, + unsigned int cmd, unsigned long arg) +{ + struct ipw_network *network = ppp_channel->private; + int err, val; + u32 accm[8]; + int __user *user_arg = (int __user *) arg; + + err = -EFAULT; + switch (cmd) { + case PPPIOCGFLAGS: + val = network->flags | network->rbits; + if (put_user(val, user_arg)) + break; + err = 0; + break; + + case PPPIOCSFLAGS: + if (get_user(val, user_arg)) + break; + network->flags = val & ~SC_RCV_BITS; + network->rbits = val & SC_RCV_BITS; + err = 0; + break; + + case PPPIOCGASYNCMAP: + if (put_user(network->xaccm[0], user_arg)) + break; + err = 0; + break; + + case PPPIOCSASYNCMAP: + if (get_user(network->xaccm[0], user_arg)) + break; + err = 0; + break; + + case PPPIOCGRASYNCMAP: + if (put_user(network->raccm, user_arg)) + break; + err = 0; + break; + + case PPPIOCSRASYNCMAP: + if (get_user(network->raccm, user_arg)) + break; + err = 0; + break; + + case PPPIOCGXASYNCMAP: + if (copy_to_user((void __user *) arg, network->xaccm, + sizeof(network->xaccm))) + break; + err = 0; + break; + + case PPPIOCSXASYNCMAP: + if (copy_from_user(accm, (void __user *) arg, sizeof(accm))) + break; + accm[2] &= ~0x40000000U; /* can't escape 0x5e */ + accm[3] |= 0x60000000U; /* must escape 0x7d, 0x7e */ + memcpy(network->xaccm, accm, sizeof(network->xaccm)); + err = 0; + break; + + case PPPIOCGMRU: + if (put_user(network->mru, user_arg)) + break; + err = 0; + break; + + case PPPIOCSMRU: + if (get_user(val, user_arg)) + break; + if (val < PPP_MRU) + val = PPP_MRU; + network->mru = val; + err = 0; + break; + + default: + err = -ENOTTY; + } + + return err; +} + +static const struct ppp_channel_ops ipwireless_ppp_channel_ops = { + .start_xmit = ipwireless_ppp_start_xmit, + .ioctl = ipwireless_ppp_ioctl +}; + +static void do_go_online(struct work_struct *work_go_online) +{ + struct ipw_network *network = + container_of(work_go_online, struct ipw_network, + work_go_online); + unsigned long flags; + + spin_lock_irqsave(&network->lock, flags); + if (!network->ppp_channel) { + struct ppp_channel *channel; + + spin_unlock_irqrestore(&network->lock, flags); + channel = kzalloc(sizeof(struct ppp_channel), GFP_KERNEL); + if (!channel) { + printk(KERN_ERR IPWIRELESS_PCCARD_NAME + ": unable to allocate PPP channel\n"); + return; + } + channel->private = network; + channel->mtu = 16384; /* Wild guess */ + channel->hdrlen = 2; + channel->ops = &ipwireless_ppp_channel_ops; + + network->flags = 0; + network->rbits = 0; + network->mru = PPP_MRU; + memset(network->xaccm, 0, sizeof(network->xaccm)); + network->xaccm[0] = ~0U; + network->xaccm[3] = 0x60000000U; + network->raccm = ~0U; + ppp_register_channel(channel); + spin_lock_irqsave(&network->lock, flags); + network->ppp_channel = channel; + } + spin_unlock_irqrestore(&network->lock, flags); +} + +static void do_go_offline(struct work_struct *work_go_offline) +{ + struct ipw_network *network = + container_of(work_go_offline, struct ipw_network, + work_go_offline); + unsigned long flags; + + mutex_lock(&network->close_lock); + spin_lock_irqsave(&network->lock, flags); + if (network->ppp_channel != NULL) { + struct ppp_channel *channel = network->ppp_channel; + + network->ppp_channel = NULL; + spin_unlock_irqrestore(&network->lock, flags); + mutex_unlock(&network->close_lock); + ppp_unregister_channel(channel); + } else { + spin_unlock_irqrestore(&network->lock, flags); + mutex_unlock(&network->close_lock); + } +} + +void 
ipwireless_network_notify_control_line_change(struct ipw_network *network, + unsigned int channel_idx, + unsigned int control_lines, + unsigned int changed_mask) +{ + int i; + + if (channel_idx == IPW_CHANNEL_RAS) + network->ras_control_lines = control_lines; + + for (i = 0; i < MAX_ASSOCIATED_TTYS; i++) { + struct ipw_tty *tty = + network->associated_ttys[channel_idx][i]; + + /* + * If it's associated with a tty (other than the RAS channel + * when we're online), then send the data to that tty. The RAS + * channel's data is handled above - it always goes through + * ppp_generic. + */ + if (tty) + ipwireless_tty_notify_control_line_change(tty, + channel_idx, + control_lines, + changed_mask); + } +} + +/* + * Some versions of firmware stuff packets with 0xff 0x03 (PPP: ALLSTATIONS, UI) + * bytes, which are required on sent packet, but not always present on received + * packets + */ +static struct sk_buff *ipw_packet_received_skb(unsigned char *data, + unsigned int length) +{ + struct sk_buff *skb; + + if (length > 2 && data[0] == PPP_ALLSTATIONS && data[1] == PPP_UI) { + length -= 2; + data += 2; + } + + skb = dev_alloc_skb(length + 4); + skb_reserve(skb, 2); + memcpy(skb_put(skb, length), data, length); + + return skb; +} + +void ipwireless_network_packet_received(struct ipw_network *network, + unsigned int channel_idx, + unsigned char *data, + unsigned int length) +{ + int i; + unsigned long flags; + + for (i = 0; i < MAX_ASSOCIATED_TTYS; i++) { + struct ipw_tty *tty = network->associated_ttys[channel_idx][i]; + + if (!tty) + continue; + + /* + * If it's associated with a tty (other than the RAS channel + * when we're online), then send the data to that tty. The RAS + * channel's data is handled above - it always goes through + * ppp_generic. + */ + if (channel_idx == IPW_CHANNEL_RAS + && (network->ras_control_lines & + IPW_CONTROL_LINE_DCD) != 0 + && ipwireless_tty_is_modem(tty)) { + /* + * If data came in on the RAS channel and this tty is + * the modem tty, and we are online, then we send it to + * the PPP layer. + */ + mutex_lock(&network->close_lock); + spin_lock_irqsave(&network->lock, flags); + if (network->ppp_channel != NULL) { + struct sk_buff *skb; + + spin_unlock_irqrestore(&network->lock, + flags); + + /* Send the data to the ppp_generic module. */ + skb = ipw_packet_received_skb(data, length); + ppp_input(network->ppp_channel, skb); + } else + spin_unlock_irqrestore(&network->lock, + flags); + mutex_unlock(&network->close_lock); + } + /* Otherwise we send it out the tty. 
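+		 * (i.e. whenever we are not passing it to the PPP layer as
+		 * online RAS data).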
*/ + else + ipwireless_tty_received(tty, data, length); + } +} + +struct ipw_network *ipwireless_network_create(struct ipw_hardware *hw) +{ + struct ipw_network *network = + kzalloc(sizeof(struct ipw_network), GFP_ATOMIC); + + if (!network) + return NULL; + + spin_lock_init(&network->lock); + mutex_init(&network->close_lock); + + network->hardware = hw; + + INIT_WORK(&network->work_go_online, do_go_online); + INIT_WORK(&network->work_go_offline, do_go_offline); + + ipwireless_associate_network(hw, network); + + return network; +} + +void ipwireless_network_free(struct ipw_network *network) +{ + network->shutting_down = 1; + + ipwireless_ppp_close(network); + flush_work_sync(&network->work_go_online); + flush_work_sync(&network->work_go_offline); + + ipwireless_stop_interrupts(network->hardware); + ipwireless_associate_network(network->hardware, NULL); + + kfree(network); +} + +void ipwireless_associate_network_tty(struct ipw_network *network, + unsigned int channel_idx, + struct ipw_tty *tty) +{ + int i; + + for (i = 0; i < MAX_ASSOCIATED_TTYS; i++) + if (network->associated_ttys[channel_idx][i] == NULL) { + network->associated_ttys[channel_idx][i] = tty; + break; + } +} + +void ipwireless_disassociate_network_ttys(struct ipw_network *network, + unsigned int channel_idx) +{ + int i; + + for (i = 0; i < MAX_ASSOCIATED_TTYS; i++) + network->associated_ttys[channel_idx][i] = NULL; +} + +void ipwireless_ppp_open(struct ipw_network *network) +{ + if (ipwireless_debug) + printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME ": online\n"); + schedule_work(&network->work_go_online); +} + +void ipwireless_ppp_close(struct ipw_network *network) +{ + /* Disconnect from the wireless network. */ + if (ipwireless_debug) + printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME ": offline\n"); + schedule_work(&network->work_go_offline); +} + +int ipwireless_ppp_channel_index(struct ipw_network *network) +{ + int ret = -1; + unsigned long flags; + + spin_lock_irqsave(&network->lock, flags); + if (network->ppp_channel != NULL) + ret = ppp_channel_index(network->ppp_channel); + spin_unlock_irqrestore(&network->lock, flags); + + return ret; +} + +int ipwireless_ppp_unit_number(struct ipw_network *network) +{ + int ret = -1; + unsigned long flags; + + spin_lock_irqsave(&network->lock, flags); + if (network->ppp_channel != NULL) + ret = ppp_unit_number(network->ppp_channel); + spin_unlock_irqrestore(&network->lock, flags); + + return ret; +} + +int ipwireless_ppp_mru(const struct ipw_network *network) +{ + return network->mru; +} diff --git a/drivers/tty/ipwireless/network.h b/drivers/tty/ipwireless/network.h new file mode 100644 index 00000000000..561f765b333 --- /dev/null +++ b/drivers/tty/ipwireless/network.h @@ -0,0 +1,53 @@ +/* + * IPWireless 3G PCMCIA Network Driver + * + * Original code + * by Stephen Blackheath <stephen@blacksapphire.com>, + * Ben Martel <benm@symmetric.co.nz> + * + * Copyrighted as follows: + * Copyright (C) 2004 by Symmetric Systems Ltd (NZ) + * + * Various driver changes and rewrites, port to new kernels + * Copyright (C) 2006-2007 Jiri Kosina + * + * Misc code cleanups and updates + * Copyright (C) 2007 David Sterba + */ + +#ifndef _IPWIRELESS_CS_NETWORK_H_ +#define _IPWIRELESS_CS_NETWORK_H_ + +#include <linux/types.h> + +struct ipw_network; +struct ipw_tty; +struct ipw_hardware; + +/* Definitions of the different channels on the PCMCIA UE */ +#define IPW_CHANNEL_RAS 0 +#define IPW_CHANNEL_DIALLER 1 +#define IPW_CHANNEL_CONSOLE 2 +#define NO_OF_IPW_CHANNELS 5 + +void 
ipwireless_network_notify_control_line_change(struct ipw_network *net, + unsigned int channel_idx, unsigned int control_lines, + unsigned int control_mask); +void ipwireless_network_packet_received(struct ipw_network *net, + unsigned int channel_idx, unsigned char *data, + unsigned int length); +struct ipw_network *ipwireless_network_create(struct ipw_hardware *hw); +void ipwireless_network_free(struct ipw_network *net); +void ipwireless_associate_network_tty(struct ipw_network *net, + unsigned int channel_idx, struct ipw_tty *tty); +void ipwireless_disassociate_network_ttys(struct ipw_network *net, + unsigned int channel_idx); + +void ipwireless_ppp_open(struct ipw_network *net); + +void ipwireless_ppp_close(struct ipw_network *net); +int ipwireless_ppp_channel_index(struct ipw_network *net); +int ipwireless_ppp_unit_number(struct ipw_network *net); +int ipwireless_ppp_mru(const struct ipw_network *net); + +#endif diff --git a/drivers/tty/ipwireless/setup_protocol.h b/drivers/tty/ipwireless/setup_protocol.h new file mode 100644 index 00000000000..9d6bcc77c73 --- /dev/null +++ b/drivers/tty/ipwireless/setup_protocol.h @@ -0,0 +1,108 @@ +/* + * IPWireless 3G PCMCIA Network Driver + * + * Original code + * by Stephen Blackheath <stephen@blacksapphire.com>, + * Ben Martel <benm@symmetric.co.nz> + * + * Copyrighted as follows: + * Copyright (C) 2004 by Symmetric Systems Ltd (NZ) + * + * Various driver changes and rewrites, port to new kernels + * Copyright (C) 2006-2007 Jiri Kosina + * + * Misc code cleanups and updates + * Copyright (C) 2007 David Sterba + */ + +#ifndef _IPWIRELESS_CS_SETUP_PROTOCOL_H_ +#define _IPWIRELESS_CS_SETUP_PROTOCOL_H_ + +/* Version of the setup protocol and transport protocols */ +#define TL_SETUP_VERSION 1 + +#define TL_SETUP_VERSION_QRY_TMO 1000 +#define TL_SETUP_MAX_VERSION_QRY 30 + +/* Message numbers 0-9 are obsoleted and must not be reused! 
*/ +#define TL_SETUP_SIGNO_GET_VERSION_QRY 10 +#define TL_SETUP_SIGNO_GET_VERSION_RSP 11 +#define TL_SETUP_SIGNO_CONFIG_MSG 12 +#define TL_SETUP_SIGNO_CONFIG_DONE_MSG 13 +#define TL_SETUP_SIGNO_OPEN_MSG 14 +#define TL_SETUP_SIGNO_CLOSE_MSG 15 + +#define TL_SETUP_SIGNO_INFO_MSG 20 +#define TL_SETUP_SIGNO_INFO_MSG_ACK 21 + +#define TL_SETUP_SIGNO_REBOOT_MSG 22 +#define TL_SETUP_SIGNO_REBOOT_MSG_ACK 23 + +/* Synchronous start-messages */ +struct tl_setup_get_version_qry { + unsigned char sig_no; /* TL_SETUP_SIGNO_GET_VERSION_QRY */ +} __attribute__ ((__packed__)); + +struct tl_setup_get_version_rsp { + unsigned char sig_no; /* TL_SETUP_SIGNO_GET_VERSION_RSP */ + unsigned char version; /* TL_SETUP_VERSION */ +} __attribute__ ((__packed__)); + +struct tl_setup_config_msg { + unsigned char sig_no; /* TL_SETUP_SIGNO_CONFIG_MSG */ + unsigned char port_no; + unsigned char prio_data; + unsigned char prio_ctrl; +} __attribute__ ((__packed__)); + +struct tl_setup_config_done_msg { + unsigned char sig_no; /* TL_SETUP_SIGNO_CONFIG_DONE_MSG */ +} __attribute__ ((__packed__)); + +/* Asyncronous messages */ +struct tl_setup_open_msg { + unsigned char sig_no; /* TL_SETUP_SIGNO_OPEN_MSG */ + unsigned char port_no; +} __attribute__ ((__packed__)); + +struct tl_setup_close_msg { + unsigned char sig_no; /* TL_SETUP_SIGNO_CLOSE_MSG */ + unsigned char port_no; +} __attribute__ ((__packed__)); + +/* Driver type - for use in tl_setup_info_msg.driver_type */ +#define COMM_DRIVER 0 +#define NDISWAN_DRIVER 1 +#define NDISWAN_DRIVER_MAJOR_VERSION 2 +#define NDISWAN_DRIVER_MINOR_VERSION 0 + +/* + * It should not matter when this message comes over as we just store the + * results and send the ACK. + */ +struct tl_setup_info_msg { + unsigned char sig_no; /* TL_SETUP_SIGNO_INFO_MSG */ + unsigned char driver_type; + unsigned char major_version; + unsigned char minor_version; +} __attribute__ ((__packed__)); + +struct tl_setup_info_msgAck { + unsigned char sig_no; /* TL_SETUP_SIGNO_INFO_MSG_ACK */ +} __attribute__ ((__packed__)); + +struct TlSetupRebootMsgAck { + unsigned char sig_no; /* TL_SETUP_SIGNO_REBOOT_MSG_ACK */ +} __attribute__ ((__packed__)); + +/* Define a union of all the msgs that the driver can receive from the card.*/ +union ipw_setup_rx_msg { + unsigned char sig_no; + struct tl_setup_get_version_rsp version_rsp_msg; + struct tl_setup_open_msg open_msg; + struct tl_setup_close_msg close_msg; + struct tl_setup_info_msg InfoMsg; + struct tl_setup_info_msgAck info_msg_ack; +} __attribute__ ((__packed__)); + +#endif /* _IPWIRELESS_CS_SETUP_PROTOCOL_H_ */ diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c new file mode 100644 index 00000000000..ef92869502a --- /dev/null +++ b/drivers/tty/ipwireless/tty.c @@ -0,0 +1,679 @@ +/* + * IPWireless 3G PCMCIA Network Driver + * + * Original code + * by Stephen Blackheath <stephen@blacksapphire.com>, + * Ben Martel <benm@symmetric.co.nz> + * + * Copyrighted as follows: + * Copyright (C) 2004 by Symmetric Systems Ltd (NZ) + * + * Various driver changes and rewrites, port to new kernels + * Copyright (C) 2006-2007 Jiri Kosina + * + * Misc code cleanups and updates + * Copyright (C) 2007 David Sterba + */ + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/ppp_defs.h> +#include <linux/if.h> +#include <linux/if_ppp.h> +#include <linux/sched.h> +#include <linux/serial.h> +#include <linux/slab.h> +#include <linux/tty.h> +#include <linux/tty_driver.h> +#include <linux/tty_flip.h> 
+#include <linux/uaccess.h> + +#include "tty.h" +#include "network.h" +#include "hardware.h" +#include "main.h" + +#define IPWIRELESS_PCMCIA_START (0) +#define IPWIRELESS_PCMCIA_MINORS (24) +#define IPWIRELESS_PCMCIA_MINOR_RANGE (8) + +#define TTYTYPE_MODEM (0) +#define TTYTYPE_MONITOR (1) +#define TTYTYPE_RAS_RAW (2) + +struct ipw_tty { + int index; + struct ipw_hardware *hardware; + unsigned int channel_idx; + unsigned int secondary_channel_idx; + int tty_type; + struct ipw_network *network; + struct tty_struct *linux_tty; + int open_count; + unsigned int control_lines; + struct mutex ipw_tty_mutex; + int tx_bytes_queued; + int closing; +}; + +static struct ipw_tty *ttys[IPWIRELESS_PCMCIA_MINORS]; + +static struct tty_driver *ipw_tty_driver; + +static char *tty_type_name(int tty_type) +{ + static char *channel_names[] = { + "modem", + "monitor", + "RAS-raw" + }; + + return channel_names[tty_type]; +} + +static void report_registering(struct ipw_tty *tty) +{ + char *iftype = tty_type_name(tty->tty_type); + + printk(KERN_INFO IPWIRELESS_PCCARD_NAME + ": registering %s device ttyIPWp%d\n", iftype, tty->index); +} + +static void report_deregistering(struct ipw_tty *tty) +{ + char *iftype = tty_type_name(tty->tty_type); + + printk(KERN_INFO IPWIRELESS_PCCARD_NAME + ": deregistering %s device ttyIPWp%d\n", iftype, + tty->index); +} + +static struct ipw_tty *get_tty(int minor) +{ + if (minor < ipw_tty_driver->minor_start + || minor >= ipw_tty_driver->minor_start + + IPWIRELESS_PCMCIA_MINORS) + return NULL; + else { + int minor_offset = minor - ipw_tty_driver->minor_start; + + /* + * The 'ras_raw' channel is only available when 'loopback' mode + * is enabled. + * Number of minor starts with 16 (_RANGE * _RAS_RAW). + */ + if (!ipwireless_loopback && + minor_offset >= + IPWIRELESS_PCMCIA_MINOR_RANGE * TTYTYPE_RAS_RAW) + return NULL; + + return ttys[minor_offset]; + } +} + +static int ipw_open(struct tty_struct *linux_tty, struct file *filp) +{ + int minor = linux_tty->index; + struct ipw_tty *tty = get_tty(minor); + + if (!tty) + return -ENODEV; + + mutex_lock(&tty->ipw_tty_mutex); + + if (tty->closing) { + mutex_unlock(&tty->ipw_tty_mutex); + return -ENODEV; + } + if (tty->open_count == 0) + tty->tx_bytes_queued = 0; + + tty->open_count++; + + tty->linux_tty = linux_tty; + linux_tty->driver_data = tty; + linux_tty->low_latency = 1; + + if (tty->tty_type == TTYTYPE_MODEM) + ipwireless_ppp_open(tty->network); + + mutex_unlock(&tty->ipw_tty_mutex); + + return 0; +} + +static void do_ipw_close(struct ipw_tty *tty) +{ + tty->open_count--; + + if (tty->open_count == 0) { + struct tty_struct *linux_tty = tty->linux_tty; + + if (linux_tty != NULL) { + tty->linux_tty = NULL; + linux_tty->driver_data = NULL; + + if (tty->tty_type == TTYTYPE_MODEM) + ipwireless_ppp_close(tty->network); + } + } +} + +static void ipw_hangup(struct tty_struct *linux_tty) +{ + struct ipw_tty *tty = linux_tty->driver_data; + + if (!tty) + return; + + mutex_lock(&tty->ipw_tty_mutex); + if (tty->open_count == 0) { + mutex_unlock(&tty->ipw_tty_mutex); + return; + } + + do_ipw_close(tty); + + mutex_unlock(&tty->ipw_tty_mutex); +} + +static void ipw_close(struct tty_struct *linux_tty, struct file *filp) +{ + ipw_hangup(linux_tty); +} + +/* Take data received from hardware, and send it out the tty */ +void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data, + unsigned int length) +{ + struct tty_struct *linux_tty; + int work = 0; + + mutex_lock(&tty->ipw_tty_mutex); + linux_tty = tty->linux_tty; + if (linux_tty == NULL) 
{ + mutex_unlock(&tty->ipw_tty_mutex); + return; + } + + if (!tty->open_count) { + mutex_unlock(&tty->ipw_tty_mutex); + return; + } + mutex_unlock(&tty->ipw_tty_mutex); + + work = tty_insert_flip_string(linux_tty, data, length); + + if (work != length) + printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME + ": %d chars not inserted to flip buffer!\n", + length - work); + + /* + * This may sleep if ->low_latency is set + */ + if (work) + tty_flip_buffer_push(linux_tty); +} + +static void ipw_write_packet_sent_callback(void *callback_data, + unsigned int packet_length) +{ + struct ipw_tty *tty = callback_data; + + /* + * Packet has been sent, so we subtract the number of bytes from our + * tally of outstanding TX bytes. + */ + tty->tx_bytes_queued -= packet_length; +} + +static int ipw_write(struct tty_struct *linux_tty, + const unsigned char *buf, int count) +{ + struct ipw_tty *tty = linux_tty->driver_data; + int room, ret; + + if (!tty) + return -ENODEV; + + mutex_lock(&tty->ipw_tty_mutex); + if (!tty->open_count) { + mutex_unlock(&tty->ipw_tty_mutex); + return -EINVAL; + } + + room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued; + if (room < 0) + room = 0; + /* Don't allow caller to write any more than we have room for */ + if (count > room) + count = room; + + if (count == 0) { + mutex_unlock(&tty->ipw_tty_mutex); + return 0; + } + + ret = ipwireless_send_packet(tty->hardware, IPW_CHANNEL_RAS, + buf, count, + ipw_write_packet_sent_callback, tty); + if (ret == -1) { + mutex_unlock(&tty->ipw_tty_mutex); + return 0; + } + + tty->tx_bytes_queued += count; + mutex_unlock(&tty->ipw_tty_mutex); + + return count; +} + +static int ipw_write_room(struct tty_struct *linux_tty) +{ + struct ipw_tty *tty = linux_tty->driver_data; + int room; + + /* FIXME: Exactly how is the tty object locked here .. 
*/ + if (!tty) + return -ENODEV; + + if (!tty->open_count) + return -EINVAL; + + room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued; + if (room < 0) + room = 0; + + return room; +} + +static int ipwireless_get_serial_info(struct ipw_tty *tty, + struct serial_struct __user *retinfo) +{ + struct serial_struct tmp; + + if (!retinfo) + return (-EFAULT); + + memset(&tmp, 0, sizeof(tmp)); + tmp.type = PORT_UNKNOWN; + tmp.line = tty->index; + tmp.port = 0; + tmp.irq = 0; + tmp.flags = 0; + tmp.baud_base = 115200; + tmp.close_delay = 0; + tmp.closing_wait = 0; + tmp.custom_divisor = 0; + tmp.hub6 = 0; + if (copy_to_user(retinfo, &tmp, sizeof(*retinfo))) + return -EFAULT; + + return 0; +} + +static int ipw_chars_in_buffer(struct tty_struct *linux_tty) +{ + struct ipw_tty *tty = linux_tty->driver_data; + + if (!tty) + return 0; + + if (!tty->open_count) + return 0; + + return tty->tx_bytes_queued; +} + +static int get_control_lines(struct ipw_tty *tty) +{ + unsigned int my = tty->control_lines; + unsigned int out = 0; + + if (my & IPW_CONTROL_LINE_RTS) + out |= TIOCM_RTS; + if (my & IPW_CONTROL_LINE_DTR) + out |= TIOCM_DTR; + if (my & IPW_CONTROL_LINE_CTS) + out |= TIOCM_CTS; + if (my & IPW_CONTROL_LINE_DSR) + out |= TIOCM_DSR; + if (my & IPW_CONTROL_LINE_DCD) + out |= TIOCM_CD; + + return out; +} + +static int set_control_lines(struct ipw_tty *tty, unsigned int set, + unsigned int clear) +{ + int ret; + + if (set & TIOCM_RTS) { + ret = ipwireless_set_RTS(tty->hardware, tty->channel_idx, 1); + if (ret) + return ret; + if (tty->secondary_channel_idx != -1) { + ret = ipwireless_set_RTS(tty->hardware, + tty->secondary_channel_idx, 1); + if (ret) + return ret; + } + } + if (set & TIOCM_DTR) { + ret = ipwireless_set_DTR(tty->hardware, tty->channel_idx, 1); + if (ret) + return ret; + if (tty->secondary_channel_idx != -1) { + ret = ipwireless_set_DTR(tty->hardware, + tty->secondary_channel_idx, 1); + if (ret) + return ret; + } + } + if (clear & TIOCM_RTS) { + ret = ipwireless_set_RTS(tty->hardware, tty->channel_idx, 0); + if (tty->secondary_channel_idx != -1) { + ret = ipwireless_set_RTS(tty->hardware, + tty->secondary_channel_idx, 0); + if (ret) + return ret; + } + } + if (clear & TIOCM_DTR) { + ret = ipwireless_set_DTR(tty->hardware, tty->channel_idx, 0); + if (tty->secondary_channel_idx != -1) { + ret = ipwireless_set_DTR(tty->hardware, + tty->secondary_channel_idx, 0); + if (ret) + return ret; + } + } + return 0; +} + +static int ipw_tiocmget(struct tty_struct *linux_tty) +{ + struct ipw_tty *tty = linux_tty->driver_data; + /* FIXME: Exactly how is the tty object locked here .. */ + + if (!tty) + return -ENODEV; + + if (!tty->open_count) + return -EINVAL; + + return get_control_lines(tty); +} + +static int +ipw_tiocmset(struct tty_struct *linux_tty, + unsigned int set, unsigned int clear) +{ + struct ipw_tty *tty = linux_tty->driver_data; + /* FIXME: Exactly how is the tty object locked here .. */ + + if (!tty) + return -ENODEV; + + if (!tty->open_count) + return -EINVAL; + + return set_control_lines(tty, set, clear); +} + +static int ipw_ioctl(struct tty_struct *linux_tty, + unsigned int cmd, unsigned long arg) +{ + struct ipw_tty *tty = linux_tty->driver_data; + + if (!tty) + return -ENODEV; + + if (!tty->open_count) + return -EINVAL; + + /* FIXME: Exactly how is the tty object locked here .. */ + + switch (cmd) { + case TIOCGSERIAL: + return ipwireless_get_serial_info(tty, (void __user *) arg); + + case TIOCSSERIAL: + return 0; /* Keeps the PCMCIA scripts happy. 
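+				   * TIOCSSERIAL is accepted but the settings are ignored.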
*/ + } + + if (tty->tty_type == TTYTYPE_MODEM) { + switch (cmd) { + case PPPIOCGCHAN: + { + int chan = ipwireless_ppp_channel_index( + tty->network); + + if (chan < 0) + return -ENODEV; + if (put_user(chan, (int __user *) arg)) + return -EFAULT; + } + return 0; + + case PPPIOCGUNIT: + { + int unit = ipwireless_ppp_unit_number( + tty->network); + + if (unit < 0) + return -ENODEV; + if (put_user(unit, (int __user *) arg)) + return -EFAULT; + } + return 0; + + case FIONREAD: + { + int val = 0; + + if (put_user(val, (int __user *) arg)) + return -EFAULT; + } + return 0; + case TCFLSH: + return tty_perform_flush(linux_tty, arg); + } + } + return -ENOIOCTLCMD; +} + +static int add_tty(int j, + struct ipw_hardware *hardware, + struct ipw_network *network, int channel_idx, + int secondary_channel_idx, int tty_type) +{ + ttys[j] = kzalloc(sizeof(struct ipw_tty), GFP_KERNEL); + if (!ttys[j]) + return -ENOMEM; + ttys[j]->index = j; + ttys[j]->hardware = hardware; + ttys[j]->channel_idx = channel_idx; + ttys[j]->secondary_channel_idx = secondary_channel_idx; + ttys[j]->network = network; + ttys[j]->tty_type = tty_type; + mutex_init(&ttys[j]->ipw_tty_mutex); + + tty_register_device(ipw_tty_driver, j, NULL); + ipwireless_associate_network_tty(network, channel_idx, ttys[j]); + + if (secondary_channel_idx != -1) + ipwireless_associate_network_tty(network, + secondary_channel_idx, + ttys[j]); + if (get_tty(j + ipw_tty_driver->minor_start) == ttys[j]) + report_registering(ttys[j]); + return 0; +} + +struct ipw_tty *ipwireless_tty_create(struct ipw_hardware *hardware, + struct ipw_network *network) +{ + int i, j; + + for (i = 0; i < IPWIRELESS_PCMCIA_MINOR_RANGE; i++) { + int allfree = 1; + + for (j = i; j < IPWIRELESS_PCMCIA_MINORS; + j += IPWIRELESS_PCMCIA_MINOR_RANGE) + if (ttys[j] != NULL) { + allfree = 0; + break; + } + + if (allfree) { + j = i; + + if (add_tty(j, hardware, network, + IPW_CHANNEL_DIALLER, IPW_CHANNEL_RAS, + TTYTYPE_MODEM)) + return NULL; + + j += IPWIRELESS_PCMCIA_MINOR_RANGE; + if (add_tty(j, hardware, network, + IPW_CHANNEL_DIALLER, -1, + TTYTYPE_MONITOR)) + return NULL; + + j += IPWIRELESS_PCMCIA_MINOR_RANGE; + if (add_tty(j, hardware, network, + IPW_CHANNEL_RAS, -1, + TTYTYPE_RAS_RAW)) + return NULL; + + return ttys[i]; + } + } + return NULL; +} + +/* + * Must be called before ipwireless_network_free(). 
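+ * Hangs up and unregisters every minor created for this hardware (modem,
+ * monitor and RAS-raw), waits for any pending hangup work to finish, and
+ * drops the channel associations.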
+ */ +void ipwireless_tty_free(struct ipw_tty *tty) +{ + int j; + struct ipw_network *network = ttys[tty->index]->network; + + for (j = tty->index; j < IPWIRELESS_PCMCIA_MINORS; + j += IPWIRELESS_PCMCIA_MINOR_RANGE) { + struct ipw_tty *ttyj = ttys[j]; + + if (ttyj) { + mutex_lock(&ttyj->ipw_tty_mutex); + if (get_tty(j + ipw_tty_driver->minor_start) == ttyj) + report_deregistering(ttyj); + ttyj->closing = 1; + if (ttyj->linux_tty != NULL) { + mutex_unlock(&ttyj->ipw_tty_mutex); + tty_hangup(ttyj->linux_tty); + /* Wait till the tty_hangup has completed */ + flush_work_sync(&ttyj->linux_tty->hangup_work); + /* FIXME: Exactly how is the tty object locked here + against a parallel ioctl etc */ + mutex_lock(&ttyj->ipw_tty_mutex); + } + while (ttyj->open_count) + do_ipw_close(ttyj); + ipwireless_disassociate_network_ttys(network, + ttyj->channel_idx); + tty_unregister_device(ipw_tty_driver, j); + ttys[j] = NULL; + mutex_unlock(&ttyj->ipw_tty_mutex); + kfree(ttyj); + } + } +} + +static const struct tty_operations tty_ops = { + .open = ipw_open, + .close = ipw_close, + .hangup = ipw_hangup, + .write = ipw_write, + .write_room = ipw_write_room, + .ioctl = ipw_ioctl, + .chars_in_buffer = ipw_chars_in_buffer, + .tiocmget = ipw_tiocmget, + .tiocmset = ipw_tiocmset, +}; + +int ipwireless_tty_init(void) +{ + int result; + + ipw_tty_driver = alloc_tty_driver(IPWIRELESS_PCMCIA_MINORS); + if (!ipw_tty_driver) + return -ENOMEM; + + ipw_tty_driver->owner = THIS_MODULE; + ipw_tty_driver->driver_name = IPWIRELESS_PCCARD_NAME; + ipw_tty_driver->name = "ttyIPWp"; + ipw_tty_driver->major = 0; + ipw_tty_driver->minor_start = IPWIRELESS_PCMCIA_START; + ipw_tty_driver->type = TTY_DRIVER_TYPE_SERIAL; + ipw_tty_driver->subtype = SERIAL_TYPE_NORMAL; + ipw_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV; + ipw_tty_driver->init_termios = tty_std_termios; + ipw_tty_driver->init_termios.c_cflag = + B9600 | CS8 | CREAD | HUPCL | CLOCAL; + ipw_tty_driver->init_termios.c_ispeed = 9600; + ipw_tty_driver->init_termios.c_ospeed = 9600; + tty_set_operations(ipw_tty_driver, &tty_ops); + result = tty_register_driver(ipw_tty_driver); + if (result) { + printk(KERN_ERR IPWIRELESS_PCCARD_NAME + ": failed to register tty driver\n"); + put_tty_driver(ipw_tty_driver); + return result; + } + + return 0; +} + +void ipwireless_tty_release(void) +{ + int ret; + + ret = tty_unregister_driver(ipw_tty_driver); + put_tty_driver(ipw_tty_driver); + if (ret != 0) + printk(KERN_ERR IPWIRELESS_PCCARD_NAME + ": tty_unregister_driver failed with code %d\n", ret); +} + +int ipwireless_tty_is_modem(struct ipw_tty *tty) +{ + return tty->tty_type == TTYTYPE_MODEM; +} + +void +ipwireless_tty_notify_control_line_change(struct ipw_tty *tty, + unsigned int channel_idx, + unsigned int control_lines, + unsigned int changed_mask) +{ + unsigned int old_control_lines = tty->control_lines; + + tty->control_lines = (tty->control_lines & ~changed_mask) + | (control_lines & changed_mask); + + /* + * If DCD is de-asserted, we close the tty so pppd can tell that we + * have gone offline. 
+ */ + if ((old_control_lines & IPW_CONTROL_LINE_DCD) + && !(tty->control_lines & IPW_CONTROL_LINE_DCD) + && tty->linux_tty) { + tty_hangup(tty->linux_tty); + } +} + diff --git a/drivers/tty/ipwireless/tty.h b/drivers/tty/ipwireless/tty.h new file mode 100644 index 00000000000..747b2d63786 --- /dev/null +++ b/drivers/tty/ipwireless/tty.h @@ -0,0 +1,45 @@ +/* + * IPWireless 3G PCMCIA Network Driver + * + * Original code + * by Stephen Blackheath <stephen@blacksapphire.com>, + * Ben Martel <benm@symmetric.co.nz> + * + * Copyrighted as follows: + * Copyright (C) 2004 by Symmetric Systems Ltd (NZ) + * + * Various driver changes and rewrites, port to new kernels + * Copyright (C) 2006-2007 Jiri Kosina + * + * Misc code cleanups and updates + * Copyright (C) 2007 David Sterba + */ + +#ifndef _IPWIRELESS_CS_TTY_H_ +#define _IPWIRELESS_CS_TTY_H_ + +#include <linux/types.h> +#include <linux/sched.h> + +#include <pcmcia/cistpl.h> +#include <pcmcia/ds.h> + +struct ipw_tty; +struct ipw_network; +struct ipw_hardware; + +int ipwireless_tty_init(void); +void ipwireless_tty_release(void); + +struct ipw_tty *ipwireless_tty_create(struct ipw_hardware *hw, + struct ipw_network *net); +void ipwireless_tty_free(struct ipw_tty *tty); +void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data, + unsigned int length); +int ipwireless_tty_is_modem(struct ipw_tty *tty); +void ipwireless_tty_notify_control_line_change(struct ipw_tty *tty, + unsigned int channel_idx, + unsigned int control_lines, + unsigned int changed_mask); + +#endif diff --git a/drivers/tty/isicom.c b/drivers/tty/isicom.c new file mode 100644 index 00000000000..db1cf9c328d --- /dev/null +++ b/drivers/tty/isicom.c @@ -0,0 +1,1736 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * Original driver code supplied by Multi-Tech + * + * Changes + * 1/9/98 alan@lxorguk.ukuu.org.uk + * Merge to 2.0.x kernel tree + * Obtain and use official major/minors + * Loader switched to a misc device + * (fixed range check bug as a side effect) + * Printk clean up + * 9/12/98 alan@lxorguk.ukuu.org.uk + * Rough port to 2.1.x + * + * 10/6/99 sameer Merged the ISA and PCI drivers to + * a new unified driver. + * + * 3/9/99 sameer Added support for ISI4616 cards. + * + * 16/9/99 sameer We do not force RTS low anymore. + * This is to prevent the firmware + * from getting confused. + * + * 26/10/99 sameer Cosmetic changes:The driver now + * dumps the Port Count information + * along with I/O address and IRQ. + * + * 13/12/99 sameer Fixed the problem with IRQ sharing. + * + * 10/5/00 sameer Fixed isicom_shutdown_board() + * to not lower DTR on all the ports + * when the last port on the card is + * closed. + * + * 10/5/00 sameer Signal mask setup command added + * to isicom_setup_port and + * isicom_shutdown_port. + * + * 24/5/00 sameer The driver is now SMP aware. + * + * + * 27/11/00 Vinayak P Risbud Fixed the Driver Crash Problem + * + * + * 03/01/01 anil .s Added support for resetting the + * internal modems on ISI cards. + * + * 08/02/01 anil .s Upgraded the driver for kernel + * 2.4.x + * + * 11/04/01 Kevin Fixed firmware load problem with + * ISIHP-4X card + * + * 30/04/01 anil .s Fixed the remote login through + * ISI port problem. Now the link + * does not go down before password + * prompt. 
+ * + * 03/05/01 anil .s Fixed the problem with IRQ sharing + * among ISI-PCI cards. + * + * 03/05/01 anil .s Added support to display the version + * info during insmod as well as module + * listing by lsmod. + * + * 10/05/01 anil .s Done the modifications to the source + * file and Install script so that the + * same installation can be used for + * 2.2.x and 2.4.x kernel. + * + * 06/06/01 anil .s Now we drop both dtr and rts during + * shutdown_port as well as raise them + * during isicom_config_port. + * + * 09/06/01 acme@conectiva.com.br use capable, not suser, do + * restore_flags on failure in + * isicom_send_break, verify put_user + * result + * + * 11/02/03 ranjeeth Added support for 230 Kbps and 460 Kbps + * Baud index extended to 21 + * + * 20/03/03 ranjeeth Made to work for Linux Advanced server. + * Taken care of license warning. + * + * 10/12/03 Ravindra Made to work for Fedora Core 1 of + * Red Hat Distribution + * + * 06/01/05 Alan Cox Merged the ISI and base kernel strands + * into a single 2.6 driver + * + * *********************************************************** + * + * To use this driver you also need the support package. You + * can find this in RPM format on + * ftp://ftp.linux.org.uk/pub/linux/alan + * + * You can find the original tools for this direct from Multitech + * ftp://ftp.multitech.com/ISI-Cards/ + * + * Having installed the cards the module options (/etc/modprobe.conf) + * + * options isicom io=card1,card2,card3,card4 irq=card1,card2,card3,card4 + * + * Omit those entries for boards you don't have installed. + * + * TODO + * Merge testing + * 64-bit verification + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/firmware.h> +#include <linux/kernel.h> +#include <linux/tty.h> +#include <linux/tty_flip.h> +#include <linux/termios.h> +#include <linux/fs.h> +#include <linux/sched.h> +#include <linux/serial.h> +#include <linux/mm.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/delay.h> +#include <linux/ioport.h> +#include <linux/slab.h> + +#include <linux/uaccess.h> +#include <linux/io.h> +#include <asm/system.h> + +#include <linux/pci.h> + +#include <linux/isicom.h> + +#define InterruptTheCard(base) outw(0, (base) + 0xc) +#define ClearInterrupt(base) inw((base) + 0x0a) + +#ifdef DEBUG +#define isicom_paranoia_check(a, b, c) __isicom_paranoia_check((a), (b), (c)) +#else +#define isicom_paranoia_check(a, b, c) 0 +#endif + +static int isicom_probe(struct pci_dev *, const struct pci_device_id *); +static void __devexit isicom_remove(struct pci_dev *); + +static struct pci_device_id isicom_pci_tbl[] = { + { PCI_DEVICE(VENDOR_ID, 0x2028) }, + { PCI_DEVICE(VENDOR_ID, 0x2051) }, + { PCI_DEVICE(VENDOR_ID, 0x2052) }, + { PCI_DEVICE(VENDOR_ID, 0x2053) }, + { PCI_DEVICE(VENDOR_ID, 0x2054) }, + { PCI_DEVICE(VENDOR_ID, 0x2055) }, + { PCI_DEVICE(VENDOR_ID, 0x2056) }, + { PCI_DEVICE(VENDOR_ID, 0x2057) }, + { PCI_DEVICE(VENDOR_ID, 0x2058) }, + { 0 } +}; +MODULE_DEVICE_TABLE(pci, isicom_pci_tbl); + +static struct pci_driver isicom_driver = { + .name = "isicom", + .id_table = isicom_pci_tbl, + .probe = isicom_probe, + .remove = __devexit_p(isicom_remove) +}; + +static int prev_card = 3; /* start servicing isi_card[0] */ +static struct tty_driver *isicom_normal; + +static void isicom_tx(unsigned long _data); +static void isicom_start(struct tty_struct *tty); + +static DEFINE_TIMER(tx, isicom_tx, 0, 0); + +/* baud index mappings from linux defns to isi */ + +static signed char linuxb_to_isib[] = { + -1, 
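+	/* entry 0 corresponds to B0 (hang up) */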
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 13, 15, 16, 17, 18, 19, 20, 21 +}; + +struct isi_board { + unsigned long base; + int irq; + unsigned char port_count; + unsigned short status; + unsigned short port_status; /* each bit for each port */ + unsigned short shift_count; + struct isi_port *ports; + signed char count; + spinlock_t card_lock; /* Card wide lock 11/5/00 -sameer */ + unsigned long flags; + unsigned int index; +}; + +struct isi_port { + unsigned short magic; + struct tty_port port; + u16 channel; + u16 status; + struct isi_board *card; + unsigned char *xmit_buf; + int xmit_head; + int xmit_tail; + int xmit_cnt; +}; + +static struct isi_board isi_card[BOARD_COUNT]; +static struct isi_port isi_ports[PORT_COUNT]; + +/* + * Locking functions for card level locking. We need to own both + * the kernel lock for the card and have the card in a position that + * it wants to talk. + */ + +static inline int WaitTillCardIsFree(unsigned long base) +{ + unsigned int count = 0; + unsigned int a = in_atomic(); /* do we run under spinlock? */ + + while (!(inw(base + 0xe) & 0x1) && count++ < 100) + if (a) + mdelay(1); + else + msleep(1); + + return !(inw(base + 0xe) & 0x1); +} + +static int lock_card(struct isi_board *card) +{ + unsigned long base = card->base; + unsigned int retries, a; + + for (retries = 0; retries < 10; retries++) { + spin_lock_irqsave(&card->card_lock, card->flags); + for (a = 0; a < 10; a++) { + if (inw(base + 0xe) & 0x1) + return 1; + udelay(10); + } + spin_unlock_irqrestore(&card->card_lock, card->flags); + msleep(10); + } + pr_warning("Failed to lock Card (0x%lx)\n", card->base); + + return 0; /* Failed to acquire the card! */ +} + +static void unlock_card(struct isi_board *card) +{ + spin_unlock_irqrestore(&card->card_lock, card->flags); +} + +/* + * ISI Card specific ops ... 
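+ * Each op waits for the card to go idle, writes a channel-select word
+ * (0x8000 | (channel << shift_count) | 0x02) to the base port, then a
+ * command word (0x0504/0x0404 raise/drop DTR, 0x0a04/0x0804 raise/drop RTS,
+ * 0x0f04/0x0c04 both together) and finally interrupts the card.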
+ */ + +/* card->lock HAS to be held */ +static void raise_dtr(struct isi_port *port) +{ + struct isi_board *card = port->card; + unsigned long base = card->base; + u16 channel = port->channel; + + if (WaitTillCardIsFree(base)) + return; + + outw(0x8000 | (channel << card->shift_count) | 0x02, base); + outw(0x0504, base); + InterruptTheCard(base); + port->status |= ISI_DTR; +} + +/* card->lock HAS to be held */ +static inline void drop_dtr(struct isi_port *port) +{ + struct isi_board *card = port->card; + unsigned long base = card->base; + u16 channel = port->channel; + + if (WaitTillCardIsFree(base)) + return; + + outw(0x8000 | (channel << card->shift_count) | 0x02, base); + outw(0x0404, base); + InterruptTheCard(base); + port->status &= ~ISI_DTR; +} + +/* card->lock HAS to be held */ +static inline void raise_rts(struct isi_port *port) +{ + struct isi_board *card = port->card; + unsigned long base = card->base; + u16 channel = port->channel; + + if (WaitTillCardIsFree(base)) + return; + + outw(0x8000 | (channel << card->shift_count) | 0x02, base); + outw(0x0a04, base); + InterruptTheCard(base); + port->status |= ISI_RTS; +} + +/* card->lock HAS to be held */ +static inline void drop_rts(struct isi_port *port) +{ + struct isi_board *card = port->card; + unsigned long base = card->base; + u16 channel = port->channel; + + if (WaitTillCardIsFree(base)) + return; + + outw(0x8000 | (channel << card->shift_count) | 0x02, base); + outw(0x0804, base); + InterruptTheCard(base); + port->status &= ~ISI_RTS; +} + +/* card->lock MUST NOT be held */ + +static void isicom_dtr_rts(struct tty_port *port, int on) +{ + struct isi_port *ip = container_of(port, struct isi_port, port); + struct isi_board *card = ip->card; + unsigned long base = card->base; + u16 channel = ip->channel; + + if (!lock_card(card)) + return; + + if (on) { + outw(0x8000 | (channel << card->shift_count) | 0x02, base); + outw(0x0f04, base); + InterruptTheCard(base); + ip->status |= (ISI_DTR | ISI_RTS); + } else { + outw(0x8000 | (channel << card->shift_count) | 0x02, base); + outw(0x0C04, base); + InterruptTheCard(base); + ip->status &= ~(ISI_DTR | ISI_RTS); + } + unlock_card(card); +} + +/* card->lock HAS to be held */ +static void drop_dtr_rts(struct isi_port *port) +{ + struct isi_board *card = port->card; + unsigned long base = card->base; + u16 channel = port->channel; + + if (WaitTillCardIsFree(base)) + return; + + outw(0x8000 | (channel << card->shift_count) | 0x02, base); + outw(0x0c04, base); + InterruptTheCard(base); + port->status &= ~(ISI_RTS | ISI_DTR); +} + +/* + * ISICOM Driver specific routines ... + * + */ + +static inline int __isicom_paranoia_check(struct isi_port const *port, + char *name, const char *routine) +{ + if (!port) { + pr_warning("Warning: bad isicom magic for dev %s in %s.\n", + name, routine); + return 1; + } + if (port->magic != ISICOM_MAGIC) { + pr_warning("Warning: NULL isicom port for dev %s in %s.\n", + name, routine); + return 1; + } + + return 0; +} + +/* + * Transmitter. + * + * We shovel data into the card buffers on a regular basis. The card + * will do the rest of the work for us. 
+ */ + +static void isicom_tx(unsigned long _data) +{ + unsigned long flags, base; + unsigned int retries; + short count = (BOARD_COUNT-1), card; + short txcount, wrd, residue, word_count, cnt; + struct isi_port *port; + struct tty_struct *tty; + + /* find next active board */ + card = (prev_card + 1) & 0x0003; + while (count-- > 0) { + if (isi_card[card].status & BOARD_ACTIVE) + break; + card = (card + 1) & 0x0003; + } + if (!(isi_card[card].status & BOARD_ACTIVE)) + goto sched_again; + + prev_card = card; + + count = isi_card[card].port_count; + port = isi_card[card].ports; + base = isi_card[card].base; + + spin_lock_irqsave(&isi_card[card].card_lock, flags); + for (retries = 0; retries < 100; retries++) { + if (inw(base + 0xe) & 0x1) + break; + udelay(2); + } + if (retries >= 100) + goto unlock; + + tty = tty_port_tty_get(&port->port); + if (tty == NULL) + goto put_unlock; + + for (; count > 0; count--, port++) { + /* port not active or tx disabled to force flow control */ + if (!(port->port.flags & ASYNC_INITIALIZED) || + !(port->status & ISI_TXOK)) + continue; + + txcount = min_t(short, TX_SIZE, port->xmit_cnt); + if (txcount <= 0 || tty->stopped || tty->hw_stopped) + continue; + + if (!(inw(base + 0x02) & (1 << port->channel))) + continue; + + pr_debug("txing %d bytes, port%d.\n", + txcount, port->channel + 1); + outw((port->channel << isi_card[card].shift_count) | txcount, + base); + residue = NO; + wrd = 0; + while (1) { + cnt = min_t(int, txcount, (SERIAL_XMIT_SIZE + - port->xmit_tail)); + if (residue == YES) { + residue = NO; + if (cnt > 0) { + wrd |= (port->port.xmit_buf[port->xmit_tail] + << 8); + port->xmit_tail = (port->xmit_tail + 1) + & (SERIAL_XMIT_SIZE - 1); + port->xmit_cnt--; + txcount--; + cnt--; + outw(wrd, base); + } else { + outw(wrd, base); + break; + } + } + if (cnt <= 0) + break; + word_count = cnt >> 1; + outsw(base, port->port.xmit_buf+port->xmit_tail, word_count); + port->xmit_tail = (port->xmit_tail + + (word_count << 1)) & (SERIAL_XMIT_SIZE - 1); + txcount -= (word_count << 1); + port->xmit_cnt -= (word_count << 1); + if (cnt & 0x0001) { + residue = YES; + wrd = port->port.xmit_buf[port->xmit_tail]; + port->xmit_tail = (port->xmit_tail + 1) + & (SERIAL_XMIT_SIZE - 1); + port->xmit_cnt--; + txcount--; + } + } + + InterruptTheCard(base); + if (port->xmit_cnt <= 0) + port->status &= ~ISI_TXOK; + if (port->xmit_cnt <= WAKEUP_CHARS) + tty_wakeup(tty); + } + +put_unlock: + tty_kref_put(tty); +unlock: + spin_unlock_irqrestore(&isi_card[card].card_lock, flags); + /* schedule another tx for hopefully in about 10ms */ +sched_again: + mod_timer(&tx, jiffies + msecs_to_jiffies(10)); +} + +/* + * Main interrupt handler routine + */ + +static irqreturn_t isicom_interrupt(int irq, void *dev_id) +{ + struct isi_board *card = dev_id; + struct isi_port *port; + struct tty_struct *tty; + unsigned long base; + u16 header, word_count, count, channel; + short byte_count; + unsigned char *rp; + + if (!card || !(card->status & FIRMWARE_LOADED)) + return IRQ_NONE; + + base = card->base; + + /* did the card interrupt us? 
*/ + if (!(inw(base + 0x0e) & 0x02)) + return IRQ_NONE; + + spin_lock(&card->card_lock); + + /* + * disable any interrupts from the PCI card and lower the + * interrupt line + */ + outw(0x8000, base+0x04); + ClearInterrupt(base); + + inw(base); /* get the dummy word out */ + header = inw(base); + channel = (header & 0x7800) >> card->shift_count; + byte_count = header & 0xff; + + if (channel + 1 > card->port_count) { + pr_warning("%s(0x%lx): %d(channel) > port_count.\n", + __func__, base, channel+1); + outw(0x0000, base+0x04); /* enable interrupts */ + spin_unlock(&card->card_lock); + return IRQ_HANDLED; + } + port = card->ports + channel; + if (!(port->port.flags & ASYNC_INITIALIZED)) { + outw(0x0000, base+0x04); /* enable interrupts */ + spin_unlock(&card->card_lock); + return IRQ_HANDLED; + } + + tty = tty_port_tty_get(&port->port); + if (tty == NULL) { + word_count = byte_count >> 1; + while (byte_count > 1) { + inw(base); + byte_count -= 2; + } + if (byte_count & 0x01) + inw(base); + outw(0x0000, base+0x04); /* enable interrupts */ + spin_unlock(&card->card_lock); + return IRQ_HANDLED; + } + + if (header & 0x8000) { /* Status Packet */ + header = inw(base); + switch (header & 0xff) { + case 0: /* Change in EIA signals */ + if (port->port.flags & ASYNC_CHECK_CD) { + if (port->status & ISI_DCD) { + if (!(header & ISI_DCD)) { + /* Carrier has been lost */ + pr_debug("%s: DCD->low.\n", + __func__); + port->status &= ~ISI_DCD; + tty_hangup(tty); + } + } else if (header & ISI_DCD) { + /* Carrier has been detected */ + pr_debug("%s: DCD->high.\n", + __func__); + port->status |= ISI_DCD; + wake_up_interruptible(&port->port.open_wait); + } + } else { + if (header & ISI_DCD) + port->status |= ISI_DCD; + else + port->status &= ~ISI_DCD; + } + + if (port->port.flags & ASYNC_CTS_FLOW) { + if (tty->hw_stopped) { + if (header & ISI_CTS) { + port->port.tty->hw_stopped = 0; + /* start tx ing */ + port->status |= (ISI_TXOK + | ISI_CTS); + tty_wakeup(tty); + } + } else if (!(header & ISI_CTS)) { + tty->hw_stopped = 1; + /* stop tx ing */ + port->status &= ~(ISI_TXOK | ISI_CTS); + } + } else { + if (header & ISI_CTS) + port->status |= ISI_CTS; + else + port->status &= ~ISI_CTS; + } + + if (header & ISI_DSR) + port->status |= ISI_DSR; + else + port->status &= ~ISI_DSR; + + if (header & ISI_RI) + port->status |= ISI_RI; + else + port->status &= ~ISI_RI; + + break; + + case 1: /* Received Break !!! */ + tty_insert_flip_char(tty, 0, TTY_BREAK); + if (port->port.flags & ASYNC_SAK) + do_SAK(tty); + tty_flip_buffer_push(tty); + break; + + case 2: /* Statistics */ + pr_debug("%s: stats!!!\n", __func__); + break; + + default: + pr_debug("%s: Unknown code in status packet.\n", + __func__); + break; + } + } else { /* Data Packet */ + + count = tty_prepare_flip_string(tty, &rp, byte_count & ~1); + pr_debug("%s: Can rx %d of %d bytes.\n", + __func__, count, byte_count); + word_count = count >> 1; + insw(base, rp, word_count); + byte_count -= (word_count << 1); + if (count & 0x0001) { + tty_insert_flip_char(tty, inw(base) & 0xff, + TTY_NORMAL); + byte_count -= 2; + } + if (byte_count > 0) { + pr_debug("%s(0x%lx:%d): Flip buffer overflow! 
dropping bytes...\n", + __func__, base, channel + 1); + /* drain out unread xtra data */ + while (byte_count > 0) { + inw(base); + byte_count -= 2; + } + } + tty_flip_buffer_push(tty); + } + outw(0x0000, base+0x04); /* enable interrupts */ + spin_unlock(&card->card_lock); + tty_kref_put(tty); + + return IRQ_HANDLED; +} + +static void isicom_config_port(struct tty_struct *tty) +{ + struct isi_port *port = tty->driver_data; + struct isi_board *card = port->card; + unsigned long baud; + unsigned long base = card->base; + u16 channel_setup, channel = port->channel, + shift_count = card->shift_count; + unsigned char flow_ctrl; + + /* FIXME: Switch to new tty baud API */ + baud = C_BAUD(tty); + if (baud & CBAUDEX) { + baud &= ~CBAUDEX; + + /* if CBAUDEX bit is on and the baud is set to either 50 or 75 + * then the card is programmed for 57.6Kbps or 115Kbps + * respectively. + */ + + /* 1,2,3,4 => 57.6, 115.2, 230, 460 kbps resp. */ + if (baud < 1 || baud > 4) + tty->termios->c_cflag &= ~CBAUDEX; + else + baud += 15; + } + if (baud == 15) { + + /* the ASYNC_SPD_HI and ASYNC_SPD_VHI options are set + * by the set_serial_info ioctl ... this is done by + * the 'setserial' utility. + */ + + if ((port->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI) + baud++; /* 57.6 Kbps */ + if ((port->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI) + baud += 2; /* 115 Kbps */ + if ((port->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI) + baud += 3; /* 230 kbps*/ + if ((port->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP) + baud += 4; /* 460 kbps*/ + } + if (linuxb_to_isib[baud] == -1) { + /* hang up */ + drop_dtr(port); + return; + } else + raise_dtr(port); + + if (WaitTillCardIsFree(base) == 0) { + outw(0x8000 | (channel << shift_count) | 0x03, base); + outw(linuxb_to_isib[baud] << 8 | 0x03, base); + channel_setup = 0; + switch (C_CSIZE(tty)) { + case CS5: + channel_setup |= ISICOM_CS5; + break; + case CS6: + channel_setup |= ISICOM_CS6; + break; + case CS7: + channel_setup |= ISICOM_CS7; + break; + case CS8: + channel_setup |= ISICOM_CS8; + break; + } + + if (C_CSTOPB(tty)) + channel_setup |= ISICOM_2SB; + if (C_PARENB(tty)) { + channel_setup |= ISICOM_EVPAR; + if (C_PARODD(tty)) + channel_setup |= ISICOM_ODPAR; + } + outw(channel_setup, base); + InterruptTheCard(base); + } + if (C_CLOCAL(tty)) + port->port.flags &= ~ASYNC_CHECK_CD; + else + port->port.flags |= ASYNC_CHECK_CD; + + /* flow control settings ...*/ + flow_ctrl = 0; + port->port.flags &= ~ASYNC_CTS_FLOW; + if (C_CRTSCTS(tty)) { + port->port.flags |= ASYNC_CTS_FLOW; + flow_ctrl |= ISICOM_CTSRTS; + } + if (I_IXON(tty)) + flow_ctrl |= ISICOM_RESPOND_XONXOFF; + if (I_IXOFF(tty)) + flow_ctrl |= ISICOM_INITIATE_XONXOFF; + + if (WaitTillCardIsFree(base) == 0) { + outw(0x8000 | (channel << shift_count) | 0x04, base); + outw(flow_ctrl << 8 | 0x05, base); + outw((STOP_CHAR(tty)) << 8 | (START_CHAR(tty)), base); + InterruptTheCard(base); + } + + /* rx enabled -> enable port for rx on the card */ + if (C_CREAD(tty)) { + card->port_status |= (1 << channel); + outw(card->port_status, base + 0x02); + } +} + +/* open et all */ + +static inline void isicom_setup_board(struct isi_board *bp) +{ + int channel; + struct isi_port *port; + + bp->count++; + if (!(bp->status & BOARD_INIT)) { + port = bp->ports; + for (channel = 0; channel < bp->port_count; channel++, port++) + drop_dtr_rts(port); + } + bp->status |= BOARD_ACTIVE | BOARD_INIT; +} + +/* Activate and thus setup board are protected from races against shutdown + by the tty_port mutex */ + +static int 
isicom_activate(struct tty_port *tport, struct tty_struct *tty) +{ + struct isi_port *port = container_of(tport, struct isi_port, port); + struct isi_board *card = port->card; + unsigned long flags; + + if (tty_port_alloc_xmit_buf(tport) < 0) + return -ENOMEM; + + spin_lock_irqsave(&card->card_lock, flags); + isicom_setup_board(card); + + port->xmit_cnt = port->xmit_head = port->xmit_tail = 0; + + /* discard any residual data */ + if (WaitTillCardIsFree(card->base) == 0) { + outw(0x8000 | (port->channel << card->shift_count) | 0x02, + card->base); + outw(((ISICOM_KILLTX | ISICOM_KILLRX) << 8) | 0x06, card->base); + InterruptTheCard(card->base); + } + isicom_config_port(tty); + spin_unlock_irqrestore(&card->card_lock, flags); + + return 0; +} + +static int isicom_carrier_raised(struct tty_port *port) +{ + struct isi_port *ip = container_of(port, struct isi_port, port); + return (ip->status & ISI_DCD)?1 : 0; +} + +static struct tty_port *isicom_find_port(struct tty_struct *tty) +{ + struct isi_port *port; + struct isi_board *card; + unsigned int board; + int line = tty->index; + + if (line < 0 || line > PORT_COUNT-1) + return NULL; + board = BOARD(line); + card = &isi_card[board]; + + if (!(card->status & FIRMWARE_LOADED)) + return NULL; + + /* open on a port greater than the port count for the card !!! */ + if (line > ((board * 16) + card->port_count - 1)) + return NULL; + + port = &isi_ports[line]; + if (isicom_paranoia_check(port, tty->name, "isicom_open")) + return NULL; + + return &port->port; +} + +static int isicom_open(struct tty_struct *tty, struct file *filp) +{ + struct isi_port *port; + struct tty_port *tport; + + tport = isicom_find_port(tty); + if (tport == NULL) + return -ENODEV; + port = container_of(tport, struct isi_port, port); + + tty->driver_data = port; + return tty_port_open(tport, tty, filp); +} + +/* close et all */ + +/* card->lock HAS to be held */ +static void isicom_shutdown_port(struct isi_port *port) +{ + struct isi_board *card = port->card; + + if (--card->count < 0) { + pr_debug("%s: bad board(0x%lx) count %d.\n", + __func__, card->base, card->count); + card->count = 0; + } + /* last port was closed, shutdown that board too */ + if (!card->count) + card->status &= BOARD_ACTIVE; +} + +static void isicom_flush_buffer(struct tty_struct *tty) +{ + struct isi_port *port = tty->driver_data; + struct isi_board *card = port->card; + unsigned long flags; + + if (isicom_paranoia_check(port, tty->name, "isicom_flush_buffer")) + return; + + spin_lock_irqsave(&card->card_lock, flags); + port->xmit_cnt = port->xmit_head = port->xmit_tail = 0; + spin_unlock_irqrestore(&card->card_lock, flags); + + tty_wakeup(tty); +} + +static void isicom_shutdown(struct tty_port *port) +{ + struct isi_port *ip = container_of(port, struct isi_port, port); + struct isi_board *card = ip->card; + unsigned long flags; + + /* indicate to the card that no more data can be received + on this port */ + spin_lock_irqsave(&card->card_lock, flags); + card->port_status &= ~(1 << ip->channel); + outw(card->port_status, card->base + 0x02); + isicom_shutdown_port(ip); + spin_unlock_irqrestore(&card->card_lock, flags); + tty_port_free_xmit_buf(port); +} + +static void isicom_close(struct tty_struct *tty, struct file *filp) +{ + struct isi_port *ip = tty->driver_data; + struct tty_port *port; + + if (ip == NULL) + return; + + port = &ip->port; + if (isicom_paranoia_check(ip, tty->name, "isicom_close")) + return; + tty_port_close(port, tty, filp); +} + +/* write et all */ +static int isicom_write(struct 
tty_struct *tty, const unsigned char *buf, + int count) +{ + struct isi_port *port = tty->driver_data; + struct isi_board *card = port->card; + unsigned long flags; + int cnt, total = 0; + + if (isicom_paranoia_check(port, tty->name, "isicom_write")) + return 0; + + spin_lock_irqsave(&card->card_lock, flags); + + while (1) { + cnt = min_t(int, count, min(SERIAL_XMIT_SIZE - port->xmit_cnt + - 1, SERIAL_XMIT_SIZE - port->xmit_head)); + if (cnt <= 0) + break; + + memcpy(port->port.xmit_buf + port->xmit_head, buf, cnt); + port->xmit_head = (port->xmit_head + cnt) & (SERIAL_XMIT_SIZE + - 1); + port->xmit_cnt += cnt; + buf += cnt; + count -= cnt; + total += cnt; + } + if (port->xmit_cnt && !tty->stopped && !tty->hw_stopped) + port->status |= ISI_TXOK; + spin_unlock_irqrestore(&card->card_lock, flags); + return total; +} + +/* put_char et all */ +static int isicom_put_char(struct tty_struct *tty, unsigned char ch) +{ + struct isi_port *port = tty->driver_data; + struct isi_board *card = port->card; + unsigned long flags; + + if (isicom_paranoia_check(port, tty->name, "isicom_put_char")) + return 0; + + spin_lock_irqsave(&card->card_lock, flags); + if (port->xmit_cnt >= SERIAL_XMIT_SIZE - 1) { + spin_unlock_irqrestore(&card->card_lock, flags); + return 0; + } + + port->port.xmit_buf[port->xmit_head++] = ch; + port->xmit_head &= (SERIAL_XMIT_SIZE - 1); + port->xmit_cnt++; + spin_unlock_irqrestore(&card->card_lock, flags); + return 1; +} + +/* flush_chars et all */ +static void isicom_flush_chars(struct tty_struct *tty) +{ + struct isi_port *port = tty->driver_data; + + if (isicom_paranoia_check(port, tty->name, "isicom_flush_chars")) + return; + + if (port->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped || + !port->port.xmit_buf) + return; + + /* this tells the transmitter to consider this port for + data output to the card ... that's the best we can do. */ + port->status |= ISI_TXOK; +} + +/* write_room et all */ +static int isicom_write_room(struct tty_struct *tty) +{ + struct isi_port *port = tty->driver_data; + int free; + + if (isicom_paranoia_check(port, tty->name, "isicom_write_room")) + return 0; + + free = SERIAL_XMIT_SIZE - port->xmit_cnt - 1; + if (free < 0) + free = 0; + return free; +} + +/* chars_in_buffer et all */ +static int isicom_chars_in_buffer(struct tty_struct *tty) +{ + struct isi_port *port = tty->driver_data; + if (isicom_paranoia_check(port, tty->name, "isicom_chars_in_buffer")) + return 0; + return port->xmit_cnt; +} + +/* ioctl et all */ +static int isicom_send_break(struct tty_struct *tty, int length) +{ + struct isi_port *port = tty->driver_data; + struct isi_board *card = port->card; + unsigned long base = card->base; + + if (length == -1) + return -EOPNOTSUPP; + + if (!lock_card(card)) + return -EINVAL; + + outw(0x8000 | ((port->channel) << (card->shift_count)) | 0x3, base); + outw((length & 0xff) << 8 | 0x00, base); + outw((length & 0xff00), base); + InterruptTheCard(base); + + unlock_card(card); + return 0; +} + +static int isicom_tiocmget(struct tty_struct *tty) +{ + struct isi_port *port = tty->driver_data; + /* just send the port status */ + u16 status = port->status; + + if (isicom_paranoia_check(port, tty->name, "isicom_ioctl")) + return -ENODEV; + + return ((status & ISI_RTS) ? TIOCM_RTS : 0) | + ((status & ISI_DTR) ? TIOCM_DTR : 0) | + ((status & ISI_DCD) ? TIOCM_CAR : 0) | + ((status & ISI_DSR) ? TIOCM_DSR : 0) | + ((status & ISI_CTS) ? TIOCM_CTS : 0) | + ((status & ISI_RI ) ? 
TIOCM_RI : 0); +} + +static int isicom_tiocmset(struct tty_struct *tty, + unsigned int set, unsigned int clear) +{ + struct isi_port *port = tty->driver_data; + unsigned long flags; + + if (isicom_paranoia_check(port, tty->name, "isicom_ioctl")) + return -ENODEV; + + spin_lock_irqsave(&port->card->card_lock, flags); + if (set & TIOCM_RTS) + raise_rts(port); + if (set & TIOCM_DTR) + raise_dtr(port); + + if (clear & TIOCM_RTS) + drop_rts(port); + if (clear & TIOCM_DTR) + drop_dtr(port); + spin_unlock_irqrestore(&port->card->card_lock, flags); + + return 0; +} + +static int isicom_set_serial_info(struct tty_struct *tty, + struct serial_struct __user *info) +{ + struct isi_port *port = tty->driver_data; + struct serial_struct newinfo; + int reconfig_port; + + if (copy_from_user(&newinfo, info, sizeof(newinfo))) + return -EFAULT; + + mutex_lock(&port->port.mutex); + reconfig_port = ((port->port.flags & ASYNC_SPD_MASK) != + (newinfo.flags & ASYNC_SPD_MASK)); + + if (!capable(CAP_SYS_ADMIN)) { + if ((newinfo.close_delay != port->port.close_delay) || + (newinfo.closing_wait != port->port.closing_wait) || + ((newinfo.flags & ~ASYNC_USR_MASK) != + (port->port.flags & ~ASYNC_USR_MASK))) { + mutex_unlock(&port->port.mutex); + return -EPERM; + } + port->port.flags = ((port->port.flags & ~ASYNC_USR_MASK) | + (newinfo.flags & ASYNC_USR_MASK)); + } else { + port->port.close_delay = newinfo.close_delay; + port->port.closing_wait = newinfo.closing_wait; + port->port.flags = ((port->port.flags & ~ASYNC_FLAGS) | + (newinfo.flags & ASYNC_FLAGS)); + } + if (reconfig_port) { + unsigned long flags; + spin_lock_irqsave(&port->card->card_lock, flags); + isicom_config_port(tty); + spin_unlock_irqrestore(&port->card->card_lock, flags); + } + mutex_unlock(&port->port.mutex); + return 0; +} + +static int isicom_get_serial_info(struct isi_port *port, + struct serial_struct __user *info) +{ + struct serial_struct out_info; + + mutex_lock(&port->port.mutex); + memset(&out_info, 0, sizeof(out_info)); +/* out_info.type = ? */ + out_info.line = port - isi_ports; + out_info.port = port->card->base; + out_info.irq = port->card->irq; + out_info.flags = port->port.flags; +/* out_info.baud_base = ? 
*/ + out_info.close_delay = port->port.close_delay; + out_info.closing_wait = port->port.closing_wait; + mutex_unlock(&port->port.mutex); + if (copy_to_user(info, &out_info, sizeof(out_info))) + return -EFAULT; + return 0; +} + +static int isicom_ioctl(struct tty_struct *tty, + unsigned int cmd, unsigned long arg) +{ + struct isi_port *port = tty->driver_data; + void __user *argp = (void __user *)arg; + + if (isicom_paranoia_check(port, tty->name, "isicom_ioctl")) + return -ENODEV; + + switch (cmd) { + case TIOCGSERIAL: + return isicom_get_serial_info(port, argp); + + case TIOCSSERIAL: + return isicom_set_serial_info(tty, argp); + + default: + return -ENOIOCTLCMD; + } + return 0; +} + +/* set_termios et all */ +static void isicom_set_termios(struct tty_struct *tty, + struct ktermios *old_termios) +{ + struct isi_port *port = tty->driver_data; + unsigned long flags; + + if (isicom_paranoia_check(port, tty->name, "isicom_set_termios")) + return; + + if (tty->termios->c_cflag == old_termios->c_cflag && + tty->termios->c_iflag == old_termios->c_iflag) + return; + + spin_lock_irqsave(&port->card->card_lock, flags); + isicom_config_port(tty); + spin_unlock_irqrestore(&port->card->card_lock, flags); + + if ((old_termios->c_cflag & CRTSCTS) && + !(tty->termios->c_cflag & CRTSCTS)) { + tty->hw_stopped = 0; + isicom_start(tty); + } +} + +/* throttle et all */ +static void isicom_throttle(struct tty_struct *tty) +{ + struct isi_port *port = tty->driver_data; + struct isi_board *card = port->card; + + if (isicom_paranoia_check(port, tty->name, "isicom_throttle")) + return; + + /* tell the card that this port cannot handle any more data for now */ + card->port_status &= ~(1 << port->channel); + outw(card->port_status, card->base + 0x02); +} + +/* unthrottle et all */ +static void isicom_unthrottle(struct tty_struct *tty) +{ + struct isi_port *port = tty->driver_data; + struct isi_board *card = port->card; + + if (isicom_paranoia_check(port, tty->name, "isicom_unthrottle")) + return; + + /* tell the card that this port is ready to accept more data */ + card->port_status |= (1 << port->channel); + outw(card->port_status, card->base + 0x02); +} + +/* stop et all */ +static void isicom_stop(struct tty_struct *tty) +{ + struct isi_port *port = tty->driver_data; + + if (isicom_paranoia_check(port, tty->name, "isicom_stop")) + return; + + /* this tells the transmitter not to consider this port for + data output to the card. */ + port->status &= ~ISI_TXOK; +} + +/* start et all */ +static void isicom_start(struct tty_struct *tty) +{ + struct isi_port *port = tty->driver_data; + + if (isicom_paranoia_check(port, tty->name, "isicom_start")) + return; + + /* this tells the transmitter to consider this port for + data output to the card. 
*/ + port->status |= ISI_TXOK; +} + +static void isicom_hangup(struct tty_struct *tty) +{ + struct isi_port *port = tty->driver_data; + + if (isicom_paranoia_check(port, tty->name, "isicom_hangup")) + return; + tty_port_hangup(&port->port); +} + + +/* + * Driver init and deinit functions + */ + +static const struct tty_operations isicom_ops = { + .open = isicom_open, + .close = isicom_close, + .write = isicom_write, + .put_char = isicom_put_char, + .flush_chars = isicom_flush_chars, + .write_room = isicom_write_room, + .chars_in_buffer = isicom_chars_in_buffer, + .ioctl = isicom_ioctl, + .set_termios = isicom_set_termios, + .throttle = isicom_throttle, + .unthrottle = isicom_unthrottle, + .stop = isicom_stop, + .start = isicom_start, + .hangup = isicom_hangup, + .flush_buffer = isicom_flush_buffer, + .tiocmget = isicom_tiocmget, + .tiocmset = isicom_tiocmset, + .break_ctl = isicom_send_break, +}; + +static const struct tty_port_operations isicom_port_ops = { + .carrier_raised = isicom_carrier_raised, + .dtr_rts = isicom_dtr_rts, + .activate = isicom_activate, + .shutdown = isicom_shutdown, +}; + +static int __devinit reset_card(struct pci_dev *pdev, + const unsigned int card, unsigned int *signature) +{ + struct isi_board *board = pci_get_drvdata(pdev); + unsigned long base = board->base; + unsigned int sig, portcount = 0; + int retval = 0; + + dev_dbg(&pdev->dev, "ISILoad:Resetting Card%d at 0x%lx\n", card + 1, + base); + + inw(base + 0x8); + + msleep(10); + + outw(0, base + 0x8); /* Reset */ + + msleep(1000); + + sig = inw(base + 0x4) & 0xff; + + if (sig != 0xa5 && sig != 0xbb && sig != 0xcc && sig != 0xdd && + sig != 0xee) { + dev_warn(&pdev->dev, "ISILoad:Card%u reset failure (Possible " + "bad I/O Port Address 0x%lx).\n", card + 1, base); + dev_dbg(&pdev->dev, "Sig=0x%x\n", sig); + retval = -EIO; + goto end; + } + + msleep(10); + + portcount = inw(base + 0x2); + if (!(inw(base + 0xe) & 0x1) || (portcount != 0 && portcount != 4 && + portcount != 8 && portcount != 16)) { + dev_err(&pdev->dev, "ISILoad:PCI Card%d reset failure.\n", + card + 1); + retval = -EIO; + goto end; + } + + switch (sig) { + case 0xa5: + case 0xbb: + case 0xdd: + board->port_count = (portcount == 4) ? 
4 : 8; + board->shift_count = 12; + break; + case 0xcc: + case 0xee: + board->port_count = 16; + board->shift_count = 11; + break; + } + dev_info(&pdev->dev, "-Done\n"); + *signature = sig; + +end: + return retval; +} + +static int __devinit load_firmware(struct pci_dev *pdev, + const unsigned int index, const unsigned int signature) +{ + struct isi_board *board = pci_get_drvdata(pdev); + const struct firmware *fw; + unsigned long base = board->base; + unsigned int a; + u16 word_count, status; + int retval = -EIO; + char *name; + u8 *data; + + struct stframe { + u16 addr; + u16 count; + u8 data[0]; + } *frame; + + switch (signature) { + case 0xa5: + name = "isi608.bin"; + break; + case 0xbb: + name = "isi608em.bin"; + break; + case 0xcc: + name = "isi616em.bin"; + break; + case 0xdd: + name = "isi4608.bin"; + break; + case 0xee: + name = "isi4616.bin"; + break; + default: + dev_err(&pdev->dev, "Unknown signature.\n"); + goto end; + } + + retval = request_firmware(&fw, name, &pdev->dev); + if (retval) + goto end; + + retval = -EIO; + + for (frame = (struct stframe *)fw->data; + frame < (struct stframe *)(fw->data + fw->size); + frame = (struct stframe *)((u8 *)(frame + 1) + + frame->count)) { + if (WaitTillCardIsFree(base)) + goto errrelfw; + + outw(0xf0, base); /* start upload sequence */ + outw(0x00, base); + outw(frame->addr, base); /* lsb of address */ + + word_count = frame->count / 2 + frame->count % 2; + outw(word_count, base); + InterruptTheCard(base); + + udelay(100); /* 0x2f */ + + if (WaitTillCardIsFree(base)) + goto errrelfw; + + status = inw(base + 0x4); + if (status != 0) { + dev_warn(&pdev->dev, "Card%d rejected load header:\n" + "Address:0x%x\n" + "Count:0x%x\n" + "Status:0x%x\n", + index + 1, frame->addr, frame->count, status); + goto errrelfw; + } + outsw(base, frame->data, word_count); + + InterruptTheCard(base); + + udelay(50); /* 0x0f */ + + if (WaitTillCardIsFree(base)) + goto errrelfw; + + status = inw(base + 0x4); + if (status != 0) { + dev_err(&pdev->dev, "Card%d got out of sync.Card " + "Status:0x%x\n", index + 1, status); + goto errrelfw; + } + } + +/* XXX: should we test it by reading it back and comparing with original like + * in load firmware package? 
*/ + for (frame = (struct stframe *)fw->data; + frame < (struct stframe *)(fw->data + fw->size); + frame = (struct stframe *)((u8 *)(frame + 1) + + frame->count)) { + if (WaitTillCardIsFree(base)) + goto errrelfw; + + outw(0xf1, base); /* start download sequence */ + outw(0x00, base); + outw(frame->addr, base); /* lsb of address */ + + word_count = (frame->count >> 1) + frame->count % 2; + outw(word_count + 1, base); + InterruptTheCard(base); + + udelay(50); /* 0xf */ + + if (WaitTillCardIsFree(base)) + goto errrelfw; + + status = inw(base + 0x4); + if (status != 0) { + dev_warn(&pdev->dev, "Card%d rejected verify header:\n" + "Address:0x%x\n" + "Count:0x%x\n" + "Status: 0x%x\n", + index + 1, frame->addr, frame->count, status); + goto errrelfw; + } + + data = kmalloc(word_count * 2, GFP_KERNEL); + if (data == NULL) { + dev_err(&pdev->dev, "Card%d, firmware upload " + "failed, not enough memory\n", index + 1); + goto errrelfw; + } + inw(base); + insw(base, data, word_count); + InterruptTheCard(base); + + for (a = 0; a < frame->count; a++) + if (data[a] != frame->data[a]) { + kfree(data); + dev_err(&pdev->dev, "Card%d, firmware upload " + "failed\n", index + 1); + goto errrelfw; + } + kfree(data); + + udelay(50); /* 0xf */ + + if (WaitTillCardIsFree(base)) + goto errrelfw; + + status = inw(base + 0x4); + if (status != 0) { + dev_err(&pdev->dev, "Card%d verify got out of sync. " + "Card Status:0x%x\n", index + 1, status); + goto errrelfw; + } + } + + /* xfer ctrl */ + if (WaitTillCardIsFree(base)) + goto errrelfw; + + outw(0xf2, base); + outw(0x800, base); + outw(0x0, base); + outw(0x0, base); + InterruptTheCard(base); + outw(0x0, base + 0x4); /* for ISI4608 cards */ + + board->status |= FIRMWARE_LOADED; + retval = 0; + +errrelfw: + release_firmware(fw); +end: + return retval; +} + +/* + * Insmod can set static symbols so keep these static + */ +static unsigned int card_count; + +static int __devinit isicom_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + unsigned int uninitialized_var(signature), index; + int retval = -EPERM; + struct isi_board *board = NULL; + + if (card_count >= BOARD_COUNT) + goto err; + + retval = pci_enable_device(pdev); + if (retval) { + dev_err(&pdev->dev, "failed to enable\n"); + goto err; + } + + dev_info(&pdev->dev, "ISI PCI Card(Device ID 0x%x)\n", ent->device); + + /* allot the first empty slot in the array */ + for (index = 0; index < BOARD_COUNT; index++) { + if (isi_card[index].base == 0) { + board = &isi_card[index]; + break; + } + } + if (index == BOARD_COUNT) { + retval = -ENODEV; + goto err_disable; + } + + board->index = index; + board->base = pci_resource_start(pdev, 3); + board->irq = pdev->irq; + card_count++; + + pci_set_drvdata(pdev, board); + + retval = pci_request_region(pdev, 3, ISICOM_NAME); + if (retval) { + dev_err(&pdev->dev, "I/O Region 0x%lx-0x%lx is busy. Card%d " + "will be disabled.\n", board->base, board->base + 15, + index + 1); + retval = -EBUSY; + goto errdec; + } + + retval = request_irq(board->irq, isicom_interrupt, + IRQF_SHARED | IRQF_DISABLED, ISICOM_NAME, board); + if (retval < 0) { + dev_err(&pdev->dev, "Could not install handler at Irq %d. 
" + "Card%d will be disabled.\n", board->irq, index + 1); + goto errunrr; + } + + retval = reset_card(pdev, index, &signature); + if (retval < 0) + goto errunri; + + retval = load_firmware(pdev, index, signature); + if (retval < 0) + goto errunri; + + for (index = 0; index < board->port_count; index++) + tty_register_device(isicom_normal, board->index * 16 + index, + &pdev->dev); + + return 0; + +errunri: + free_irq(board->irq, board); +errunrr: + pci_release_region(pdev, 3); +errdec: + board->base = 0; + card_count--; +err_disable: + pci_disable_device(pdev); +err: + return retval; +} + +static void __devexit isicom_remove(struct pci_dev *pdev) +{ + struct isi_board *board = pci_get_drvdata(pdev); + unsigned int i; + + for (i = 0; i < board->port_count; i++) + tty_unregister_device(isicom_normal, board->index * 16 + i); + + free_irq(board->irq, board); + pci_release_region(pdev, 3); + board->base = 0; + card_count--; + pci_disable_device(pdev); +} + +static int __init isicom_init(void) +{ + int retval, idx, channel; + struct isi_port *port; + + for (idx = 0; idx < BOARD_COUNT; idx++) { + port = &isi_ports[idx * 16]; + isi_card[idx].ports = port; + spin_lock_init(&isi_card[idx].card_lock); + for (channel = 0; channel < 16; channel++, port++) { + tty_port_init(&port->port); + port->port.ops = &isicom_port_ops; + port->magic = ISICOM_MAGIC; + port->card = &isi_card[idx]; + port->channel = channel; + port->port.close_delay = 50 * HZ/100; + port->port.closing_wait = 3000 * HZ/100; + port->status = 0; + /* . . . */ + } + isi_card[idx].base = 0; + isi_card[idx].irq = 0; + } + + /* tty driver structure initialization */ + isicom_normal = alloc_tty_driver(PORT_COUNT); + if (!isicom_normal) { + retval = -ENOMEM; + goto error; + } + + isicom_normal->owner = THIS_MODULE; + isicom_normal->name = "ttyM"; + isicom_normal->major = ISICOM_NMAJOR; + isicom_normal->minor_start = 0; + isicom_normal->type = TTY_DRIVER_TYPE_SERIAL; + isicom_normal->subtype = SERIAL_TYPE_NORMAL; + isicom_normal->init_termios = tty_std_termios; + isicom_normal->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | + CLOCAL; + isicom_normal->flags = TTY_DRIVER_REAL_RAW | + TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_HARDWARE_BREAK; + tty_set_operations(isicom_normal, &isicom_ops); + + retval = tty_register_driver(isicom_normal); + if (retval) { + pr_debug("Couldn't register the dialin driver\n"); + goto err_puttty; + } + + retval = pci_register_driver(&isicom_driver); + if (retval < 0) { + pr_err("Unable to register pci driver.\n"); + goto err_unrtty; + } + + mod_timer(&tx, jiffies + 1); + + return 0; +err_unrtty: + tty_unregister_driver(isicom_normal); +err_puttty: + put_tty_driver(isicom_normal); +error: + return retval; +} + +static void __exit isicom_exit(void) +{ + del_timer_sync(&tx); + + pci_unregister_driver(&isicom_driver); + tty_unregister_driver(isicom_normal); + put_tty_driver(isicom_normal); +} + +module_init(isicom_init); +module_exit(isicom_exit); + +MODULE_AUTHOR("MultiTech"); +MODULE_DESCRIPTION("Driver for the ISI series of cards by MultiTech"); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE("isi608.bin"); +MODULE_FIRMWARE("isi608em.bin"); +MODULE_FIRMWARE("isi616em.bin"); +MODULE_FIRMWARE("isi4608.bin"); +MODULE_FIRMWARE("isi4616.bin"); diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c new file mode 100644 index 00000000000..35b0c38590e --- /dev/null +++ b/drivers/tty/moxa.c @@ -0,0 +1,2092 @@ +/*****************************************************************************/ +/* + * moxa.c -- MOXA Intellio family multiport 
serial driver. + * + * Copyright (C) 1999-2000 Moxa Technologies (support@moxa.com). + * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com> + * + * This code is loosely based on the Linux serial driver, written by + * Linus Torvalds, Theodore T'so and others. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +/* + * MOXA Intellio Series Driver + * for : LINUX + * date : 1999/1/7 + * version : 5.1 + */ + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/mm.h> +#include <linux/ioport.h> +#include <linux/errno.h> +#include <linux/firmware.h> +#include <linux/signal.h> +#include <linux/sched.h> +#include <linux/timer.h> +#include <linux/interrupt.h> +#include <linux/tty.h> +#include <linux/tty_flip.h> +#include <linux/major.h> +#include <linux/string.h> +#include <linux/fcntl.h> +#include <linux/ptrace.h> +#include <linux/serial.h> +#include <linux/tty_driver.h> +#include <linux/delay.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/bitops.h> +#include <linux/slab.h> + +#include <asm/system.h> +#include <asm/io.h> +#include <asm/uaccess.h> + +#include "moxa.h" + +#define MOXA_VERSION "6.0k" + +#define MOXA_FW_HDRLEN 32 + +#define MOXAMAJOR 172 + +#define MAX_BOARDS 4 /* Don't change this value */ +#define MAX_PORTS_PER_BOARD 32 /* Don't change this value */ +#define MAX_PORTS (MAX_BOARDS * MAX_PORTS_PER_BOARD) + +#define MOXA_IS_320(brd) ((brd)->boardType == MOXA_BOARD_C320_ISA || \ + (brd)->boardType == MOXA_BOARD_C320_PCI) + +/* + * Define the Moxa PCI vendor and device IDs. + */ +#define MOXA_BUS_TYPE_ISA 0 +#define MOXA_BUS_TYPE_PCI 1 + +enum { + MOXA_BOARD_C218_PCI = 1, + MOXA_BOARD_C218_ISA, + MOXA_BOARD_C320_PCI, + MOXA_BOARD_C320_ISA, + MOXA_BOARD_CP204J, +}; + +static char *moxa_brdname[] = +{ + "C218 Turbo PCI series", + "C218 Turbo ISA series", + "C320 Turbo PCI series", + "C320 Turbo ISA series", + "CP-204J series", +}; + +#ifdef CONFIG_PCI +static struct pci_device_id moxa_pcibrds[] = { + { PCI_DEVICE(PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_C218), + .driver_data = MOXA_BOARD_C218_PCI }, + { PCI_DEVICE(PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_C320), + .driver_data = MOXA_BOARD_C320_PCI }, + { PCI_DEVICE(PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP204J), + .driver_data = MOXA_BOARD_CP204J }, + { 0 } +}; +MODULE_DEVICE_TABLE(pci, moxa_pcibrds); +#endif /* CONFIG_PCI */ + +struct moxa_port; + +static struct moxa_board_conf { + int boardType; + int numPorts; + int busType; + + unsigned int ready; + + struct moxa_port *ports; + + void __iomem *basemem; + void __iomem *intNdx; + void __iomem *intPend; + void __iomem *intTable; +} moxa_boards[MAX_BOARDS]; + +struct mxser_mstatus { + tcflag_t cflag; + int cts; + int dsr; + int ri; + int dcd; +}; + +struct moxaq_str { + int inq; + int outq; +}; + +struct moxa_port { + struct tty_port port; + struct moxa_board_conf *board; + void __iomem *tableAddr; + + int type; + int cflag; + unsigned long statusflags; + + u8 DCDState; /* Protected by the port lock */ + u8 lineCtrl; + u8 lowChkFlag; +}; + +struct mon_str { + int tick; + int rxcnt[MAX_PORTS]; + int txcnt[MAX_PORTS]; +}; + +/* statusflags */ +#define TXSTOPPED 1 +#define LOWWAIT 2 +#define EMPTYWAIT 3 + +#define SERIAL_DO_RESTART + +#define WAKEUP_CHARS 256 + +static int ttymajor = MOXAMAJOR; +static struct mon_str moxaLog; +static unsigned int 
moxaFuncTout = HZ / 2; +static unsigned int moxaLowWaterChk; +static DEFINE_MUTEX(moxa_openlock); +static DEFINE_SPINLOCK(moxa_lock); + +static unsigned long baseaddr[MAX_BOARDS]; +static unsigned int type[MAX_BOARDS]; +static unsigned int numports[MAX_BOARDS]; + +MODULE_AUTHOR("William Chen"); +MODULE_DESCRIPTION("MOXA Intellio Family Multiport Board Device Driver"); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE("c218tunx.cod"); +MODULE_FIRMWARE("cp204unx.cod"); +MODULE_FIRMWARE("c320tunx.cod"); + +module_param_array(type, uint, NULL, 0); +MODULE_PARM_DESC(type, "card type: C218=2, C320=4"); +module_param_array(baseaddr, ulong, NULL, 0); +MODULE_PARM_DESC(baseaddr, "base address"); +module_param_array(numports, uint, NULL, 0); +MODULE_PARM_DESC(numports, "numports (ignored for C218)"); + +module_param(ttymajor, int, 0); + +/* + * static functions: + */ +static int moxa_open(struct tty_struct *, struct file *); +static void moxa_close(struct tty_struct *, struct file *); +static int moxa_write(struct tty_struct *, const unsigned char *, int); +static int moxa_write_room(struct tty_struct *); +static void moxa_flush_buffer(struct tty_struct *); +static int moxa_chars_in_buffer(struct tty_struct *); +static void moxa_set_termios(struct tty_struct *, struct ktermios *); +static void moxa_stop(struct tty_struct *); +static void moxa_start(struct tty_struct *); +static void moxa_hangup(struct tty_struct *); +static int moxa_tiocmget(struct tty_struct *tty); +static int moxa_tiocmset(struct tty_struct *tty, + unsigned int set, unsigned int clear); +static void moxa_poll(unsigned long); +static void moxa_set_tty_param(struct tty_struct *, struct ktermios *); +static void moxa_shutdown(struct tty_port *); +static int moxa_carrier_raised(struct tty_port *); +static void moxa_dtr_rts(struct tty_port *, int); +/* + * moxa board interface functions: + */ +static void MoxaPortEnable(struct moxa_port *); +static void MoxaPortDisable(struct moxa_port *); +static int MoxaPortSetTermio(struct moxa_port *, struct ktermios *, speed_t); +static int MoxaPortGetLineOut(struct moxa_port *, int *, int *); +static void MoxaPortLineCtrl(struct moxa_port *, int, int); +static void MoxaPortFlowCtrl(struct moxa_port *, int, int, int, int, int); +static int MoxaPortLineStatus(struct moxa_port *); +static void MoxaPortFlushData(struct moxa_port *, int); +static int MoxaPortWriteData(struct tty_struct *, const unsigned char *, int); +static int MoxaPortReadData(struct moxa_port *); +static int MoxaPortTxQueue(struct moxa_port *); +static int MoxaPortRxQueue(struct moxa_port *); +static int MoxaPortTxFree(struct moxa_port *); +static void MoxaPortTxDisable(struct moxa_port *); +static void MoxaPortTxEnable(struct moxa_port *); +static int moxa_get_serial_info(struct moxa_port *, struct serial_struct __user *); +static int moxa_set_serial_info(struct moxa_port *, struct serial_struct __user *); +static void MoxaSetFifo(struct moxa_port *port, int enable); + +/* + * I/O functions + */ + +static DEFINE_SPINLOCK(moxafunc_lock); + +static void moxa_wait_finish(void __iomem *ofsAddr) +{ + unsigned long end = jiffies + moxaFuncTout; + + while (readw(ofsAddr + FuncCode) != 0) + if (time_after(jiffies, end)) + return; + if (readw(ofsAddr + FuncCode) != 0 && printk_ratelimit()) + printk(KERN_WARNING "moxa function expired\n"); +} + +static void moxafunc(void __iomem *ofsAddr, u16 cmd, u16 arg) +{ + unsigned long flags; + spin_lock_irqsave(&moxafunc_lock, flags); + writew(arg, ofsAddr + FuncArg); + writew(cmd, ofsAddr + FuncCode); + 
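+	/*
+	 * Descriptive note (derived from moxa_wait_finish() above): the card
+	 * firmware acknowledges a command by clearing FuncCode back to zero;
+	 * the wait below polls for that, bounded by moxaFuncTout (HZ / 2)
+	 * and emitting a rate-limited warning on timeout.
+	 */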
moxa_wait_finish(ofsAddr); + spin_unlock_irqrestore(&moxafunc_lock, flags); +} + +static int moxafuncret(void __iomem *ofsAddr, u16 cmd, u16 arg) +{ + unsigned long flags; + u16 ret; + spin_lock_irqsave(&moxafunc_lock, flags); + writew(arg, ofsAddr + FuncArg); + writew(cmd, ofsAddr + FuncCode); + moxa_wait_finish(ofsAddr); + ret = readw(ofsAddr + FuncArg); + spin_unlock_irqrestore(&moxafunc_lock, flags); + return ret; +} + +static void moxa_low_water_check(void __iomem *ofsAddr) +{ + u16 rptr, wptr, mask, len; + + if (readb(ofsAddr + FlagStat) & Xoff_state) { + rptr = readw(ofsAddr + RXrptr); + wptr = readw(ofsAddr + RXwptr); + mask = readw(ofsAddr + RX_mask); + len = (wptr - rptr) & mask; + if (len <= Low_water) + moxafunc(ofsAddr, FC_SendXon, 0); + } +} + +/* + * TTY operations + */ + +static int moxa_ioctl(struct tty_struct *tty, + unsigned int cmd, unsigned long arg) +{ + struct moxa_port *ch = tty->driver_data; + void __user *argp = (void __user *)arg; + int status, ret = 0; + + if (tty->index == MAX_PORTS) { + if (cmd != MOXA_GETDATACOUNT && cmd != MOXA_GET_IOQUEUE && + cmd != MOXA_GETMSTATUS) + return -EINVAL; + } else if (!ch) + return -ENODEV; + + switch (cmd) { + case MOXA_GETDATACOUNT: + moxaLog.tick = jiffies; + if (copy_to_user(argp, &moxaLog, sizeof(moxaLog))) + ret = -EFAULT; + break; + case MOXA_FLUSH_QUEUE: + MoxaPortFlushData(ch, arg); + break; + case MOXA_GET_IOQUEUE: { + struct moxaq_str __user *argm = argp; + struct moxaq_str tmp; + struct moxa_port *p; + unsigned int i, j; + + for (i = 0; i < MAX_BOARDS; i++) { + p = moxa_boards[i].ports; + for (j = 0; j < MAX_PORTS_PER_BOARD; j++, p++, argm++) { + memset(&tmp, 0, sizeof(tmp)); + spin_lock_bh(&moxa_lock); + if (moxa_boards[i].ready) { + tmp.inq = MoxaPortRxQueue(p); + tmp.outq = MoxaPortTxQueue(p); + } + spin_unlock_bh(&moxa_lock); + if (copy_to_user(argm, &tmp, sizeof(tmp))) + return -EFAULT; + } + } + break; + } case MOXA_GET_OQUEUE: + status = MoxaPortTxQueue(ch); + ret = put_user(status, (unsigned long __user *)argp); + break; + case MOXA_GET_IQUEUE: + status = MoxaPortRxQueue(ch); + ret = put_user(status, (unsigned long __user *)argp); + break; + case MOXA_GETMSTATUS: { + struct mxser_mstatus __user *argm = argp; + struct mxser_mstatus tmp; + struct moxa_port *p; + unsigned int i, j; + + for (i = 0; i < MAX_BOARDS; i++) { + p = moxa_boards[i].ports; + for (j = 0; j < MAX_PORTS_PER_BOARD; j++, p++, argm++) { + struct tty_struct *ttyp; + memset(&tmp, 0, sizeof(tmp)); + spin_lock_bh(&moxa_lock); + if (!moxa_boards[i].ready) { + spin_unlock_bh(&moxa_lock); + goto copy; + } + + status = MoxaPortLineStatus(p); + spin_unlock_bh(&moxa_lock); + + if (status & 1) + tmp.cts = 1; + if (status & 2) + tmp.dsr = 1; + if (status & 4) + tmp.dcd = 1; + + ttyp = tty_port_tty_get(&p->port); + if (!ttyp || !ttyp->termios) + tmp.cflag = p->cflag; + else + tmp.cflag = ttyp->termios->c_cflag; + tty_kref_put(tty); +copy: + if (copy_to_user(argm, &tmp, sizeof(tmp))) + return -EFAULT; + } + } + break; + } + case TIOCGSERIAL: + mutex_lock(&ch->port.mutex); + ret = moxa_get_serial_info(ch, argp); + mutex_unlock(&ch->port.mutex); + break; + case TIOCSSERIAL: + mutex_lock(&ch->port.mutex); + ret = moxa_set_serial_info(ch, argp); + mutex_unlock(&ch->port.mutex); + break; + default: + ret = -ENOIOCTLCMD; + } + return ret; +} + +static int moxa_break_ctl(struct tty_struct *tty, int state) +{ + struct moxa_port *port = tty->driver_data; + + moxafunc(port->tableAddr, state ? 
FC_SendBreak : FC_StopBreak, + Magic_code); + return 0; +} + +static const struct tty_operations moxa_ops = { + .open = moxa_open, + .close = moxa_close, + .write = moxa_write, + .write_room = moxa_write_room, + .flush_buffer = moxa_flush_buffer, + .chars_in_buffer = moxa_chars_in_buffer, + .ioctl = moxa_ioctl, + .set_termios = moxa_set_termios, + .stop = moxa_stop, + .start = moxa_start, + .hangup = moxa_hangup, + .break_ctl = moxa_break_ctl, + .tiocmget = moxa_tiocmget, + .tiocmset = moxa_tiocmset, +}; + +static const struct tty_port_operations moxa_port_ops = { + .carrier_raised = moxa_carrier_raised, + .dtr_rts = moxa_dtr_rts, + .shutdown = moxa_shutdown, +}; + +static struct tty_driver *moxaDriver; +static DEFINE_TIMER(moxaTimer, moxa_poll, 0, 0); + +/* + * HW init + */ + +static int moxa_check_fw_model(struct moxa_board_conf *brd, u8 model) +{ + switch (brd->boardType) { + case MOXA_BOARD_C218_ISA: + case MOXA_BOARD_C218_PCI: + if (model != 1) + goto err; + break; + case MOXA_BOARD_CP204J: + if (model != 3) + goto err; + break; + default: + if (model != 2) + goto err; + break; + } + return 0; +err: + return -EINVAL; +} + +static int moxa_check_fw(const void *ptr) +{ + const __le16 *lptr = ptr; + + if (*lptr != cpu_to_le16(0x7980)) + return -EINVAL; + + return 0; +} + +static int moxa_load_bios(struct moxa_board_conf *brd, const u8 *buf, + size_t len) +{ + void __iomem *baseAddr = brd->basemem; + u16 tmp; + + writeb(HW_reset, baseAddr + Control_reg); /* reset */ + msleep(10); + memset_io(baseAddr, 0, 4096); + memcpy_toio(baseAddr, buf, len); /* download BIOS */ + writeb(0, baseAddr + Control_reg); /* restart */ + + msleep(2000); + + switch (brd->boardType) { + case MOXA_BOARD_C218_ISA: + case MOXA_BOARD_C218_PCI: + tmp = readw(baseAddr + C218_key); + if (tmp != C218_KeyCode) + goto err; + break; + case MOXA_BOARD_CP204J: + tmp = readw(baseAddr + C218_key); + if (tmp != CP204J_KeyCode) + goto err; + break; + default: + tmp = readw(baseAddr + C320_key); + if (tmp != C320_KeyCode) + goto err; + tmp = readw(baseAddr + C320_status); + if (tmp != STS_init) { + printk(KERN_ERR "MOXA: bios upload failed -- CPU/Basic " + "module not found\n"); + return -EIO; + } + break; + } + + return 0; +err: + printk(KERN_ERR "MOXA: bios upload failed -- board not found\n"); + return -EIO; +} + +static int moxa_load_320b(struct moxa_board_conf *brd, const u8 *ptr, + size_t len) +{ + void __iomem *baseAddr = brd->basemem; + + if (len < 7168) { + printk(KERN_ERR "MOXA: invalid 320 bios -- too short\n"); + return -EINVAL; + } + + writew(len - 7168 - 2, baseAddr + C320bapi_len); + writeb(1, baseAddr + Control_reg); /* Select Page 1 */ + memcpy_toio(baseAddr + DynPage_addr, ptr, 7168); + writeb(2, baseAddr + Control_reg); /* Select Page 2 */ + memcpy_toio(baseAddr + DynPage_addr, ptr + 7168, len - 7168); + + return 0; +} + +static int moxa_real_load_code(struct moxa_board_conf *brd, const void *ptr, + size_t len) +{ + void __iomem *baseAddr = brd->basemem; + const __le16 *uptr = ptr; + size_t wlen, len2, j; + unsigned long key, loadbuf, loadlen, checksum, checksum_ok; + unsigned int i, retry; + u16 usum, keycode; + + keycode = (brd->boardType == MOXA_BOARD_CP204J) ? 
CP204J_KeyCode : + C218_KeyCode; + + switch (brd->boardType) { + case MOXA_BOARD_CP204J: + case MOXA_BOARD_C218_ISA: + case MOXA_BOARD_C218_PCI: + key = C218_key; + loadbuf = C218_LoadBuf; + loadlen = C218DLoad_len; + checksum = C218check_sum; + checksum_ok = C218chksum_ok; + break; + default: + key = C320_key; + keycode = C320_KeyCode; + loadbuf = C320_LoadBuf; + loadlen = C320DLoad_len; + checksum = C320check_sum; + checksum_ok = C320chksum_ok; + break; + } + + usum = 0; + wlen = len >> 1; + for (i = 0; i < wlen; i++) + usum += le16_to_cpu(uptr[i]); + retry = 0; + do { + wlen = len >> 1; + j = 0; + while (wlen) { + len2 = (wlen > 2048) ? 2048 : wlen; + wlen -= len2; + memcpy_toio(baseAddr + loadbuf, ptr + j, len2 << 1); + j += len2 << 1; + + writew(len2, baseAddr + loadlen); + writew(0, baseAddr + key); + for (i = 0; i < 100; i++) { + if (readw(baseAddr + key) == keycode) + break; + msleep(10); + } + if (readw(baseAddr + key) != keycode) + return -EIO; + } + writew(0, baseAddr + loadlen); + writew(usum, baseAddr + checksum); + writew(0, baseAddr + key); + for (i = 0; i < 100; i++) { + if (readw(baseAddr + key) == keycode) + break; + msleep(10); + } + retry++; + } while ((readb(baseAddr + checksum_ok) != 1) && (retry < 3)); + if (readb(baseAddr + checksum_ok) != 1) + return -EIO; + + writew(0, baseAddr + key); + for (i = 0; i < 600; i++) { + if (readw(baseAddr + Magic_no) == Magic_code) + break; + msleep(10); + } + if (readw(baseAddr + Magic_no) != Magic_code) + return -EIO; + + if (MOXA_IS_320(brd)) { + if (brd->busType == MOXA_BUS_TYPE_PCI) { /* ASIC board */ + writew(0x3800, baseAddr + TMS320_PORT1); + writew(0x3900, baseAddr + TMS320_PORT2); + writew(28499, baseAddr + TMS320_CLOCK); + } else { + writew(0x3200, baseAddr + TMS320_PORT1); + writew(0x3400, baseAddr + TMS320_PORT2); + writew(19999, baseAddr + TMS320_CLOCK); + } + } + writew(1, baseAddr + Disable_IRQ); + writew(0, baseAddr + Magic_no); + for (i = 0; i < 500; i++) { + if (readw(baseAddr + Magic_no) == Magic_code) + break; + msleep(10); + } + if (readw(baseAddr + Magic_no) != Magic_code) + return -EIO; + + if (MOXA_IS_320(brd)) { + j = readw(baseAddr + Module_cnt); + if (j <= 0) + return -EIO; + brd->numPorts = j * 8; + writew(j, baseAddr + Module_no); + writew(0, baseAddr + Magic_no); + for (i = 0; i < 600; i++) { + if (readw(baseAddr + Magic_no) == Magic_code) + break; + msleep(10); + } + if (readw(baseAddr + Magic_no) != Magic_code) + return -EIO; + } + brd->intNdx = baseAddr + IRQindex; + brd->intPend = baseAddr + IRQpending; + brd->intTable = baseAddr + IRQtable; + + return 0; +} + +static int moxa_load_code(struct moxa_board_conf *brd, const void *ptr, + size_t len) +{ + void __iomem *ofsAddr, *baseAddr = brd->basemem; + struct moxa_port *port; + int retval, i; + + if (len % 2) { + printk(KERN_ERR "MOXA: bios length is not even\n"); + return -EINVAL; + } + + retval = moxa_real_load_code(brd, ptr, len); /* may change numPorts */ + if (retval) + return retval; + + switch (brd->boardType) { + case MOXA_BOARD_C218_ISA: + case MOXA_BOARD_C218_PCI: + case MOXA_BOARD_CP204J: + port = brd->ports; + for (i = 0; i < brd->numPorts; i++, port++) { + port->board = brd; + port->DCDState = 0; + port->tableAddr = baseAddr + Extern_table + + Extern_size * i; + ofsAddr = port->tableAddr; + writew(C218rx_mask, ofsAddr + RX_mask); + writew(C218tx_mask, ofsAddr + TX_mask); + writew(C218rx_spage + i * C218buf_pageno, ofsAddr + Page_rxb); + writew(readw(ofsAddr + Page_rxb) + C218rx_pageno, ofsAddr + EndPage_rxb); + + writew(C218tx_spage + i * 
C218buf_pageno, ofsAddr + Page_txb); + writew(readw(ofsAddr + Page_txb) + C218tx_pageno, ofsAddr + EndPage_txb); + + } + break; + default: + port = brd->ports; + for (i = 0; i < brd->numPorts; i++, port++) { + port->board = brd; + port->DCDState = 0; + port->tableAddr = baseAddr + Extern_table + + Extern_size * i; + ofsAddr = port->tableAddr; + switch (brd->numPorts) { + case 8: + writew(C320p8rx_mask, ofsAddr + RX_mask); + writew(C320p8tx_mask, ofsAddr + TX_mask); + writew(C320p8rx_spage + i * C320p8buf_pgno, ofsAddr + Page_rxb); + writew(readw(ofsAddr + Page_rxb) + C320p8rx_pgno, ofsAddr + EndPage_rxb); + writew(C320p8tx_spage + i * C320p8buf_pgno, ofsAddr + Page_txb); + writew(readw(ofsAddr + Page_txb) + C320p8tx_pgno, ofsAddr + EndPage_txb); + + break; + case 16: + writew(C320p16rx_mask, ofsAddr + RX_mask); + writew(C320p16tx_mask, ofsAddr + TX_mask); + writew(C320p16rx_spage + i * C320p16buf_pgno, ofsAddr + Page_rxb); + writew(readw(ofsAddr + Page_rxb) + C320p16rx_pgno, ofsAddr + EndPage_rxb); + writew(C320p16tx_spage + i * C320p16buf_pgno, ofsAddr + Page_txb); + writew(readw(ofsAddr + Page_txb) + C320p16tx_pgno, ofsAddr + EndPage_txb); + break; + + case 24: + writew(C320p24rx_mask, ofsAddr + RX_mask); + writew(C320p24tx_mask, ofsAddr + TX_mask); + writew(C320p24rx_spage + i * C320p24buf_pgno, ofsAddr + Page_rxb); + writew(readw(ofsAddr + Page_rxb) + C320p24rx_pgno, ofsAddr + EndPage_rxb); + writew(C320p24tx_spage + i * C320p24buf_pgno, ofsAddr + Page_txb); + writew(readw(ofsAddr + Page_txb), ofsAddr + EndPage_txb); + break; + case 32: + writew(C320p32rx_mask, ofsAddr + RX_mask); + writew(C320p32tx_mask, ofsAddr + TX_mask); + writew(C320p32tx_ofs, ofsAddr + Ofs_txb); + writew(C320p32rx_spage + i * C320p32buf_pgno, ofsAddr + Page_rxb); + writew(readb(ofsAddr + Page_rxb), ofsAddr + EndPage_rxb); + writew(C320p32tx_spage + i * C320p32buf_pgno, ofsAddr + Page_txb); + writew(readw(ofsAddr + Page_txb), ofsAddr + EndPage_txb); + break; + } + } + break; + } + return 0; +} + +static int moxa_load_fw(struct moxa_board_conf *brd, const struct firmware *fw) +{ + const void *ptr = fw->data; + char rsn[64]; + u16 lens[5]; + size_t len; + unsigned int a, lenp, lencnt; + int ret = -EINVAL; + struct { + __le32 magic; /* 0x34303430 */ + u8 reserved1[2]; + u8 type; /* UNIX = 3 */ + u8 model; /* C218T=1, C320T=2, CP204=3 */ + u8 reserved2[8]; + __le16 len[5]; + } const *hdr = ptr; + + BUILD_BUG_ON(ARRAY_SIZE(hdr->len) != ARRAY_SIZE(lens)); + + if (fw->size < MOXA_FW_HDRLEN) { + strcpy(rsn, "too short (even header won't fit)"); + goto err; + } + if (hdr->magic != cpu_to_le32(0x30343034)) { + sprintf(rsn, "bad magic: %.8x", le32_to_cpu(hdr->magic)); + goto err; + } + if (hdr->type != 3) { + sprintf(rsn, "not for linux, type is %u", hdr->type); + goto err; + } + if (moxa_check_fw_model(brd, hdr->model)) { + sprintf(rsn, "not for this card, model is %u", hdr->model); + goto err; + } + + len = MOXA_FW_HDRLEN; + lencnt = hdr->model == 2 ? 
5 : 3; + for (a = 0; a < ARRAY_SIZE(lens); a++) { + lens[a] = le16_to_cpu(hdr->len[a]); + if (lens[a] && len + lens[a] <= fw->size && + moxa_check_fw(&fw->data[len])) + printk(KERN_WARNING "MOXA firmware: unexpected input " + "at offset %u, but going on\n", (u32)len); + if (!lens[a] && a < lencnt) { + sprintf(rsn, "too few entries in fw file"); + goto err; + } + len += lens[a]; + } + + if (len != fw->size) { + sprintf(rsn, "bad length: %u (should be %u)", (u32)fw->size, + (u32)len); + goto err; + } + + ptr += MOXA_FW_HDRLEN; + lenp = 0; /* bios */ + + strcpy(rsn, "read above"); + + ret = moxa_load_bios(brd, ptr, lens[lenp]); + if (ret) + goto err; + + /* we skip the tty section (lens[1]), since we don't need it */ + ptr += lens[lenp] + lens[lenp + 1]; + lenp += 2; /* comm */ + + if (hdr->model == 2) { + ret = moxa_load_320b(brd, ptr, lens[lenp]); + if (ret) + goto err; + /* skip another tty */ + ptr += lens[lenp] + lens[lenp + 1]; + lenp += 2; + } + + ret = moxa_load_code(brd, ptr, lens[lenp]); + if (ret) + goto err; + + return 0; +err: + printk(KERN_ERR "firmware failed to load, reason: %s\n", rsn); + return ret; +} + +static int moxa_init_board(struct moxa_board_conf *brd, struct device *dev) +{ + const struct firmware *fw; + const char *file; + struct moxa_port *p; + unsigned int i; + int ret; + + brd->ports = kcalloc(MAX_PORTS_PER_BOARD, sizeof(*brd->ports), + GFP_KERNEL); + if (brd->ports == NULL) { + printk(KERN_ERR "cannot allocate memory for ports\n"); + ret = -ENOMEM; + goto err; + } + + for (i = 0, p = brd->ports; i < MAX_PORTS_PER_BOARD; i++, p++) { + tty_port_init(&p->port); + p->port.ops = &moxa_port_ops; + p->type = PORT_16550A; + p->cflag = B9600 | CS8 | CREAD | CLOCAL | HUPCL; + } + + switch (brd->boardType) { + case MOXA_BOARD_C218_ISA: + case MOXA_BOARD_C218_PCI: + file = "c218tunx.cod"; + break; + case MOXA_BOARD_CP204J: + file = "cp204unx.cod"; + break; + default: + file = "c320tunx.cod"; + break; + } + + ret = request_firmware(&fw, file, dev); + if (ret) { + printk(KERN_ERR "MOXA: request_firmware failed. Make sure " + "you've placed '%s' file into your firmware " + "loader directory (e.g. 
/lib/firmware)\n", + file); + goto err_free; + } + + ret = moxa_load_fw(brd, fw); + + release_firmware(fw); + + if (ret) + goto err_free; + + spin_lock_bh(&moxa_lock); + brd->ready = 1; + if (!timer_pending(&moxaTimer)) + mod_timer(&moxaTimer, jiffies + HZ / 50); + spin_unlock_bh(&moxa_lock); + + return 0; +err_free: + kfree(brd->ports); +err: + return ret; +} + +static void moxa_board_deinit(struct moxa_board_conf *brd) +{ + unsigned int a, opened; + + mutex_lock(&moxa_openlock); + spin_lock_bh(&moxa_lock); + brd->ready = 0; + spin_unlock_bh(&moxa_lock); + + /* pci hot-un-plug support */ + for (a = 0; a < brd->numPorts; a++) + if (brd->ports[a].port.flags & ASYNC_INITIALIZED) { + struct tty_struct *tty = tty_port_tty_get( + &brd->ports[a].port); + if (tty) { + tty_hangup(tty); + tty_kref_put(tty); + } + } + while (1) { + opened = 0; + for (a = 0; a < brd->numPorts; a++) + if (brd->ports[a].port.flags & ASYNC_INITIALIZED) + opened++; + mutex_unlock(&moxa_openlock); + if (!opened) + break; + msleep(50); + mutex_lock(&moxa_openlock); + } + + iounmap(brd->basemem); + brd->basemem = NULL; + kfree(brd->ports); +} + +#ifdef CONFIG_PCI +static int __devinit moxa_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct moxa_board_conf *board; + unsigned int i; + int board_type = ent->driver_data; + int retval; + + retval = pci_enable_device(pdev); + if (retval) { + dev_err(&pdev->dev, "can't enable pci device\n"); + goto err; + } + + for (i = 0; i < MAX_BOARDS; i++) + if (moxa_boards[i].basemem == NULL) + break; + + retval = -ENODEV; + if (i >= MAX_BOARDS) { + dev_warn(&pdev->dev, "more than %u MOXA Intellio family boards " + "found. Board is ignored.\n", MAX_BOARDS); + goto err; + } + + board = &moxa_boards[i]; + + retval = pci_request_region(pdev, 2, "moxa-base"); + if (retval) { + dev_err(&pdev->dev, "can't request pci region 2\n"); + goto err; + } + + board->basemem = ioremap_nocache(pci_resource_start(pdev, 2), 0x4000); + if (board->basemem == NULL) { + dev_err(&pdev->dev, "can't remap io space 2\n"); + goto err_reg; + } + + board->boardType = board_type; + switch (board_type) { + case MOXA_BOARD_C218_ISA: + case MOXA_BOARD_C218_PCI: + board->numPorts = 8; + break; + + case MOXA_BOARD_CP204J: + board->numPorts = 4; + break; + default: + board->numPorts = 0; + break; + } + board->busType = MOXA_BUS_TYPE_PCI; + + retval = moxa_init_board(board, &pdev->dev); + if (retval) + goto err_base; + + pci_set_drvdata(pdev, board); + + dev_info(&pdev->dev, "board '%s' ready (%u ports, firmware loaded)\n", + moxa_brdname[board_type - 1], board->numPorts); + + return 0; +err_base: + iounmap(board->basemem); + board->basemem = NULL; +err_reg: + pci_release_region(pdev, 2); +err: + return retval; +} + +static void __devexit moxa_pci_remove(struct pci_dev *pdev) +{ + struct moxa_board_conf *brd = pci_get_drvdata(pdev); + + moxa_board_deinit(brd); + + pci_release_region(pdev, 2); +} + +static struct pci_driver moxa_pci_driver = { + .name = "moxa", + .id_table = moxa_pcibrds, + .probe = moxa_pci_probe, + .remove = __devexit_p(moxa_pci_remove) +}; +#endif /* CONFIG_PCI */ + +static int __init moxa_init(void) +{ + unsigned int isabrds = 0; + int retval = 0; + struct moxa_board_conf *brd = moxa_boards; + unsigned int i; + + printk(KERN_INFO "MOXA Intellio family driver version %s\n", + MOXA_VERSION); + moxaDriver = alloc_tty_driver(MAX_PORTS + 1); + if (!moxaDriver) + return -ENOMEM; + + moxaDriver->owner = THIS_MODULE; + moxaDriver->name = "ttyMX"; + moxaDriver->major = ttymajor; + 
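+	/* the extra minor beyond MAX_PORTS allocated above is not bound to
+	 * any port; moxa_open() only lets CAP_SYS_ADMIN open it */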
moxaDriver->minor_start = 0; + moxaDriver->type = TTY_DRIVER_TYPE_SERIAL; + moxaDriver->subtype = SERIAL_TYPE_NORMAL; + moxaDriver->init_termios = tty_std_termios; + moxaDriver->init_termios.c_cflag = B9600 | CS8 | CREAD | CLOCAL | HUPCL; + moxaDriver->init_termios.c_ispeed = 9600; + moxaDriver->init_termios.c_ospeed = 9600; + moxaDriver->flags = TTY_DRIVER_REAL_RAW; + tty_set_operations(moxaDriver, &moxa_ops); + + if (tty_register_driver(moxaDriver)) { + printk(KERN_ERR "can't register MOXA Smartio tty driver!\n"); + put_tty_driver(moxaDriver); + return -1; + } + + /* Find the boards defined from module args. */ + + for (i = 0; i < MAX_BOARDS; i++) { + if (!baseaddr[i]) + break; + if (type[i] == MOXA_BOARD_C218_ISA || + type[i] == MOXA_BOARD_C320_ISA) { + pr_debug("Moxa board %2d: %s board(baseAddr=%lx)\n", + isabrds + 1, moxa_brdname[type[i] - 1], + baseaddr[i]); + brd->boardType = type[i]; + brd->numPorts = type[i] == MOXA_BOARD_C218_ISA ? 8 : + numports[i]; + brd->busType = MOXA_BUS_TYPE_ISA; + brd->basemem = ioremap_nocache(baseaddr[i], 0x4000); + if (!brd->basemem) { + printk(KERN_ERR "MOXA: can't remap %lx\n", + baseaddr[i]); + continue; + } + if (moxa_init_board(brd, NULL)) { + iounmap(brd->basemem); + brd->basemem = NULL; + continue; + } + + printk(KERN_INFO "MOXA isa board found at 0x%.8lu and " + "ready (%u ports, firmware loaded)\n", + baseaddr[i], brd->numPorts); + + brd++; + isabrds++; + } + } + +#ifdef CONFIG_PCI + retval = pci_register_driver(&moxa_pci_driver); + if (retval) { + printk(KERN_ERR "Can't register MOXA pci driver!\n"); + if (isabrds) + retval = 0; + } +#endif + + return retval; +} + +static void __exit moxa_exit(void) +{ + unsigned int i; + +#ifdef CONFIG_PCI + pci_unregister_driver(&moxa_pci_driver); +#endif + + for (i = 0; i < MAX_BOARDS; i++) /* ISA boards */ + if (moxa_boards[i].ready) + moxa_board_deinit(&moxa_boards[i]); + + del_timer_sync(&moxaTimer); + + if (tty_unregister_driver(moxaDriver)) + printk(KERN_ERR "Couldn't unregister MOXA Intellio family " + "serial driver\n"); + put_tty_driver(moxaDriver); +} + +module_init(moxa_init); +module_exit(moxa_exit); + +static void moxa_shutdown(struct tty_port *port) +{ + struct moxa_port *ch = container_of(port, struct moxa_port, port); + MoxaPortDisable(ch); + MoxaPortFlushData(ch, 2); + clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags); +} + +static int moxa_carrier_raised(struct tty_port *port) +{ + struct moxa_port *ch = container_of(port, struct moxa_port, port); + int dcd; + + spin_lock_irq(&port->lock); + dcd = ch->DCDState; + spin_unlock_irq(&port->lock); + return dcd; +} + +static void moxa_dtr_rts(struct tty_port *port, int onoff) +{ + struct moxa_port *ch = container_of(port, struct moxa_port, port); + MoxaPortLineCtrl(ch, onoff, onoff); +} + + +static int moxa_open(struct tty_struct *tty, struct file *filp) +{ + struct moxa_board_conf *brd; + struct moxa_port *ch; + int port; + int retval; + + port = tty->index; + if (port == MAX_PORTS) { + return capable(CAP_SYS_ADMIN) ? 
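+			/* the extra minor has no port behind it; root only */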
0 : -EPERM; + } + if (mutex_lock_interruptible(&moxa_openlock)) + return -ERESTARTSYS; + brd = &moxa_boards[port / MAX_PORTS_PER_BOARD]; + if (!brd->ready) { + mutex_unlock(&moxa_openlock); + return -ENODEV; + } + + if (port % MAX_PORTS_PER_BOARD >= brd->numPorts) { + mutex_unlock(&moxa_openlock); + return -ENODEV; + } + + ch = &brd->ports[port % MAX_PORTS_PER_BOARD]; + ch->port.count++; + tty->driver_data = ch; + tty_port_tty_set(&ch->port, tty); + mutex_lock(&ch->port.mutex); + if (!(ch->port.flags & ASYNC_INITIALIZED)) { + ch->statusflags = 0; + moxa_set_tty_param(tty, tty->termios); + MoxaPortLineCtrl(ch, 1, 1); + MoxaPortEnable(ch); + MoxaSetFifo(ch, ch->type == PORT_16550A); + ch->port.flags |= ASYNC_INITIALIZED; + } + mutex_unlock(&ch->port.mutex); + mutex_unlock(&moxa_openlock); + + retval = tty_port_block_til_ready(&ch->port, tty, filp); + if (retval == 0) + set_bit(ASYNCB_NORMAL_ACTIVE, &ch->port.flags); + return retval; +} + +static void moxa_close(struct tty_struct *tty, struct file *filp) +{ + struct moxa_port *ch = tty->driver_data; + ch->cflag = tty->termios->c_cflag; + tty_port_close(&ch->port, tty, filp); +} + +static int moxa_write(struct tty_struct *tty, + const unsigned char *buf, int count) +{ + struct moxa_port *ch = tty->driver_data; + int len; + + if (ch == NULL) + return 0; + + spin_lock_bh(&moxa_lock); + len = MoxaPortWriteData(tty, buf, count); + spin_unlock_bh(&moxa_lock); + + set_bit(LOWWAIT, &ch->statusflags); + return len; +} + +static int moxa_write_room(struct tty_struct *tty) +{ + struct moxa_port *ch; + + if (tty->stopped) + return 0; + ch = tty->driver_data; + if (ch == NULL) + return 0; + return MoxaPortTxFree(ch); +} + +static void moxa_flush_buffer(struct tty_struct *tty) +{ + struct moxa_port *ch = tty->driver_data; + + if (ch == NULL) + return; + MoxaPortFlushData(ch, 1); + tty_wakeup(tty); +} + +static int moxa_chars_in_buffer(struct tty_struct *tty) +{ + struct moxa_port *ch = tty->driver_data; + int chars; + + chars = MoxaPortTxQueue(ch); + if (chars) + /* + * Make it possible to wakeup anything waiting for output + * in tty_ioctl.c, etc. 
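+		 * (moxa_poll_port() clears EMPTYWAIT and wakes the tty once
+		 * the firmware TX queue has drained)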
+ */ + set_bit(EMPTYWAIT, &ch->statusflags); + return chars; +} + +static int moxa_tiocmget(struct tty_struct *tty) +{ + struct moxa_port *ch = tty->driver_data; + int flag = 0, dtr, rts; + + MoxaPortGetLineOut(ch, &dtr, &rts); + if (dtr) + flag |= TIOCM_DTR; + if (rts) + flag |= TIOCM_RTS; + dtr = MoxaPortLineStatus(ch); + if (dtr & 1) + flag |= TIOCM_CTS; + if (dtr & 2) + flag |= TIOCM_DSR; + if (dtr & 4) + flag |= TIOCM_CD; + return flag; +} + +static int moxa_tiocmset(struct tty_struct *tty, + unsigned int set, unsigned int clear) +{ + struct moxa_port *ch; + int port; + int dtr, rts; + + port = tty->index; + mutex_lock(&moxa_openlock); + ch = tty->driver_data; + if (!ch) { + mutex_unlock(&moxa_openlock); + return -EINVAL; + } + + MoxaPortGetLineOut(ch, &dtr, &rts); + if (set & TIOCM_RTS) + rts = 1; + if (set & TIOCM_DTR) + dtr = 1; + if (clear & TIOCM_RTS) + rts = 0; + if (clear & TIOCM_DTR) + dtr = 0; + MoxaPortLineCtrl(ch, dtr, rts); + mutex_unlock(&moxa_openlock); + return 0; +} + +static void moxa_set_termios(struct tty_struct *tty, + struct ktermios *old_termios) +{ + struct moxa_port *ch = tty->driver_data; + + if (ch == NULL) + return; + moxa_set_tty_param(tty, old_termios); + if (!(old_termios->c_cflag & CLOCAL) && C_CLOCAL(tty)) + wake_up_interruptible(&ch->port.open_wait); +} + +static void moxa_stop(struct tty_struct *tty) +{ + struct moxa_port *ch = tty->driver_data; + + if (ch == NULL) + return; + MoxaPortTxDisable(ch); + set_bit(TXSTOPPED, &ch->statusflags); +} + + +static void moxa_start(struct tty_struct *tty) +{ + struct moxa_port *ch = tty->driver_data; + + if (ch == NULL) + return; + + if (!(ch->statusflags & TXSTOPPED)) + return; + + MoxaPortTxEnable(ch); + clear_bit(TXSTOPPED, &ch->statusflags); +} + +static void moxa_hangup(struct tty_struct *tty) +{ + struct moxa_port *ch = tty->driver_data; + tty_port_hangup(&ch->port); +} + +static void moxa_new_dcdstate(struct moxa_port *p, u8 dcd) +{ + struct tty_struct *tty; + unsigned long flags; + dcd = !!dcd; + + spin_lock_irqsave(&p->port.lock, flags); + if (dcd != p->DCDState) { + p->DCDState = dcd; + spin_unlock_irqrestore(&p->port.lock, flags); + tty = tty_port_tty_get(&p->port); + if (tty && C_CLOCAL(tty) && !dcd) + tty_hangup(tty); + tty_kref_put(tty); + } + else + spin_unlock_irqrestore(&p->port.lock, flags); +} + +static int moxa_poll_port(struct moxa_port *p, unsigned int handle, + u16 __iomem *ip) +{ + struct tty_struct *tty = tty_port_tty_get(&p->port); + void __iomem *ofsAddr; + unsigned int inited = p->port.flags & ASYNC_INITIALIZED; + u16 intr; + + if (tty) { + if (test_bit(EMPTYWAIT, &p->statusflags) && + MoxaPortTxQueue(p) == 0) { + clear_bit(EMPTYWAIT, &p->statusflags); + tty_wakeup(tty); + } + if (test_bit(LOWWAIT, &p->statusflags) && !tty->stopped && + MoxaPortTxQueue(p) <= WAKEUP_CHARS) { + clear_bit(LOWWAIT, &p->statusflags); + tty_wakeup(tty); + } + + if (inited && !test_bit(TTY_THROTTLED, &tty->flags) && + MoxaPortRxQueue(p) > 0) { /* RX */ + MoxaPortReadData(p); + tty_schedule_flip(tty); + } + } else { + clear_bit(EMPTYWAIT, &p->statusflags); + MoxaPortFlushData(p, 0); /* flush RX */ + } + + if (!handle) /* nothing else to do */ + goto put; + + intr = readw(ip); /* port irq status */ + if (intr == 0) + goto put; + + writew(0, ip); /* ACK port */ + ofsAddr = p->tableAddr; + if (intr & IntrTx) /* disable tx intr */ + writew(readw(ofsAddr + HostStat) & ~WakeupTx, + ofsAddr + HostStat); + + if (!inited) + goto put; + + if (tty && (intr & IntrBreak) && !I_IGNBRK(tty)) { /* BREAK */ + 
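+		/* queue a break marker for the line discipline */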
tty_insert_flip_char(tty, 0, TTY_BREAK); + tty_schedule_flip(tty); + } + + if (intr & IntrLine) + moxa_new_dcdstate(p, readb(ofsAddr + FlagStat) & DCD_state); +put: + tty_kref_put(tty); + + return 0; +} + +static void moxa_poll(unsigned long ignored) +{ + struct moxa_board_conf *brd; + u16 __iomem *ip; + unsigned int card, port, served = 0; + + spin_lock(&moxa_lock); + for (card = 0; card < MAX_BOARDS; card++) { + brd = &moxa_boards[card]; + if (!brd->ready) + continue; + + served++; + + ip = NULL; + if (readb(brd->intPend) == 0xff) + ip = brd->intTable + readb(brd->intNdx); + + for (port = 0; port < brd->numPorts; port++) + moxa_poll_port(&brd->ports[port], !!ip, ip + port); + + if (ip) + writeb(0, brd->intPend); /* ACK */ + + if (moxaLowWaterChk) { + struct moxa_port *p = brd->ports; + for (port = 0; port < brd->numPorts; port++, p++) + if (p->lowChkFlag) { + p->lowChkFlag = 0; + moxa_low_water_check(p->tableAddr); + } + } + } + moxaLowWaterChk = 0; + + if (served) + mod_timer(&moxaTimer, jiffies + HZ / 50); + spin_unlock(&moxa_lock); +} + +/******************************************************************************/ + +static void moxa_set_tty_param(struct tty_struct *tty, struct ktermios *old_termios) +{ + register struct ktermios *ts = tty->termios; + struct moxa_port *ch = tty->driver_data; + int rts, cts, txflow, rxflow, xany, baud; + + rts = cts = txflow = rxflow = xany = 0; + if (ts->c_cflag & CRTSCTS) + rts = cts = 1; + if (ts->c_iflag & IXON) + txflow = 1; + if (ts->c_iflag & IXOFF) + rxflow = 1; + if (ts->c_iflag & IXANY) + xany = 1; + + /* Clear the features we don't support */ + ts->c_cflag &= ~CMSPAR; + MoxaPortFlowCtrl(ch, rts, cts, txflow, rxflow, xany); + baud = MoxaPortSetTermio(ch, ts, tty_get_baud_rate(tty)); + if (baud == -1) + baud = tty_termios_baud_rate(old_termios); + /* Not put the baud rate into the termios data */ + tty_encode_baud_rate(tty, baud, baud); +} + +/***************************************************************************** + * Driver level functions: * + *****************************************************************************/ + +static void MoxaPortFlushData(struct moxa_port *port, int mode) +{ + void __iomem *ofsAddr; + if (mode < 0 || mode > 2) + return; + ofsAddr = port->tableAddr; + moxafunc(ofsAddr, FC_FlushQueue, mode); + if (mode != 1) { + port->lowChkFlag = 0; + moxa_low_water_check(ofsAddr); + } +} + +/* + * Moxa Port Number Description: + * + * MOXA serial driver supports up to 4 MOXA-C218/C320 boards. And, + * the port number using in MOXA driver functions will be 0 to 31 for + * first MOXA board, 32 to 63 for second, 64 to 95 for third and 96 + * to 127 for fourth. For example, if you setup three MOXA boards, + * first board is C218, second board is C320-16 and third board is + * C320-32. The port number of first board (C218 - 8 ports) is from + * 0 to 7. The port number of second board (C320 - 16 ports) is form + * 32 to 47. The port number of third board (C320 - 32 ports) is from + * 64 to 95. And those port numbers form 8 to 31, 48 to 63 and 96 to + * 127 will be invalid. + * + * + * Moxa Functions Description: + * + * Function 1: Driver initialization routine, this routine must be + * called when initialized driver. + * Syntax: + * void MoxaDriverInit(); + * + * + * Function 2: Moxa driver private IOCTL command processing. 
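+ * (the MOXA_GET_IQUEUE / MOXA_GETMSTATUS ioctl numbers are defined in moxa.h)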
+ * Syntax:
+ * int MoxaDriverIoctl(unsigned int cmd, unsigned long arg, int port);
+ *
+ * unsigned int cmd : IOCTL command
+ * unsigned long arg : IOCTL argument
+ * int port : port number (0 - 127)
+ *
+ * return: 0 (OK)
+ * -EINVAL
+ * -ENOIOCTLCMD
+ *
+ *
+ * Function 6: Enable this port to start Tx/Rx data.
+ * Syntax:
+ * void MoxaPortEnable(int port);
+ * int port : port number (0 - 127)
+ *
+ *
+ * Function 7: Disable this port
+ * Syntax:
+ * void MoxaPortDisable(int port);
+ * int port : port number (0 - 127)
+ *
+ *
+ * Function 10: Set the baud rate of this port.
+ * Syntax:
+ * speed_t MoxaPortSetBaud(int port, speed_t baud);
+ * int port : port number (0 - 127)
+ * speed_t baud : baud rate (50 - 115200)
+ *
+ * return: 0 : this port is invalid or baud < 50
+ * 50 - 115200 : the real baud rate set on the port; if
+ * the argument baud is larger than the maximum
+ * available baud rate, the maximum baud rate
+ * will be used instead.
+ *
+ *
+ * Function 12: Configure the port.
+ * Syntax:
+ * int MoxaPortSetTermio(int port, struct ktermios *termio, speed_t baud);
+ * int port : port number (0 - 127)
+ * struct ktermios * termio : termio structure pointer
+ * speed_t baud : baud rate
+ *
+ * return: -1 : this port is invalid or termio == NULL
+ * 0 : setting O.K.
+ *
+ *
+ * Function 13: Get the DTR/RTS state of this port.
+ * Syntax:
+ * int MoxaPortGetLineOut(int port, int *dtrState, int *rtsState);
+ * int port : port number (0 - 127)
+ * int * dtrState : pointer to INT to receive the current DTR
+ * state. (if NULL, this function will not
+ * write to this address)
+ * int * rtsState : pointer to INT to receive the current RTS
+ * state. (if NULL, this function will not
+ * write to this address)
+ *
+ * return: -1 : this port is invalid
+ * 0 : O.K.
+ *
+ *
+ * Function 14: Set the DTR/RTS output state of this port.
+ * Syntax:
+ * void MoxaPortLineCtrl(int port, int dtrState, int rtsState);
+ * int port : port number (0 - 127)
+ * int dtrState : DTR output state (0: off, 1: on)
+ * int rtsState : RTS output state (0: off, 1: on)
+ *
+ *
+ * Function 15: Set the flow control of this port.
+ * Syntax:
+ * void MoxaPortFlowCtrl(int port, int rtsFlow, int ctsFlow, int rxFlow,
+ * int txFlow, int xany);
+ * int port : port number (0 - 127)
+ * int rtsFlow : H/W RTS flow control (0: no, 1: yes)
+ * int ctsFlow : H/W CTS flow control (0: no, 1: yes)
+ * int rxFlow : S/W Rx XON/XOFF flow control (0: no, 1: yes)
+ * int txFlow : S/W Tx XON/XOFF flow control (0: no, 1: yes)
+ * int xany : S/W XANY flow control (0: no, 1: yes)
+ *
+ *
+ * Function 16: Get the line status of this port
+ * Syntax:
+ * int MoxaPortLineStatus(int port);
+ * int port : port number (0 - 127)
+ *
+ * return: Bit 0 - CTS state (0: off, 1: on)
+ * Bit 1 - DSR state (0: off, 1: on)
+ * Bit 2 - DCD state (0: off, 1: on)
+ *
+ *
+ * Function 19: Flush the Rx/Tx buffer data of this port.
+ * Syntax:
+ * void MoxaPortFlushData(int port, int mode);
+ * int port : port number (0 - 127)
+ * int mode
+ * 0 : flush the Rx buffer
+ * 1 : flush the Tx buffer
+ * 2 : flush the Rx and Tx buffer
+ *
+ *
+ * Function 20: Write data.
+ * Syntax:
+ * int MoxaPortWriteData(int port, unsigned char * buffer, int length);
+ * int port : port number (0 - 127)
+ * unsigned char * buffer : pointer to write data buffer.
+ * int length : write data length
+ *
+ * return: 0 - length : real write data length
+ *
+ *
+ * Function 21: Read data.
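+ * (called from moxa_poll_port() whenever the port has queued RX data and
+ * the tty is not throttled)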
+ * Syntax: + * int MoxaPortReadData(int port, struct tty_struct *tty); + * int port : port number (0 - 127) + * struct tty_struct *tty : tty for data + * + * return: 0 - length : real read data length + * + * + * Function 24: Get the Tx buffer current queued data bytes + * Syntax: + * int MoxaPortTxQueue(int port); + * int port : port number (0 - 127) + * + * return: .. : Tx buffer current queued data bytes + * + * + * Function 25: Get the Tx buffer current free space + * Syntax: + * int MoxaPortTxFree(int port); + * int port : port number (0 - 127) + * + * return: .. : Tx buffer current free space + * + * + * Function 26: Get the Rx buffer current queued data bytes + * Syntax: + * int MoxaPortRxQueue(int port); + * int port : port number (0 - 127) + * + * return: .. : Rx buffer current queued data bytes + * + * + * Function 28: Disable port data transmission. + * Syntax: + * void MoxaPortTxDisable(int port); + * int port : port number (0 - 127) + * + * + * Function 29: Enable port data transmission. + * Syntax: + * void MoxaPortTxEnable(int port); + * int port : port number (0 - 127) + * + * + * Function 31: Get the received BREAK signal count and reset it. + * Syntax: + * int MoxaPortResetBrkCnt(int port); + * int port : port number (0 - 127) + * + * return: 0 - .. : BREAK signal count + * + * + */ + +static void MoxaPortEnable(struct moxa_port *port) +{ + void __iomem *ofsAddr; + u16 lowwater = 512; + + ofsAddr = port->tableAddr; + writew(lowwater, ofsAddr + Low_water); + if (MOXA_IS_320(port->board)) + moxafunc(ofsAddr, FC_SetBreakIrq, 0); + else + writew(readw(ofsAddr + HostStat) | WakeupBreak, + ofsAddr + HostStat); + + moxafunc(ofsAddr, FC_SetLineIrq, Magic_code); + moxafunc(ofsAddr, FC_FlushQueue, 2); + + moxafunc(ofsAddr, FC_EnableCH, Magic_code); + MoxaPortLineStatus(port); +} + +static void MoxaPortDisable(struct moxa_port *port) +{ + void __iomem *ofsAddr = port->tableAddr; + + moxafunc(ofsAddr, FC_SetFlowCtl, 0); /* disable flow control */ + moxafunc(ofsAddr, FC_ClrLineIrq, Magic_code); + writew(0, ofsAddr + HostStat); + moxafunc(ofsAddr, FC_DisableCH, Magic_code); +} + +static speed_t MoxaPortSetBaud(struct moxa_port *port, speed_t baud) +{ + void __iomem *ofsAddr = port->tableAddr; + unsigned int clock, val; + speed_t max; + + max = MOXA_IS_320(port->board) ? 
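+	/* a C320 tops out at 460.8 kbps, C218/CP204J at 921.6 kbps */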
460800 : 921600; + if (baud < 50) + return 0; + if (baud > max) + baud = max; + clock = 921600; + val = clock / baud; + moxafunc(ofsAddr, FC_SetBaud, val); + baud = clock / val; + return baud; +} + +static int MoxaPortSetTermio(struct moxa_port *port, struct ktermios *termio, + speed_t baud) +{ + void __iomem *ofsAddr; + tcflag_t cflag; + tcflag_t mode = 0; + + ofsAddr = port->tableAddr; + cflag = termio->c_cflag; /* termio->c_cflag */ + + mode = termio->c_cflag & CSIZE; + if (mode == CS5) + mode = MX_CS5; + else if (mode == CS6) + mode = MX_CS6; + else if (mode == CS7) + mode = MX_CS7; + else if (mode == CS8) + mode = MX_CS8; + + if (termio->c_cflag & CSTOPB) { + if (mode == MX_CS5) + mode |= MX_STOP15; + else + mode |= MX_STOP2; + } else + mode |= MX_STOP1; + + if (termio->c_cflag & PARENB) { + if (termio->c_cflag & PARODD) + mode |= MX_PARODD; + else + mode |= MX_PAREVEN; + } else + mode |= MX_PARNONE; + + moxafunc(ofsAddr, FC_SetDataMode, (u16)mode); + + if (MOXA_IS_320(port->board) && baud >= 921600) + return -1; + + baud = MoxaPortSetBaud(port, baud); + + if (termio->c_iflag & (IXON | IXOFF | IXANY)) { + spin_lock_irq(&moxafunc_lock); + writeb(termio->c_cc[VSTART], ofsAddr + FuncArg); + writeb(termio->c_cc[VSTOP], ofsAddr + FuncArg1); + writeb(FC_SetXonXoff, ofsAddr + FuncCode); + moxa_wait_finish(ofsAddr); + spin_unlock_irq(&moxafunc_lock); + + } + return baud; +} + +static int MoxaPortGetLineOut(struct moxa_port *port, int *dtrState, + int *rtsState) +{ + if (dtrState) + *dtrState = !!(port->lineCtrl & DTR_ON); + if (rtsState) + *rtsState = !!(port->lineCtrl & RTS_ON); + + return 0; +} + +static void MoxaPortLineCtrl(struct moxa_port *port, int dtr, int rts) +{ + u8 mode = 0; + + if (dtr) + mode |= DTR_ON; + if (rts) + mode |= RTS_ON; + port->lineCtrl = mode; + moxafunc(port->tableAddr, FC_LineControl, mode); +} + +static void MoxaPortFlowCtrl(struct moxa_port *port, int rts, int cts, + int txflow, int rxflow, int txany) +{ + int mode = 0; + + if (rts) + mode |= RTS_FlowCtl; + if (cts) + mode |= CTS_FlowCtl; + if (txflow) + mode |= Tx_FlowCtl; + if (rxflow) + mode |= Rx_FlowCtl; + if (txany) + mode |= IXM_IXANY; + moxafunc(port->tableAddr, FC_SetFlowCtl, mode); +} + +static int MoxaPortLineStatus(struct moxa_port *port) +{ + void __iomem *ofsAddr; + int val; + + ofsAddr = port->tableAddr; + if (MOXA_IS_320(port->board)) + val = moxafuncret(ofsAddr, FC_LineStatus, 0); + else + val = readw(ofsAddr + FlagStat) >> 4; + val &= 0x0B; + if (val & 8) + val |= 4; + moxa_new_dcdstate(port, val & 8); + val &= 7; + return val; +} + +static int MoxaPortWriteData(struct tty_struct *tty, + const unsigned char *buffer, int len) +{ + struct moxa_port *port = tty->driver_data; + void __iomem *baseAddr, *ofsAddr, *ofs; + unsigned int c, total; + u16 head, tail, tx_mask, spage, epage; + u16 pageno, pageofs, bufhead; + + ofsAddr = port->tableAddr; + baseAddr = port->board->basemem; + tx_mask = readw(ofsAddr + TX_mask); + spage = readw(ofsAddr + Page_txb); + epage = readw(ofsAddr + EndPage_txb); + tail = readw(ofsAddr + TXwptr); + head = readw(ofsAddr + TXrptr); + c = (head > tail) ? (head - tail - 1) : (head - tail + tx_mask); + if (c > len) + c = len; + moxaLog.txcnt[port->port.tty->index] += c; + total = c; + if (spage == epage) { + bufhead = readw(ofsAddr + Ofs_txb); + writew(spage, baseAddr + Control_reg); + while (c > 0) { + if (head > tail) + len = head - tail - 1; + else + len = tx_mask + 1 - tail; + len = (c > len) ? 
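+				/* min(contiguous space, bytes still to write) */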
len : c; + ofs = baseAddr + DynPage_addr + bufhead + tail; + memcpy_toio(ofs, buffer, len); + buffer += len; + tail = (tail + len) & tx_mask; + c -= len; + } + } else { + pageno = spage + (tail >> 13); + pageofs = tail & Page_mask; + while (c > 0) { + len = Page_size - pageofs; + if (len > c) + len = c; + writeb(pageno, baseAddr + Control_reg); + ofs = baseAddr + DynPage_addr + pageofs; + memcpy_toio(ofs, buffer, len); + buffer += len; + if (++pageno == epage) + pageno = spage; + pageofs = 0; + c -= len; + } + tail = (tail + total) & tx_mask; + } + writew(tail, ofsAddr + TXwptr); + writeb(1, ofsAddr + CD180TXirq); /* start to send */ + return total; +} + +static int MoxaPortReadData(struct moxa_port *port) +{ + struct tty_struct *tty = port->port.tty; + unsigned char *dst; + void __iomem *baseAddr, *ofsAddr, *ofs; + unsigned int count, len, total; + u16 tail, rx_mask, spage, epage; + u16 pageno, pageofs, bufhead, head; + + ofsAddr = port->tableAddr; + baseAddr = port->board->basemem; + head = readw(ofsAddr + RXrptr); + tail = readw(ofsAddr + RXwptr); + rx_mask = readw(ofsAddr + RX_mask); + spage = readw(ofsAddr + Page_rxb); + epage = readw(ofsAddr + EndPage_rxb); + count = (tail >= head) ? (tail - head) : (tail - head + rx_mask + 1); + if (count == 0) + return 0; + + total = count; + moxaLog.rxcnt[tty->index] += total; + if (spage == epage) { + bufhead = readw(ofsAddr + Ofs_rxb); + writew(spage, baseAddr + Control_reg); + while (count > 0) { + ofs = baseAddr + DynPage_addr + bufhead + head; + len = (tail >= head) ? (tail - head) : + (rx_mask + 1 - head); + len = tty_prepare_flip_string(tty, &dst, + min(len, count)); + memcpy_fromio(dst, ofs, len); + head = (head + len) & rx_mask; + count -= len; + } + } else { + pageno = spage + (head >> 13); + pageofs = head & Page_mask; + while (count > 0) { + writew(pageno, baseAddr + Control_reg); + ofs = baseAddr + DynPage_addr + pageofs; + len = tty_prepare_flip_string(tty, &dst, + min(Page_size - pageofs, count)); + memcpy_fromio(dst, ofs, len); + + count -= len; + pageofs = (pageofs + len) & Page_mask; + if (pageofs == 0 && ++pageno == epage) + pageno = spage; + } + head = (head + total) & rx_mask; + } + writew(head, ofsAddr + RXrptr); + if (readb(ofsAddr + FlagStat) & Xoff_state) { + moxaLowWaterChk = 1; + port->lowChkFlag = 1; + } + return total; +} + + +static int MoxaPortTxQueue(struct moxa_port *port) +{ + void __iomem *ofsAddr = port->tableAddr; + u16 rptr, wptr, mask; + + rptr = readw(ofsAddr + TXrptr); + wptr = readw(ofsAddr + TXwptr); + mask = readw(ofsAddr + TX_mask); + return (wptr - rptr) & mask; +} + +static int MoxaPortTxFree(struct moxa_port *port) +{ + void __iomem *ofsAddr = port->tableAddr; + u16 rptr, wptr, mask; + + rptr = readw(ofsAddr + TXrptr); + wptr = readw(ofsAddr + TXwptr); + mask = readw(ofsAddr + TX_mask); + return mask - ((wptr - rptr) & mask); +} + +static int MoxaPortRxQueue(struct moxa_port *port) +{ + void __iomem *ofsAddr = port->tableAddr; + u16 rptr, wptr, mask; + + rptr = readw(ofsAddr + RXrptr); + wptr = readw(ofsAddr + RXwptr); + mask = readw(ofsAddr + RX_mask); + return (wptr - rptr) & mask; +} + +static void MoxaPortTxDisable(struct moxa_port *port) +{ + moxafunc(port->tableAddr, FC_SetXoffState, Magic_code); +} + +static void MoxaPortTxEnable(struct moxa_port *port) +{ + moxafunc(port->tableAddr, FC_SetXonState, Magic_code); +} + +static int moxa_get_serial_info(struct moxa_port *info, + struct serial_struct __user *retinfo) +{ + struct serial_struct tmp = { + .type = info->type, + .line = 
info->port.tty->index, + .flags = info->port.flags, + .baud_base = 921600, + .close_delay = info->port.close_delay + }; + return copy_to_user(retinfo, &tmp, sizeof(*retinfo)) ? -EFAULT : 0; +} + + +static int moxa_set_serial_info(struct moxa_port *info, + struct serial_struct __user *new_info) +{ + struct serial_struct new_serial; + + if (copy_from_user(&new_serial, new_info, sizeof(new_serial))) + return -EFAULT; + + if (new_serial.irq != 0 || new_serial.port != 0 || + new_serial.custom_divisor != 0 || + new_serial.baud_base != 921600) + return -EPERM; + + if (!capable(CAP_SYS_ADMIN)) { + if (((new_serial.flags & ~ASYNC_USR_MASK) != + (info->port.flags & ~ASYNC_USR_MASK))) + return -EPERM; + } else + info->port.close_delay = new_serial.close_delay * HZ / 100; + + new_serial.flags = (new_serial.flags & ~ASYNC_FLAGS); + new_serial.flags |= (info->port.flags & ASYNC_FLAGS); + + MoxaSetFifo(info, new_serial.type == PORT_16550A); + + info->type = new_serial.type; + return 0; +} + + + +/***************************************************************************** + * Static local functions: * + *****************************************************************************/ + +static void MoxaSetFifo(struct moxa_port *port, int enable) +{ + void __iomem *ofsAddr = port->tableAddr; + + if (!enable) { + moxafunc(ofsAddr, FC_SetRxFIFOTrig, 0); + moxafunc(ofsAddr, FC_SetTxFIFOCnt, 1); + } else { + moxafunc(ofsAddr, FC_SetRxFIFOTrig, 3); + moxafunc(ofsAddr, FC_SetTxFIFOCnt, 16); + } +} diff --git a/drivers/tty/moxa.h b/drivers/tty/moxa.h new file mode 100644 index 00000000000..87d16ce57be --- /dev/null +++ b/drivers/tty/moxa.h @@ -0,0 +1,304 @@ +#ifndef MOXA_H_FILE +#define MOXA_H_FILE + +#define MOXA 0x400 +#define MOXA_GET_IQUEUE (MOXA + 1) /* get input buffered count */ +#define MOXA_GET_OQUEUE (MOXA + 2) /* get output buffered count */ +#define MOXA_GETDATACOUNT (MOXA + 23) +#define MOXA_GET_IOQUEUE (MOXA + 27) +#define MOXA_FLUSH_QUEUE (MOXA + 28) +#define MOXA_GETMSTATUS (MOXA + 65) + +/* + * System Configuration + */ + +#define Magic_code 0x404 + +/* + * for C218 BIOS initialization + */ +#define C218_ConfBase 0x800 +#define C218_status (C218_ConfBase + 0) /* BIOS running status */ +#define C218_diag (C218_ConfBase + 2) /* diagnostic status */ +#define C218_key (C218_ConfBase + 4) /* WORD (0x218 for C218) */ +#define C218DLoad_len (C218_ConfBase + 6) /* WORD */ +#define C218check_sum (C218_ConfBase + 8) /* BYTE */ +#define C218chksum_ok (C218_ConfBase + 0x0a) /* BYTE (1:ok) */ +#define C218_TestRx (C218_ConfBase + 0x10) /* 8 bytes for 8 ports */ +#define C218_TestTx (C218_ConfBase + 0x18) /* 8 bytes for 8 ports */ +#define C218_RXerr (C218_ConfBase + 0x20) /* 8 bytes for 8 ports */ +#define C218_ErrFlag (C218_ConfBase + 0x28) /* 8 bytes for 8 ports */ + +#define C218_LoadBuf 0x0F00 +#define C218_KeyCode 0x218 +#define CP204J_KeyCode 0x204 + +/* + * for C320 BIOS initialization + */ +#define C320_ConfBase 0x800 +#define C320_LoadBuf 0x0f00 +#define STS_init 0x05 /* for C320_status */ + +#define C320_status C320_ConfBase + 0 /* BIOS running status */ +#define C320_diag C320_ConfBase + 2 /* diagnostic status */ +#define C320_key C320_ConfBase + 4 /* WORD (0320H for C320) */ +#define C320DLoad_len C320_ConfBase + 6 /* WORD */ +#define C320check_sum C320_ConfBase + 8 /* WORD */ +#define C320chksum_ok C320_ConfBase + 0x0a /* WORD (1:ok) */ +#define C320bapi_len C320_ConfBase + 0x0c /* WORD */ +#define C320UART_no C320_ConfBase + 0x0e /* WORD */ + +#define C320_KeyCode 0x320 + +#define FixPage_addr 
0x0000 /* starting addr of static page */ +#define DynPage_addr 0x2000 /* starting addr of dynamic page */ +#define C218_start 0x3000 /* starting addr of C218 BIOS prg */ +#define Control_reg 0x1ff0 /* select page and reset control */ +#define HW_reset 0x80 + +/* + * Function Codes + */ +#define FC_CardReset 0x80 +#define FC_ChannelReset 1 /* C320 firmware not supported */ +#define FC_EnableCH 2 +#define FC_DisableCH 3 +#define FC_SetParam 4 +#define FC_SetMode 5 +#define FC_SetRate 6 +#define FC_LineControl 7 +#define FC_LineStatus 8 +#define FC_XmitControl 9 +#define FC_FlushQueue 10 +#define FC_SendBreak 11 +#define FC_StopBreak 12 +#define FC_LoopbackON 13 +#define FC_LoopbackOFF 14 +#define FC_ClrIrqTable 15 +#define FC_SendXon 16 +#define FC_SetTermIrq 17 /* C320 firmware not supported */ +#define FC_SetCntIrq 18 /* C320 firmware not supported */ +#define FC_SetBreakIrq 19 +#define FC_SetLineIrq 20 +#define FC_SetFlowCtl 21 +#define FC_GenIrq 22 +#define FC_InCD180 23 +#define FC_OutCD180 24 +#define FC_InUARTreg 23 +#define FC_OutUARTreg 24 +#define FC_SetXonXoff 25 +#define FC_OutCD180CCR 26 +#define FC_ExtIQueue 27 +#define FC_ExtOQueue 28 +#define FC_ClrLineIrq 29 +#define FC_HWFlowCtl 30 +#define FC_GetClockRate 35 +#define FC_SetBaud 36 +#define FC_SetDataMode 41 +#define FC_GetCCSR 43 +#define FC_GetDataError 45 +#define FC_RxControl 50 +#define FC_ImmSend 51 +#define FC_SetXonState 52 +#define FC_SetXoffState 53 +#define FC_SetRxFIFOTrig 54 +#define FC_SetTxFIFOCnt 55 +#define FC_UnixRate 56 +#define FC_UnixResetTimer 57 + +#define RxFIFOTrig1 0 +#define RxFIFOTrig4 1 +#define RxFIFOTrig8 2 +#define RxFIFOTrig14 3 + +/* + * Dual-Ported RAM + */ +#define DRAM_global 0 +#define INT_data (DRAM_global + 0) +#define Config_base (DRAM_global + 0x108) + +#define IRQindex (INT_data + 0) +#define IRQpending (INT_data + 4) +#define IRQtable (INT_data + 8) + +/* + * Interrupt Status + */ +#define IntrRx 0x01 /* receiver data O.K. 
*/ +#define IntrTx 0x02 /* transmit buffer empty */ +#define IntrFunc 0x04 /* function complete */ +#define IntrBreak 0x08 /* received break */ +#define IntrLine 0x10 /* line status change + for transmitter */ +#define IntrIntr 0x20 /* received INTR code */ +#define IntrQuit 0x40 /* received QUIT code */ +#define IntrEOF 0x80 /* received EOF code */ + +#define IntrRxTrigger 0x100 /* rx data count reach tigger value */ +#define IntrTxTrigger 0x200 /* tx data count below trigger value */ + +#define Magic_no (Config_base + 0) +#define Card_model_no (Config_base + 2) +#define Total_ports (Config_base + 4) +#define Module_cnt (Config_base + 8) +#define Module_no (Config_base + 10) +#define Timer_10ms (Config_base + 14) +#define Disable_IRQ (Config_base + 20) +#define TMS320_PORT1 (Config_base + 22) +#define TMS320_PORT2 (Config_base + 24) +#define TMS320_CLOCK (Config_base + 26) + +/* + * DATA BUFFER in DRAM + */ +#define Extern_table 0x400 /* Base address of the external table + (24 words * 64) total 3K bytes + (24 words * 128) total 6K bytes */ +#define Extern_size 0x60 /* 96 bytes */ +#define RXrptr 0x00 /* read pointer for RX buffer */ +#define RXwptr 0x02 /* write pointer for RX buffer */ +#define TXrptr 0x04 /* read pointer for TX buffer */ +#define TXwptr 0x06 /* write pointer for TX buffer */ +#define HostStat 0x08 /* IRQ flag and general flag */ +#define FlagStat 0x0A +#define FlowControl 0x0C /* B7 B6 B5 B4 B3 B2 B1 B0 */ + /* x x x x | | | | */ + /* | | | + CTS flow */ + /* | | +--- RTS flow */ + /* | +------ TX Xon/Xoff */ + /* +--------- RX Xon/Xoff */ +#define Break_cnt 0x0E /* received break count */ +#define CD180TXirq 0x10 /* if non-0: enable TX irq */ +#define RX_mask 0x12 +#define TX_mask 0x14 +#define Ofs_rxb 0x16 +#define Ofs_txb 0x18 +#define Page_rxb 0x1A +#define Page_txb 0x1C +#define EndPage_rxb 0x1E +#define EndPage_txb 0x20 +#define Data_error 0x22 +#define RxTrigger 0x28 +#define TxTrigger 0x2a + +#define rRXwptr 0x34 +#define Low_water 0x36 + +#define FuncCode 0x40 +#define FuncArg 0x42 +#define FuncArg1 0x44 + +#define C218rx_size 0x2000 /* 8K bytes */ +#define C218tx_size 0x8000 /* 32K bytes */ + +#define C218rx_mask (C218rx_size - 1) +#define C218tx_mask (C218tx_size - 1) + +#define C320p8rx_size 0x2000 +#define C320p8tx_size 0x8000 +#define C320p8rx_mask (C320p8rx_size - 1) +#define C320p8tx_mask (C320p8tx_size - 1) + +#define C320p16rx_size 0x2000 +#define C320p16tx_size 0x4000 +#define C320p16rx_mask (C320p16rx_size - 1) +#define C320p16tx_mask (C320p16tx_size - 1) + +#define C320p24rx_size 0x2000 +#define C320p24tx_size 0x2000 +#define C320p24rx_mask (C320p24rx_size - 1) +#define C320p24tx_mask (C320p24tx_size - 1) + +#define C320p32rx_size 0x1000 +#define C320p32tx_size 0x1000 +#define C320p32rx_mask (C320p32rx_size - 1) +#define C320p32tx_mask (C320p32tx_size - 1) + +#define Page_size 0x2000U +#define Page_mask (Page_size - 1) +#define C218rx_spage 3 +#define C218tx_spage 4 +#define C218rx_pageno 1 +#define C218tx_pageno 4 +#define C218buf_pageno 5 + +#define C320p8rx_spage 3 +#define C320p8tx_spage 4 +#define C320p8rx_pgno 1 +#define C320p8tx_pgno 4 +#define C320p8buf_pgno 5 + +#define C320p16rx_spage 3 +#define C320p16tx_spage 4 +#define C320p16rx_pgno 1 +#define C320p16tx_pgno 2 +#define C320p16buf_pgno 3 + +#define C320p24rx_spage 3 +#define C320p24tx_spage 4 +#define C320p24rx_pgno 1 +#define C320p24tx_pgno 1 +#define C320p24buf_pgno 2 + +#define C320p32rx_spage 3 +#define C320p32tx_ofs C320p32rx_size +#define C320p32tx_spage 3 +#define 
C320p32buf_pgno 1 + +/* + * Host Status + */ +#define WakeupRx 0x01 +#define WakeupTx 0x02 +#define WakeupBreak 0x08 +#define WakeupLine 0x10 +#define WakeupIntr 0x20 +#define WakeupQuit 0x40 +#define WakeupEOF 0x80 /* used in VTIME control */ +#define WakeupRxTrigger 0x100 +#define WakeupTxTrigger 0x200 +/* + * Flag status + */ +#define Rx_over 0x01 +#define Xoff_state 0x02 +#define Tx_flowOff 0x04 +#define Tx_enable 0x08 +#define CTS_state 0x10 +#define DSR_state 0x20 +#define DCD_state 0x80 +/* + * FlowControl + */ +#define CTS_FlowCtl 1 +#define RTS_FlowCtl 2 +#define Tx_FlowCtl 4 +#define Rx_FlowCtl 8 +#define IXM_IXANY 0x10 + +#define LowWater 128 + +#define DTR_ON 1 +#define RTS_ON 2 +#define CTS_ON 1 +#define DSR_ON 2 +#define DCD_ON 8 + +/* mode definition */ +#define MX_CS8 0x03 +#define MX_CS7 0x02 +#define MX_CS6 0x01 +#define MX_CS5 0x00 + +#define MX_STOP1 0x00 +#define MX_STOP15 0x04 +#define MX_STOP2 0x08 + +#define MX_PARNONE 0x00 +#define MX_PAREVEN 0x40 +#define MX_PARODD 0xC0 + +#endif diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c new file mode 100644 index 00000000000..d188f378684 --- /dev/null +++ b/drivers/tty/mxser.c @@ -0,0 +1,2757 @@ +/* + * mxser.c -- MOXA Smartio/Industio family multiport serial driver. + * + * Copyright (C) 1999-2006 Moxa Technologies (support@moxa.com). + * Copyright (C) 2006-2008 Jiri Slaby <jirislaby@gmail.com> + * + * This code is loosely based on the 1.8 moxa driver which is based on + * Linux serial driver, written by Linus Torvalds, Theodore T'so and + * others. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Fed through a cleanup, indent and remove of non 2.6 code by Alan Cox + * <alan@lxorguk.ukuu.org.uk>. The original 1.8 code is available on + * www.moxa.com. + * - Fixed x86_64 cleanness + */ + +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/signal.h> +#include <linux/sched.h> +#include <linux/timer.h> +#include <linux/interrupt.h> +#include <linux/tty.h> +#include <linux/tty_flip.h> +#include <linux/serial.h> +#include <linux/serial_reg.h> +#include <linux/major.h> +#include <linux/string.h> +#include <linux/fcntl.h> +#include <linux/ptrace.h> +#include <linux/ioport.h> +#include <linux/mm.h> +#include <linux/delay.h> +#include <linux/pci.h> +#include <linux/bitops.h> +#include <linux/slab.h> + +#include <asm/system.h> +#include <asm/io.h> +#include <asm/irq.h> +#include <asm/uaccess.h> + +#include "mxser.h" + +#define MXSER_VERSION "2.0.5" /* 1.14 */ +#define MXSERMAJOR 174 + +#define MXSER_BOARDS 4 /* Max. boards */ +#define MXSER_PORTS_PER_BOARD 8 /* Max. 
ports per board */ +#define MXSER_PORTS (MXSER_BOARDS * MXSER_PORTS_PER_BOARD) +#define MXSER_ISR_PASS_LIMIT 100 + +/*CheckIsMoxaMust return value*/ +#define MOXA_OTHER_UART 0x00 +#define MOXA_MUST_MU150_HWID 0x01 +#define MOXA_MUST_MU860_HWID 0x02 + +#define WAKEUP_CHARS 256 + +#define UART_MCR_AFE 0x20 +#define UART_LSR_SPECIAL 0x1E + +#define PCI_DEVICE_ID_POS104UL 0x1044 +#define PCI_DEVICE_ID_CB108 0x1080 +#define PCI_DEVICE_ID_CP102UF 0x1023 +#define PCI_DEVICE_ID_CP112UL 0x1120 +#define PCI_DEVICE_ID_CB114 0x1142 +#define PCI_DEVICE_ID_CP114UL 0x1143 +#define PCI_DEVICE_ID_CB134I 0x1341 +#define PCI_DEVICE_ID_CP138U 0x1380 + + +#define C168_ASIC_ID 1 +#define C104_ASIC_ID 2 +#define C102_ASIC_ID 0xB +#define CI132_ASIC_ID 4 +#define CI134_ASIC_ID 3 +#define CI104J_ASIC_ID 5 + +#define MXSER_HIGHBAUD 1 +#define MXSER_HAS2 2 + +/* This is only for PCI */ +static const struct { + int type; + int tx_fifo; + int rx_fifo; + int xmit_fifo_size; + int rx_high_water; + int rx_trigger; + int rx_low_water; + long max_baud; +} Gpci_uart_info[] = { + {MOXA_OTHER_UART, 16, 16, 16, 14, 14, 1, 921600L}, + {MOXA_MUST_MU150_HWID, 64, 64, 64, 48, 48, 16, 230400L}, + {MOXA_MUST_MU860_HWID, 128, 128, 128, 96, 96, 32, 921600L} +}; +#define UART_INFO_NUM ARRAY_SIZE(Gpci_uart_info) + +struct mxser_cardinfo { + char *name; + unsigned int nports; + unsigned int flags; +}; + +static const struct mxser_cardinfo mxser_cards[] = { +/* 0*/ { "C168 series", 8, }, + { "C104 series", 4, }, + { "CI-104J series", 4, }, + { "C168H/PCI series", 8, }, + { "C104H/PCI series", 4, }, +/* 5*/ { "C102 series", 4, MXSER_HAS2 }, /* C102-ISA */ + { "CI-132 series", 4, MXSER_HAS2 }, + { "CI-134 series", 4, }, + { "CP-132 series", 2, }, + { "CP-114 series", 4, }, +/*10*/ { "CT-114 series", 4, }, + { "CP-102 series", 2, MXSER_HIGHBAUD }, + { "CP-104U series", 4, }, + { "CP-168U series", 8, }, + { "CP-132U series", 2, }, +/*15*/ { "CP-134U series", 4, }, + { "CP-104JU series", 4, }, + { "Moxa UC7000 Serial", 8, }, /* RC7000 */ + { "CP-118U series", 8, }, + { "CP-102UL series", 2, }, +/*20*/ { "CP-102U series", 2, }, + { "CP-118EL series", 8, }, + { "CP-168EL series", 8, }, + { "CP-104EL series", 4, }, + { "CB-108 series", 8, }, +/*25*/ { "CB-114 series", 4, }, + { "CB-134I series", 4, }, + { "CP-138U series", 8, }, + { "POS-104UL series", 4, }, + { "CP-114UL series", 4, }, +/*30*/ { "CP-102UF series", 2, }, + { "CP-112UL series", 2, }, +}; + +/* driver_data correspond to the lines in the structure above + see also ISA probe function before you change something */ +static struct pci_device_id mxser_pcibrds[] = { + { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_C168), .driver_data = 3 }, + { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_C104), .driver_data = 4 }, + { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP132), .driver_data = 8 }, + { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP114), .driver_data = 9 }, + { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CT114), .driver_data = 10 }, + { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102), .driver_data = 11 }, + { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP104U), .driver_data = 12 }, + { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP168U), .driver_data = 13 }, + { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP132U), .driver_data = 14 }, + { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP134U), .driver_data = 15 }, + { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP104JU),.driver_data = 16 }, + { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_RC7000), .driver_data = 17 }, + { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP118U), .driver_data = 18 }, + { PCI_VDEVICE(MOXA, 
PCI_DEVICE_ID_MOXA_CP102UL),.driver_data = 19 }, + { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102U), .driver_data = 20 }, + { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP118EL),.driver_data = 21 }, + { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP168EL),.driver_data = 22 }, + { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP104EL),.driver_data = 23 }, + { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CB108), .driver_data = 24 }, + { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CB114), .driver_data = 25 }, + { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CB134I), .driver_data = 26 }, + { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP138U), .driver_data = 27 }, + { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_POS104UL), .driver_data = 28 }, + { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP114UL), .driver_data = 29 }, + { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP102UF), .driver_data = 30 }, + { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP112UL), .driver_data = 31 }, + { } +}; +MODULE_DEVICE_TABLE(pci, mxser_pcibrds); + +static unsigned long ioaddr[MXSER_BOARDS]; +static int ttymajor = MXSERMAJOR; + +/* Variables for insmod */ + +MODULE_AUTHOR("Casper Yang"); +MODULE_DESCRIPTION("MOXA Smartio/Industio Family Multiport Board Device Driver"); +module_param_array(ioaddr, ulong, NULL, 0); +MODULE_PARM_DESC(ioaddr, "ISA io addresses to look for a moxa board"); +module_param(ttymajor, int, 0); +MODULE_LICENSE("GPL"); + +struct mxser_log { + int tick; + unsigned long rxcnt[MXSER_PORTS]; + unsigned long txcnt[MXSER_PORTS]; +}; + +struct mxser_mon { + unsigned long rxcnt; + unsigned long txcnt; + unsigned long up_rxcnt; + unsigned long up_txcnt; + int modem_status; + unsigned char hold_reason; +}; + +struct mxser_mon_ext { + unsigned long rx_cnt[32]; + unsigned long tx_cnt[32]; + unsigned long up_rxcnt[32]; + unsigned long up_txcnt[32]; + int modem_status[32]; + + long baudrate[32]; + int databits[32]; + int stopbits[32]; + int parity[32]; + int flowctrl[32]; + int fifo[32]; + int iftype[32]; +}; + +struct mxser_board; + +struct mxser_port { + struct tty_port port; + struct mxser_board *board; + + unsigned long ioaddr; + unsigned long opmode_ioaddr; + int max_baud; + + int rx_high_water; + int rx_trigger; /* Rx fifo trigger level */ + int rx_low_water; + int baud_base; /* max. 
speed */ + int type; /* UART type */ + + int x_char; /* xon/xoff character */ + int IER; /* Interrupt Enable Register */ + int MCR; /* Modem control register */ + + unsigned char stop_rx; + unsigned char ldisc_stop_rx; + + int custom_divisor; + unsigned char err_shadow; + + struct async_icount icount; /* kernel counters for 4 input interrupts */ + int timeout; + + int read_status_mask; + int ignore_status_mask; + int xmit_fifo_size; + int xmit_head; + int xmit_tail; + int xmit_cnt; + + struct ktermios normal_termios; + + struct mxser_mon mon_data; + + spinlock_t slock; +}; + +struct mxser_board { + unsigned int idx; + int irq; + const struct mxser_cardinfo *info; + unsigned long vector; + unsigned long vector_mask; + + int chip_flag; + int uart_type; + + struct mxser_port ports[MXSER_PORTS_PER_BOARD]; +}; + +struct mxser_mstatus { + tcflag_t cflag; + int cts; + int dsr; + int ri; + int dcd; +}; + +static struct mxser_board mxser_boards[MXSER_BOARDS]; +static struct tty_driver *mxvar_sdriver; +static struct mxser_log mxvar_log; +static int mxser_set_baud_method[MXSER_PORTS + 1]; + +static void mxser_enable_must_enchance_mode(unsigned long baseio) +{ + u8 oldlcr; + u8 efr; + + oldlcr = inb(baseio + UART_LCR); + outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR); + + efr = inb(baseio + MOXA_MUST_EFR_REGISTER); + efr |= MOXA_MUST_EFR_EFRB_ENABLE; + + outb(efr, baseio + MOXA_MUST_EFR_REGISTER); + outb(oldlcr, baseio + UART_LCR); +} + +#ifdef CONFIG_PCI +static void mxser_disable_must_enchance_mode(unsigned long baseio) +{ + u8 oldlcr; + u8 efr; + + oldlcr = inb(baseio + UART_LCR); + outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR); + + efr = inb(baseio + MOXA_MUST_EFR_REGISTER); + efr &= ~MOXA_MUST_EFR_EFRB_ENABLE; + + outb(efr, baseio + MOXA_MUST_EFR_REGISTER); + outb(oldlcr, baseio + UART_LCR); +} +#endif + +static void mxser_set_must_xon1_value(unsigned long baseio, u8 value) +{ + u8 oldlcr; + u8 efr; + + oldlcr = inb(baseio + UART_LCR); + outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR); + + efr = inb(baseio + MOXA_MUST_EFR_REGISTER); + efr &= ~MOXA_MUST_EFR_BANK_MASK; + efr |= MOXA_MUST_EFR_BANK0; + + outb(efr, baseio + MOXA_MUST_EFR_REGISTER); + outb(value, baseio + MOXA_MUST_XON1_REGISTER); + outb(oldlcr, baseio + UART_LCR); +} + +static void mxser_set_must_xoff1_value(unsigned long baseio, u8 value) +{ + u8 oldlcr; + u8 efr; + + oldlcr = inb(baseio + UART_LCR); + outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR); + + efr = inb(baseio + MOXA_MUST_EFR_REGISTER); + efr &= ~MOXA_MUST_EFR_BANK_MASK; + efr |= MOXA_MUST_EFR_BANK0; + + outb(efr, baseio + MOXA_MUST_EFR_REGISTER); + outb(value, baseio + MOXA_MUST_XOFF1_REGISTER); + outb(oldlcr, baseio + UART_LCR); +} + +static void mxser_set_must_fifo_value(struct mxser_port *info) +{ + u8 oldlcr; + u8 efr; + + oldlcr = inb(info->ioaddr + UART_LCR); + outb(MOXA_MUST_ENTER_ENCHANCE, info->ioaddr + UART_LCR); + + efr = inb(info->ioaddr + MOXA_MUST_EFR_REGISTER); + efr &= ~MOXA_MUST_EFR_BANK_MASK; + efr |= MOXA_MUST_EFR_BANK1; + + outb(efr, info->ioaddr + MOXA_MUST_EFR_REGISTER); + outb((u8)info->rx_high_water, info->ioaddr + MOXA_MUST_RBRTH_REGISTER); + outb((u8)info->rx_trigger, info->ioaddr + MOXA_MUST_RBRTI_REGISTER); + outb((u8)info->rx_low_water, info->ioaddr + MOXA_MUST_RBRTL_REGISTER); + outb(oldlcr, info->ioaddr + UART_LCR); +} + +static void mxser_set_must_enum_value(unsigned long baseio, u8 value) +{ + u8 oldlcr; + u8 efr; + + oldlcr = inb(baseio + UART_LCR); + outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR); + + efr = inb(baseio + 
MOXA_MUST_EFR_REGISTER); + efr &= ~MOXA_MUST_EFR_BANK_MASK; + efr |= MOXA_MUST_EFR_BANK2; + + outb(efr, baseio + MOXA_MUST_EFR_REGISTER); + outb(value, baseio + MOXA_MUST_ENUM_REGISTER); + outb(oldlcr, baseio + UART_LCR); +} + +#ifdef CONFIG_PCI +static void mxser_get_must_hardware_id(unsigned long baseio, u8 *pId) +{ + u8 oldlcr; + u8 efr; + + oldlcr = inb(baseio + UART_LCR); + outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR); + + efr = inb(baseio + MOXA_MUST_EFR_REGISTER); + efr &= ~MOXA_MUST_EFR_BANK_MASK; + efr |= MOXA_MUST_EFR_BANK2; + + outb(efr, baseio + MOXA_MUST_EFR_REGISTER); + *pId = inb(baseio + MOXA_MUST_HWID_REGISTER); + outb(oldlcr, baseio + UART_LCR); +} +#endif + +static void SET_MOXA_MUST_NO_SOFTWARE_FLOW_CONTROL(unsigned long baseio) +{ + u8 oldlcr; + u8 efr; + + oldlcr = inb(baseio + UART_LCR); + outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR); + + efr = inb(baseio + MOXA_MUST_EFR_REGISTER); + efr &= ~MOXA_MUST_EFR_SF_MASK; + + outb(efr, baseio + MOXA_MUST_EFR_REGISTER); + outb(oldlcr, baseio + UART_LCR); +} + +static void mxser_enable_must_tx_software_flow_control(unsigned long baseio) +{ + u8 oldlcr; + u8 efr; + + oldlcr = inb(baseio + UART_LCR); + outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR); + + efr = inb(baseio + MOXA_MUST_EFR_REGISTER); + efr &= ~MOXA_MUST_EFR_SF_TX_MASK; + efr |= MOXA_MUST_EFR_SF_TX1; + + outb(efr, baseio + MOXA_MUST_EFR_REGISTER); + outb(oldlcr, baseio + UART_LCR); +} + +static void mxser_disable_must_tx_software_flow_control(unsigned long baseio) +{ + u8 oldlcr; + u8 efr; + + oldlcr = inb(baseio + UART_LCR); + outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR); + + efr = inb(baseio + MOXA_MUST_EFR_REGISTER); + efr &= ~MOXA_MUST_EFR_SF_TX_MASK; + + outb(efr, baseio + MOXA_MUST_EFR_REGISTER); + outb(oldlcr, baseio + UART_LCR); +} + +static void mxser_enable_must_rx_software_flow_control(unsigned long baseio) +{ + u8 oldlcr; + u8 efr; + + oldlcr = inb(baseio + UART_LCR); + outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR); + + efr = inb(baseio + MOXA_MUST_EFR_REGISTER); + efr &= ~MOXA_MUST_EFR_SF_RX_MASK; + efr |= MOXA_MUST_EFR_SF_RX1; + + outb(efr, baseio + MOXA_MUST_EFR_REGISTER); + outb(oldlcr, baseio + UART_LCR); +} + +static void mxser_disable_must_rx_software_flow_control(unsigned long baseio) +{ + u8 oldlcr; + u8 efr; + + oldlcr = inb(baseio + UART_LCR); + outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR); + + efr = inb(baseio + MOXA_MUST_EFR_REGISTER); + efr &= ~MOXA_MUST_EFR_SF_RX_MASK; + + outb(efr, baseio + MOXA_MUST_EFR_REGISTER); + outb(oldlcr, baseio + UART_LCR); +} + +#ifdef CONFIG_PCI +static int __devinit CheckIsMoxaMust(unsigned long io) +{ + u8 oldmcr, hwid; + int i; + + outb(0, io + UART_LCR); + mxser_disable_must_enchance_mode(io); + oldmcr = inb(io + UART_MCR); + outb(0, io + UART_MCR); + mxser_set_must_xon1_value(io, 0x11); + if ((hwid = inb(io + UART_MCR)) != 0) { + outb(oldmcr, io + UART_MCR); + return MOXA_OTHER_UART; + } + + mxser_get_must_hardware_id(io, &hwid); + for (i = 1; i < UART_INFO_NUM; i++) { /* 0 = OTHER_UART */ + if (hwid == Gpci_uart_info[i].type) + return (int)hwid; + } + return MOXA_OTHER_UART; +} +#endif + +static void process_txrx_fifo(struct mxser_port *info) +{ + int i; + + if ((info->type == PORT_16450) || (info->type == PORT_8250)) { + info->rx_trigger = 1; + info->rx_high_water = 1; + info->rx_low_water = 1; + info->xmit_fifo_size = 1; + } else + for (i = 0; i < UART_INFO_NUM; i++) + if (info->board->chip_flag == Gpci_uart_info[i].type) { + info->rx_trigger = Gpci_uart_info[i].rx_trigger; + 
info->rx_low_water = Gpci_uart_info[i].rx_low_water; + info->rx_high_water = Gpci_uart_info[i].rx_high_water; + info->xmit_fifo_size = Gpci_uart_info[i].xmit_fifo_size; + break; + } +} + +static unsigned char mxser_get_msr(int baseaddr, int mode, int port) +{ + static unsigned char mxser_msr[MXSER_PORTS + 1]; + unsigned char status = 0; + + status = inb(baseaddr + UART_MSR); + + mxser_msr[port] &= 0x0F; + mxser_msr[port] |= status; + status = mxser_msr[port]; + if (mode) + mxser_msr[port] = 0; + + return status; +} + +static int mxser_carrier_raised(struct tty_port *port) +{ + struct mxser_port *mp = container_of(port, struct mxser_port, port); + return (inb(mp->ioaddr + UART_MSR) & UART_MSR_DCD)?1:0; +} + +static void mxser_dtr_rts(struct tty_port *port, int on) +{ + struct mxser_port *mp = container_of(port, struct mxser_port, port); + unsigned long flags; + + spin_lock_irqsave(&mp->slock, flags); + if (on) + outb(inb(mp->ioaddr + UART_MCR) | + UART_MCR_DTR | UART_MCR_RTS, mp->ioaddr + UART_MCR); + else + outb(inb(mp->ioaddr + UART_MCR)&~(UART_MCR_DTR | UART_MCR_RTS), + mp->ioaddr + UART_MCR); + spin_unlock_irqrestore(&mp->slock, flags); +} + +static int mxser_set_baud(struct tty_struct *tty, long newspd) +{ + struct mxser_port *info = tty->driver_data; + int quot = 0, baud; + unsigned char cval; + + if (!info->ioaddr) + return -1; + + if (newspd > info->max_baud) + return -1; + + if (newspd == 134) { + quot = 2 * info->baud_base / 269; + tty_encode_baud_rate(tty, 134, 134); + } else if (newspd) { + quot = info->baud_base / newspd; + if (quot == 0) + quot = 1; + baud = info->baud_base/quot; + tty_encode_baud_rate(tty, baud, baud); + } else { + quot = 0; + } + + info->timeout = ((info->xmit_fifo_size * HZ * 10 * quot) / info->baud_base); + info->timeout += HZ / 50; /* Add .02 seconds of slop */ + + if (quot) { + info->MCR |= UART_MCR_DTR; + outb(info->MCR, info->ioaddr + UART_MCR); + } else { + info->MCR &= ~UART_MCR_DTR; + outb(info->MCR, info->ioaddr + UART_MCR); + return 0; + } + + cval = inb(info->ioaddr + UART_LCR); + + outb(cval | UART_LCR_DLAB, info->ioaddr + UART_LCR); /* set DLAB */ + + outb(quot & 0xff, info->ioaddr + UART_DLL); /* LS of divisor */ + outb(quot >> 8, info->ioaddr + UART_DLM); /* MS of divisor */ + outb(cval, info->ioaddr + UART_LCR); /* reset DLAB */ + +#ifdef BOTHER + if (C_BAUD(tty) == BOTHER) { + quot = info->baud_base % newspd; + quot *= 8; + if (quot % newspd > newspd / 2) { + quot /= newspd; + quot++; + } else + quot /= newspd; + + mxser_set_must_enum_value(info->ioaddr, quot); + } else +#endif + mxser_set_must_enum_value(info->ioaddr, 0); + + return 0; +} + +/* + * This routine is called to set the UART divisor registers to match + * the specified baud rate for a serial port. + */ +static int mxser_change_speed(struct tty_struct *tty, + struct ktermios *old_termios) +{ + struct mxser_port *info = tty->driver_data; + unsigned cflag, cval, fcr; + int ret = 0; + unsigned char status; + + cflag = tty->termios->c_cflag; + if (!info->ioaddr) + return ret; + + if (mxser_set_baud_method[tty->index] == 0) + mxser_set_baud(tty, tty_get_baud_rate(tty)); + + /* byte size and parity */ + switch (cflag & CSIZE) { + case CS5: + cval = 0x00; + break; + case CS6: + cval = 0x01; + break; + case CS7: + cval = 0x02; + break; + case CS8: + cval = 0x03; + break; + default: + cval = 0x00; + break; /* too keep GCC shut... 
*/ + } + if (cflag & CSTOPB) + cval |= 0x04; + if (cflag & PARENB) + cval |= UART_LCR_PARITY; + if (!(cflag & PARODD)) + cval |= UART_LCR_EPAR; + if (cflag & CMSPAR) + cval |= UART_LCR_SPAR; + + if ((info->type == PORT_8250) || (info->type == PORT_16450)) { + if (info->board->chip_flag) { + fcr = UART_FCR_ENABLE_FIFO; + fcr |= MOXA_MUST_FCR_GDA_MODE_ENABLE; + mxser_set_must_fifo_value(info); + } else + fcr = 0; + } else { + fcr = UART_FCR_ENABLE_FIFO; + if (info->board->chip_flag) { + fcr |= MOXA_MUST_FCR_GDA_MODE_ENABLE; + mxser_set_must_fifo_value(info); + } else { + switch (info->rx_trigger) { + case 1: + fcr |= UART_FCR_TRIGGER_1; + break; + case 4: + fcr |= UART_FCR_TRIGGER_4; + break; + case 8: + fcr |= UART_FCR_TRIGGER_8; + break; + default: + fcr |= UART_FCR_TRIGGER_14; + break; + } + } + } + + /* CTS flow control flag and modem status interrupts */ + info->IER &= ~UART_IER_MSI; + info->MCR &= ~UART_MCR_AFE; + if (cflag & CRTSCTS) { + info->port.flags |= ASYNC_CTS_FLOW; + info->IER |= UART_IER_MSI; + if ((info->type == PORT_16550A) || (info->board->chip_flag)) { + info->MCR |= UART_MCR_AFE; + } else { + status = inb(info->ioaddr + UART_MSR); + if (tty->hw_stopped) { + if (status & UART_MSR_CTS) { + tty->hw_stopped = 0; + if (info->type != PORT_16550A && + !info->board->chip_flag) { + outb(info->IER & ~UART_IER_THRI, + info->ioaddr + + UART_IER); + info->IER |= UART_IER_THRI; + outb(info->IER, info->ioaddr + + UART_IER); + } + tty_wakeup(tty); + } + } else { + if (!(status & UART_MSR_CTS)) { + tty->hw_stopped = 1; + if ((info->type != PORT_16550A) && + (!info->board->chip_flag)) { + info->IER &= ~UART_IER_THRI; + outb(info->IER, info->ioaddr + + UART_IER); + } + } + } + } + } else { + info->port.flags &= ~ASYNC_CTS_FLOW; + } + outb(info->MCR, info->ioaddr + UART_MCR); + if (cflag & CLOCAL) { + info->port.flags &= ~ASYNC_CHECK_CD; + } else { + info->port.flags |= ASYNC_CHECK_CD; + info->IER |= UART_IER_MSI; + } + outb(info->IER, info->ioaddr + UART_IER); + + /* + * Set up parity check flag + */ + info->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR; + if (I_INPCK(tty)) + info->read_status_mask |= UART_LSR_FE | UART_LSR_PE; + if (I_BRKINT(tty) || I_PARMRK(tty)) + info->read_status_mask |= UART_LSR_BI; + + info->ignore_status_mask = 0; + + if (I_IGNBRK(tty)) { + info->ignore_status_mask |= UART_LSR_BI; + info->read_status_mask |= UART_LSR_BI; + /* + * If we're ignore parity and break indicators, ignore + * overruns too. (For real raw support). 
+ */ + if (I_IGNPAR(tty)) { + info->ignore_status_mask |= + UART_LSR_OE | + UART_LSR_PE | + UART_LSR_FE; + info->read_status_mask |= + UART_LSR_OE | + UART_LSR_PE | + UART_LSR_FE; + } + } + if (info->board->chip_flag) { + mxser_set_must_xon1_value(info->ioaddr, START_CHAR(tty)); + mxser_set_must_xoff1_value(info->ioaddr, STOP_CHAR(tty)); + if (I_IXON(tty)) { + mxser_enable_must_rx_software_flow_control( + info->ioaddr); + } else { + mxser_disable_must_rx_software_flow_control( + info->ioaddr); + } + if (I_IXOFF(tty)) { + mxser_enable_must_tx_software_flow_control( + info->ioaddr); + } else { + mxser_disable_must_tx_software_flow_control( + info->ioaddr); + } + } + + + outb(fcr, info->ioaddr + UART_FCR); /* set fcr */ + outb(cval, info->ioaddr + UART_LCR); + + return ret; +} + +static void mxser_check_modem_status(struct tty_struct *tty, + struct mxser_port *port, int status) +{ + /* update input line counters */ + if (status & UART_MSR_TERI) + port->icount.rng++; + if (status & UART_MSR_DDSR) + port->icount.dsr++; + if (status & UART_MSR_DDCD) + port->icount.dcd++; + if (status & UART_MSR_DCTS) + port->icount.cts++; + port->mon_data.modem_status = status; + wake_up_interruptible(&port->port.delta_msr_wait); + + if ((port->port.flags & ASYNC_CHECK_CD) && (status & UART_MSR_DDCD)) { + if (status & UART_MSR_DCD) + wake_up_interruptible(&port->port.open_wait); + } + + if (port->port.flags & ASYNC_CTS_FLOW) { + if (tty->hw_stopped) { + if (status & UART_MSR_CTS) { + tty->hw_stopped = 0; + + if ((port->type != PORT_16550A) && + (!port->board->chip_flag)) { + outb(port->IER & ~UART_IER_THRI, + port->ioaddr + UART_IER); + port->IER |= UART_IER_THRI; + outb(port->IER, port->ioaddr + + UART_IER); + } + tty_wakeup(tty); + } + } else { + if (!(status & UART_MSR_CTS)) { + tty->hw_stopped = 1; + if (port->type != PORT_16550A && + !port->board->chip_flag) { + port->IER &= ~UART_IER_THRI; + outb(port->IER, port->ioaddr + + UART_IER); + } + } + } + } +} + +static int mxser_activate(struct tty_port *port, struct tty_struct *tty) +{ + struct mxser_port *info = container_of(port, struct mxser_port, port); + unsigned long page; + unsigned long flags; + + page = __get_free_page(GFP_KERNEL); + if (!page) + return -ENOMEM; + + spin_lock_irqsave(&info->slock, flags); + + if (!info->ioaddr || !info->type) { + set_bit(TTY_IO_ERROR, &tty->flags); + free_page(page); + spin_unlock_irqrestore(&info->slock, flags); + return 0; + } + info->port.xmit_buf = (unsigned char *) page; + + /* + * Clear the FIFO buffers and disable them + * (they will be reenabled in mxser_change_speed()) + */ + if (info->board->chip_flag) + outb((UART_FCR_CLEAR_RCVR | + UART_FCR_CLEAR_XMIT | + MOXA_MUST_FCR_GDA_MODE_ENABLE), info->ioaddr + UART_FCR); + else + outb((UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT), + info->ioaddr + UART_FCR); + + /* + * At this point there's no way the LSR could still be 0xFF; + * if it is, then bail out, because there's likely no UART + * here. + */ + if (inb(info->ioaddr + UART_LSR) == 0xff) { + spin_unlock_irqrestore(&info->slock, flags); + if (capable(CAP_SYS_ADMIN)) { + set_bit(TTY_IO_ERROR, &tty->flags); + return 0; + } else + return -ENODEV; + } + + /* + * Clear the interrupt registers. 
+ */ + (void) inb(info->ioaddr + UART_LSR); + (void) inb(info->ioaddr + UART_RX); + (void) inb(info->ioaddr + UART_IIR); + (void) inb(info->ioaddr + UART_MSR); + + /* + * Now, initialize the UART + */ + outb(UART_LCR_WLEN8, info->ioaddr + UART_LCR); /* reset DLAB */ + info->MCR = UART_MCR_DTR | UART_MCR_RTS; + outb(info->MCR, info->ioaddr + UART_MCR); + + /* + * Finally, enable interrupts + */ + info->IER = UART_IER_MSI | UART_IER_RLSI | UART_IER_RDI; + + if (info->board->chip_flag) + info->IER |= MOXA_MUST_IER_EGDAI; + outb(info->IER, info->ioaddr + UART_IER); /* enable interrupts */ + + /* + * And clear the interrupt registers again for luck. + */ + (void) inb(info->ioaddr + UART_LSR); + (void) inb(info->ioaddr + UART_RX); + (void) inb(info->ioaddr + UART_IIR); + (void) inb(info->ioaddr + UART_MSR); + + clear_bit(TTY_IO_ERROR, &tty->flags); + info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; + + /* + * and set the speed of the serial port + */ + mxser_change_speed(tty, NULL); + spin_unlock_irqrestore(&info->slock, flags); + + return 0; +} + +/* + * This routine will shutdown a serial port + */ +static void mxser_shutdown_port(struct tty_port *port) +{ + struct mxser_port *info = container_of(port, struct mxser_port, port); + unsigned long flags; + + spin_lock_irqsave(&info->slock, flags); + + /* + * clear delta_msr_wait queue to avoid mem leaks: we may free the irq + * here so the queue might never be waken up + */ + wake_up_interruptible(&info->port.delta_msr_wait); + + /* + * Free the xmit buffer, if necessary + */ + if (info->port.xmit_buf) { + free_page((unsigned long) info->port.xmit_buf); + info->port.xmit_buf = NULL; + } + + info->IER = 0; + outb(0x00, info->ioaddr + UART_IER); + + /* clear Rx/Tx FIFO's */ + if (info->board->chip_flag) + outb(UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT | + MOXA_MUST_FCR_GDA_MODE_ENABLE, + info->ioaddr + UART_FCR); + else + outb(UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT, + info->ioaddr + UART_FCR); + + /* read data port to reset things */ + (void) inb(info->ioaddr + UART_RX); + + + if (info->board->chip_flag) + SET_MOXA_MUST_NO_SOFTWARE_FLOW_CONTROL(info->ioaddr); + + spin_unlock_irqrestore(&info->slock, flags); +} + +/* + * This routine is called whenever a serial port is opened. It + * enables interrupts for a serial port, linking in its async structure into + * the IRQ chain. It also performs the serial-specific + * initialization for the tty structure. 
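+ *
+ * The minor number picks the board and port: assuming the driver's usual
+ * MXSER_PORTS_PER_BOARD of 8 (the constant is not visible in this hunk),
+ * tty->index 10 would resolve to board 10 / 8 = 1 and port 10 % 8 = 2,
+ * while the very last minor (MXSER_PORTS) is reserved for the management
+ * node that mxser_ioctl_special() serves.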
+ */ +static int mxser_open(struct tty_struct *tty, struct file *filp) +{ + struct mxser_port *info; + int line; + + line = tty->index; + if (line == MXSER_PORTS) + return 0; + if (line < 0 || line > MXSER_PORTS) + return -ENODEV; + info = &mxser_boards[line / MXSER_PORTS_PER_BOARD].ports[line % MXSER_PORTS_PER_BOARD]; + if (!info->ioaddr) + return -ENODEV; + + tty->driver_data = info; + return tty_port_open(&info->port, tty, filp); +} + +static void mxser_flush_buffer(struct tty_struct *tty) +{ + struct mxser_port *info = tty->driver_data; + char fcr; + unsigned long flags; + + + spin_lock_irqsave(&info->slock, flags); + info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; + + fcr = inb(info->ioaddr + UART_FCR); + outb((fcr | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT), + info->ioaddr + UART_FCR); + outb(fcr, info->ioaddr + UART_FCR); + + spin_unlock_irqrestore(&info->slock, flags); + + tty_wakeup(tty); +} + + +static void mxser_close_port(struct tty_port *port) +{ + struct mxser_port *info = container_of(port, struct mxser_port, port); + unsigned long timeout; + /* + * At this point we stop accepting input. To do this, we + * disable the receive line status interrupts, and tell the + * interrupt driver to stop checking the data ready bit in the + * line status register. + */ + info->IER &= ~UART_IER_RLSI; + if (info->board->chip_flag) + info->IER &= ~MOXA_MUST_RECV_ISR; + + outb(info->IER, info->ioaddr + UART_IER); + /* + * Before we drop DTR, make sure the UART transmitter + * has completely drained; this is especially + * important if there is a transmit FIFO! + */ + timeout = jiffies + HZ; + while (!(inb(info->ioaddr + UART_LSR) & UART_LSR_TEMT)) { + schedule_timeout_interruptible(5); + if (time_after(jiffies, timeout)) + break; + } +} + +/* + * This routine is called when the serial port gets closed. First, we + * wait for the last remaining data to be sent. Then, we unlink its + * async structure from the interrupt chain if necessary, and we free + * that IRQ if nothing is left in the chain. 
+ */ +static void mxser_close(struct tty_struct *tty, struct file *filp) +{ + struct mxser_port *info = tty->driver_data; + struct tty_port *port = &info->port; + + if (tty->index == MXSER_PORTS || info == NULL) + return; + if (tty_port_close_start(port, tty, filp) == 0) + return; + mutex_lock(&port->mutex); + mxser_close_port(port); + mxser_flush_buffer(tty); + mxser_shutdown_port(port); + clear_bit(ASYNCB_INITIALIZED, &port->flags); + mutex_unlock(&port->mutex); + /* Right now the tty_port set is done outside of the close_end helper + as we don't yet have everyone using refcounts */ + tty_port_close_end(port, tty); + tty_port_tty_set(port, NULL); +} + +static int mxser_write(struct tty_struct *tty, const unsigned char *buf, int count) +{ + int c, total = 0; + struct mxser_port *info = tty->driver_data; + unsigned long flags; + + if (!info->port.xmit_buf) + return 0; + + while (1) { + c = min_t(int, count, min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1, + SERIAL_XMIT_SIZE - info->xmit_head)); + if (c <= 0) + break; + + memcpy(info->port.xmit_buf + info->xmit_head, buf, c); + spin_lock_irqsave(&info->slock, flags); + info->xmit_head = (info->xmit_head + c) & + (SERIAL_XMIT_SIZE - 1); + info->xmit_cnt += c; + spin_unlock_irqrestore(&info->slock, flags); + + buf += c; + count -= c; + total += c; + } + + if (info->xmit_cnt && !tty->stopped) { + if (!tty->hw_stopped || + (info->type == PORT_16550A) || + (info->board->chip_flag)) { + spin_lock_irqsave(&info->slock, flags); + outb(info->IER & ~UART_IER_THRI, info->ioaddr + + UART_IER); + info->IER |= UART_IER_THRI; + outb(info->IER, info->ioaddr + UART_IER); + spin_unlock_irqrestore(&info->slock, flags); + } + } + return total; +} + +static int mxser_put_char(struct tty_struct *tty, unsigned char ch) +{ + struct mxser_port *info = tty->driver_data; + unsigned long flags; + + if (!info->port.xmit_buf) + return 0; + + if (info->xmit_cnt >= SERIAL_XMIT_SIZE - 1) + return 0; + + spin_lock_irqsave(&info->slock, flags); + info->port.xmit_buf[info->xmit_head++] = ch; + info->xmit_head &= SERIAL_XMIT_SIZE - 1; + info->xmit_cnt++; + spin_unlock_irqrestore(&info->slock, flags); + if (!tty->stopped) { + if (!tty->hw_stopped || + (info->type == PORT_16550A) || + info->board->chip_flag) { + spin_lock_irqsave(&info->slock, flags); + outb(info->IER & ~UART_IER_THRI, info->ioaddr + UART_IER); + info->IER |= UART_IER_THRI; + outb(info->IER, info->ioaddr + UART_IER); + spin_unlock_irqrestore(&info->slock, flags); + } + } + return 1; +} + + +static void mxser_flush_chars(struct tty_struct *tty) +{ + struct mxser_port *info = tty->driver_data; + unsigned long flags; + + if (info->xmit_cnt <= 0 || tty->stopped || !info->port.xmit_buf || + (tty->hw_stopped && info->type != PORT_16550A && + !info->board->chip_flag)) + return; + + spin_lock_irqsave(&info->slock, flags); + + outb(info->IER & ~UART_IER_THRI, info->ioaddr + UART_IER); + info->IER |= UART_IER_THRI; + outb(info->IER, info->ioaddr + UART_IER); + + spin_unlock_irqrestore(&info->slock, flags); +} + +static int mxser_write_room(struct tty_struct *tty) +{ + struct mxser_port *info = tty->driver_data; + int ret; + + ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1; + return ret < 0 ? 
0 : ret; +} + +static int mxser_chars_in_buffer(struct tty_struct *tty) +{ + struct mxser_port *info = tty->driver_data; + return info->xmit_cnt; +} + +/* + * ------------------------------------------------------------ + * friends of mxser_ioctl() + * ------------------------------------------------------------ + */ +static int mxser_get_serial_info(struct tty_struct *tty, + struct serial_struct __user *retinfo) +{ + struct mxser_port *info = tty->driver_data; + struct serial_struct tmp = { + .type = info->type, + .line = tty->index, + .port = info->ioaddr, + .irq = info->board->irq, + .flags = info->port.flags, + .baud_base = info->baud_base, + .close_delay = info->port.close_delay, + .closing_wait = info->port.closing_wait, + .custom_divisor = info->custom_divisor, + .hub6 = 0 + }; + if (copy_to_user(retinfo, &tmp, sizeof(*retinfo))) + return -EFAULT; + return 0; +} + +static int mxser_set_serial_info(struct tty_struct *tty, + struct serial_struct __user *new_info) +{ + struct mxser_port *info = tty->driver_data; + struct tty_port *port = &info->port; + struct serial_struct new_serial; + speed_t baud; + unsigned long sl_flags; + unsigned int flags; + int retval = 0; + + if (!new_info || !info->ioaddr) + return -ENODEV; + if (copy_from_user(&new_serial, new_info, sizeof(new_serial))) + return -EFAULT; + + if (new_serial.irq != info->board->irq || + new_serial.port != info->ioaddr) + return -EINVAL; + + flags = port->flags & ASYNC_SPD_MASK; + + if (!capable(CAP_SYS_ADMIN)) { + if ((new_serial.baud_base != info->baud_base) || + (new_serial.close_delay != info->port.close_delay) || + ((new_serial.flags & ~ASYNC_USR_MASK) != (info->port.flags & ~ASYNC_USR_MASK))) + return -EPERM; + info->port.flags = ((info->port.flags & ~ASYNC_USR_MASK) | + (new_serial.flags & ASYNC_USR_MASK)); + } else { + /* + * OK, past this point, all the error checking has been done. + * At this point, we start making changes..... + */ + port->flags = ((port->flags & ~ASYNC_FLAGS) | + (new_serial.flags & ASYNC_FLAGS)); + port->close_delay = new_serial.close_delay * HZ / 100; + port->closing_wait = new_serial.closing_wait * HZ / 100; + tty->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0; + if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST && + (new_serial.baud_base != info->baud_base || + new_serial.custom_divisor != + info->custom_divisor)) { + if (new_serial.custom_divisor == 0) + return -EINVAL; + baud = new_serial.baud_base / new_serial.custom_divisor; + tty_encode_baud_rate(tty, baud, baud); + } + } + + info->type = new_serial.type; + + process_txrx_fifo(info); + + if (test_bit(ASYNCB_INITIALIZED, &port->flags)) { + if (flags != (port->flags & ASYNC_SPD_MASK)) { + spin_lock_irqsave(&info->slock, sl_flags); + mxser_change_speed(tty, NULL); + spin_unlock_irqrestore(&info->slock, sl_flags); + } + } else { + retval = mxser_activate(port, tty); + if (retval == 0) + set_bit(ASYNCB_INITIALIZED, &port->flags); + } + return retval; +} + +/* + * mxser_get_lsr_info - get line status register info + * + * Purpose: Let user call ioctl() to get info when the UART physically + * is emptied. On bus types like RS485, the transmitter must + * release the bus after transmitting. This must be done when + * the transmit shift register is empty, not be done when the + * transmit holding register is empty. This functionality + * allows an RS485 driver to be written in user space. 
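+ *
+ * An illustrative user-space sketch (not part of this driver), assuming
+ * "fd" is an already-opened mxser port; TIOCSERGETLSR and TIOCSER_TEMT
+ * come from the serial ioctl headers. Once the loop falls through, the
+ * shift register is empty and an RS485 application can release the bus:
+ *
+ *	unsigned int lsr = 0;
+ *
+ *	do {
+ *		if (ioctl(fd, TIOCSERGETLSR, &lsr) < 0)
+ *			break;
+ *	} while (!(lsr & TIOCSER_TEMT));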
+ */ +static int mxser_get_lsr_info(struct mxser_port *info, + unsigned int __user *value) +{ + unsigned char status; + unsigned int result; + unsigned long flags; + + spin_lock_irqsave(&info->slock, flags); + status = inb(info->ioaddr + UART_LSR); + spin_unlock_irqrestore(&info->slock, flags); + result = ((status & UART_LSR_TEMT) ? TIOCSER_TEMT : 0); + return put_user(result, value); +} + +static int mxser_tiocmget(struct tty_struct *tty) +{ + struct mxser_port *info = tty->driver_data; + unsigned char control, status; + unsigned long flags; + + + if (tty->index == MXSER_PORTS) + return -ENOIOCTLCMD; + if (test_bit(TTY_IO_ERROR, &tty->flags)) + return -EIO; + + control = info->MCR; + + spin_lock_irqsave(&info->slock, flags); + status = inb(info->ioaddr + UART_MSR); + if (status & UART_MSR_ANY_DELTA) + mxser_check_modem_status(tty, info, status); + spin_unlock_irqrestore(&info->slock, flags); + return ((control & UART_MCR_RTS) ? TIOCM_RTS : 0) | + ((control & UART_MCR_DTR) ? TIOCM_DTR : 0) | + ((status & UART_MSR_DCD) ? TIOCM_CAR : 0) | + ((status & UART_MSR_RI) ? TIOCM_RNG : 0) | + ((status & UART_MSR_DSR) ? TIOCM_DSR : 0) | + ((status & UART_MSR_CTS) ? TIOCM_CTS : 0); +} + +static int mxser_tiocmset(struct tty_struct *tty, + unsigned int set, unsigned int clear) +{ + struct mxser_port *info = tty->driver_data; + unsigned long flags; + + + if (tty->index == MXSER_PORTS) + return -ENOIOCTLCMD; + if (test_bit(TTY_IO_ERROR, &tty->flags)) + return -EIO; + + spin_lock_irqsave(&info->slock, flags); + + if (set & TIOCM_RTS) + info->MCR |= UART_MCR_RTS; + if (set & TIOCM_DTR) + info->MCR |= UART_MCR_DTR; + + if (clear & TIOCM_RTS) + info->MCR &= ~UART_MCR_RTS; + if (clear & TIOCM_DTR) + info->MCR &= ~UART_MCR_DTR; + + outb(info->MCR, info->ioaddr + UART_MCR); + spin_unlock_irqrestore(&info->slock, flags); + return 0; +} + +static int __init mxser_program_mode(int port) +{ + int id, i, j, n; + + outb(0, port); + outb(0, port); + outb(0, port); + (void)inb(port); + (void)inb(port); + outb(0, port); + (void)inb(port); + + id = inb(port + 1) & 0x1F; + if ((id != C168_ASIC_ID) && + (id != C104_ASIC_ID) && + (id != C102_ASIC_ID) && + (id != CI132_ASIC_ID) && + (id != CI134_ASIC_ID) && + (id != CI104J_ASIC_ID)) + return -1; + for (i = 0, j = 0; i < 4; i++) { + n = inb(port + 2); + if (n == 'M') { + j = 1; + } else if ((j == 1) && (n == 1)) { + j = 2; + break; + } else + j = 0; + } + if (j != 2) + id = -2; + return id; +} + +static void __init mxser_normal_mode(int port) +{ + int i, n; + + outb(0xA5, port + 1); + outb(0x80, port + 3); + outb(12, port + 0); /* 9600 bps */ + outb(0, port + 1); + outb(0x03, port + 3); /* 8 data bits */ + outb(0x13, port + 4); /* loop back mode */ + for (i = 0; i < 16; i++) { + n = inb(port + 5); + if ((n & 0x61) == 0x60) + break; + if ((n & 1) == 1) + (void)inb(port); + } + outb(0x00, port + 4); +} + +#define CHIP_SK 0x01 /* Serial Data Clock in Eprom */ +#define CHIP_DO 0x02 /* Serial Data Output in Eprom */ +#define CHIP_CS 0x04 /* Serial Chip Select in Eprom */ +#define CHIP_DI 0x08 /* Serial Data Input in Eprom */ +#define EN_CCMD 0x000 /* Chip's command register */ +#define EN0_RSARLO 0x008 /* Remote start address reg 0 */ +#define EN0_RSARHI 0x009 /* Remote start address reg 1 */ +#define EN0_RCNTLO 0x00A /* Remote byte count reg WR */ +#define EN0_RCNTHI 0x00B /* Remote byte count reg WR */ +#define EN0_DCFG 0x00E /* Data configuration reg WR */ +#define EN0_PORT 0x010 /* Rcv missed frame error counter RD */ +#define ENC_PAGE0 0x000 /* Select page 0 of chip registers 
*/ +#define ENC_PAGE3 0x0C0 /* Select page 3 of chip registers */ +static int __init mxser_read_register(int port, unsigned short *regs) +{ + int i, k, value, id; + unsigned int j; + + id = mxser_program_mode(port); + if (id < 0) + return id; + for (i = 0; i < 14; i++) { + k = (i & 0x3F) | 0x180; + for (j = 0x100; j > 0; j >>= 1) { + outb(CHIP_CS, port); + if (k & j) { + outb(CHIP_CS | CHIP_DO, port); + outb(CHIP_CS | CHIP_DO | CHIP_SK, port); /* A? bit of read */ + } else { + outb(CHIP_CS, port); + outb(CHIP_CS | CHIP_SK, port); /* A? bit of read */ + } + } + (void)inb(port); + value = 0; + for (k = 0, j = 0x8000; k < 16; k++, j >>= 1) { + outb(CHIP_CS, port); + outb(CHIP_CS | CHIP_SK, port); + if (inb(port) & CHIP_DI) + value |= j; + } + regs[i] = value; + outb(0, port); + } + mxser_normal_mode(port); + return id; +} + +static int mxser_ioctl_special(unsigned int cmd, void __user *argp) +{ + struct mxser_port *ip; + struct tty_port *port; + struct tty_struct *tty; + int result, status; + unsigned int i, j; + int ret = 0; + + switch (cmd) { + case MOXA_GET_MAJOR: + if (printk_ratelimit()) + printk(KERN_WARNING "mxser: '%s' uses deprecated ioctl " + "%x (GET_MAJOR), fix your userspace\n", + current->comm, cmd); + return put_user(ttymajor, (int __user *)argp); + + case MOXA_CHKPORTENABLE: + result = 0; + for (i = 0; i < MXSER_BOARDS; i++) + for (j = 0; j < MXSER_PORTS_PER_BOARD; j++) + if (mxser_boards[i].ports[j].ioaddr) + result |= (1 << i); + return put_user(result, (unsigned long __user *)argp); + case MOXA_GETDATACOUNT: + /* The receive side is locked by port->slock but it isn't + clear that an exact snapshot is worth copying here */ + if (copy_to_user(argp, &mxvar_log, sizeof(mxvar_log))) + ret = -EFAULT; + return ret; + case MOXA_GETMSTATUS: { + struct mxser_mstatus ms, __user *msu = argp; + for (i = 0; i < MXSER_BOARDS; i++) + for (j = 0; j < MXSER_PORTS_PER_BOARD; j++) { + ip = &mxser_boards[i].ports[j]; + port = &ip->port; + memset(&ms, 0, sizeof(ms)); + + mutex_lock(&port->mutex); + if (!ip->ioaddr) + goto copy; + + tty = tty_port_tty_get(port); + + if (!tty || !tty->termios) + ms.cflag = ip->normal_termios.c_cflag; + else + ms.cflag = tty->termios->c_cflag; + tty_kref_put(tty); + spin_lock_irq(&ip->slock); + status = inb(ip->ioaddr + UART_MSR); + spin_unlock_irq(&ip->slock); + if (status & UART_MSR_DCD) + ms.dcd = 1; + if (status & UART_MSR_DSR) + ms.dsr = 1; + if (status & UART_MSR_CTS) + ms.cts = 1; + copy: + mutex_unlock(&port->mutex); + if (copy_to_user(msu, &ms, sizeof(ms))) + return -EFAULT; + msu++; + } + return 0; + } + case MOXA_ASPP_MON_EXT: { + struct mxser_mon_ext *me; /* it's 2k, stack unfriendly */ + unsigned int cflag, iflag, p; + u8 opmode; + + me = kzalloc(sizeof(*me), GFP_KERNEL); + if (!me) + return -ENOMEM; + + for (i = 0, p = 0; i < MXSER_BOARDS; i++) { + for (j = 0; j < MXSER_PORTS_PER_BOARD; j++, p++) { + if (p >= ARRAY_SIZE(me->rx_cnt)) { + i = MXSER_BOARDS; + break; + } + ip = &mxser_boards[i].ports[j]; + port = &ip->port; + + mutex_lock(&port->mutex); + if (!ip->ioaddr) { + mutex_unlock(&port->mutex); + continue; + } + + spin_lock_irq(&ip->slock); + status = mxser_get_msr(ip->ioaddr, 0, p); + + if (status & UART_MSR_TERI) + ip->icount.rng++; + if (status & UART_MSR_DDSR) + ip->icount.dsr++; + if (status & UART_MSR_DDCD) + ip->icount.dcd++; + if (status & UART_MSR_DCTS) + ip->icount.cts++; + + ip->mon_data.modem_status = status; + me->rx_cnt[p] = ip->mon_data.rxcnt; + me->tx_cnt[p] = ip->mon_data.txcnt; + me->up_rxcnt[p] = ip->mon_data.up_rxcnt; + 
me->up_txcnt[p] = ip->mon_data.up_txcnt; + me->modem_status[p] = + ip->mon_data.modem_status; + spin_unlock_irq(&ip->slock); + + tty = tty_port_tty_get(&ip->port); + + if (!tty || !tty->termios) { + cflag = ip->normal_termios.c_cflag; + iflag = ip->normal_termios.c_iflag; + me->baudrate[p] = tty_termios_baud_rate(&ip->normal_termios); + } else { + cflag = tty->termios->c_cflag; + iflag = tty->termios->c_iflag; + me->baudrate[p] = tty_get_baud_rate(tty); + } + tty_kref_put(tty); + + me->databits[p] = cflag & CSIZE; + me->stopbits[p] = cflag & CSTOPB; + me->parity[p] = cflag & (PARENB | PARODD | + CMSPAR); + + if (cflag & CRTSCTS) + me->flowctrl[p] |= 0x03; + + if (iflag & (IXON | IXOFF)) + me->flowctrl[p] |= 0x0C; + + if (ip->type == PORT_16550A) + me->fifo[p] = 1; + + opmode = inb(ip->opmode_ioaddr)>>((p % 4) * 2); + opmode &= OP_MODE_MASK; + me->iftype[p] = opmode; + mutex_unlock(&port->mutex); + } + } + if (copy_to_user(argp, me, sizeof(*me))) + ret = -EFAULT; + kfree(me); + return ret; + } + default: + return -ENOIOCTLCMD; + } + return 0; +} + +static int mxser_cflags_changed(struct mxser_port *info, unsigned long arg, + struct async_icount *cprev) +{ + struct async_icount cnow; + unsigned long flags; + int ret; + + spin_lock_irqsave(&info->slock, flags); + cnow = info->icount; /* atomic copy */ + spin_unlock_irqrestore(&info->slock, flags); + + ret = ((arg & TIOCM_RNG) && (cnow.rng != cprev->rng)) || + ((arg & TIOCM_DSR) && (cnow.dsr != cprev->dsr)) || + ((arg & TIOCM_CD) && (cnow.dcd != cprev->dcd)) || + ((arg & TIOCM_CTS) && (cnow.cts != cprev->cts)); + + *cprev = cnow; + + return ret; +} + +static int mxser_ioctl(struct tty_struct *tty, + unsigned int cmd, unsigned long arg) +{ + struct mxser_port *info = tty->driver_data; + struct tty_port *port = &info->port; + struct async_icount cnow; + unsigned long flags; + void __user *argp = (void __user *)arg; + int retval; + + if (tty->index == MXSER_PORTS) + return mxser_ioctl_special(cmd, argp); + + if (cmd == MOXA_SET_OP_MODE || cmd == MOXA_GET_OP_MODE) { + int p; + unsigned long opmode; + static unsigned char ModeMask[] = { 0xfc, 0xf3, 0xcf, 0x3f }; + int shiftbit; + unsigned char val, mask; + + p = tty->index % 4; + if (cmd == MOXA_SET_OP_MODE) { + if (get_user(opmode, (int __user *) argp)) + return -EFAULT; + if (opmode != RS232_MODE && + opmode != RS485_2WIRE_MODE && + opmode != RS422_MODE && + opmode != RS485_4WIRE_MODE) + return -EFAULT; + mask = ModeMask[p]; + shiftbit = p * 2; + spin_lock_irq(&info->slock); + val = inb(info->opmode_ioaddr); + val &= mask; + val |= (opmode << shiftbit); + outb(val, info->opmode_ioaddr); + spin_unlock_irq(&info->slock); + } else { + shiftbit = p * 2; + spin_lock_irq(&info->slock); + opmode = inb(info->opmode_ioaddr) >> shiftbit; + spin_unlock_irq(&info->slock); + opmode &= OP_MODE_MASK; + if (put_user(opmode, (int __user *)argp)) + return -EFAULT; + } + return 0; + } + + if (cmd != TIOCGSERIAL && cmd != TIOCMIWAIT && + test_bit(TTY_IO_ERROR, &tty->flags)) + return -EIO; + + switch (cmd) { + case TIOCGSERIAL: + mutex_lock(&port->mutex); + retval = mxser_get_serial_info(tty, argp); + mutex_unlock(&port->mutex); + return retval; + case TIOCSSERIAL: + mutex_lock(&port->mutex); + retval = mxser_set_serial_info(tty, argp); + mutex_unlock(&port->mutex); + return retval; + case TIOCSERGETLSR: /* Get line status register */ + return mxser_get_lsr_info(info, argp); + /* + * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change + * - mask passed in arg for lines of interest + * (use |'ed 
TIOCM_RNG/DSR/CD/CTS for masking) + * Caller should use TIOCGICOUNT to see which one it was + */ + case TIOCMIWAIT: + spin_lock_irqsave(&info->slock, flags); + cnow = info->icount; /* note the counters on entry */ + spin_unlock_irqrestore(&info->slock, flags); + + return wait_event_interruptible(info->port.delta_msr_wait, + mxser_cflags_changed(info, arg, &cnow)); + case MOXA_HighSpeedOn: + return put_user(info->baud_base != 115200 ? 1 : 0, (int __user *)argp); + case MOXA_SDS_RSTICOUNTER: + spin_lock_irq(&info->slock); + info->mon_data.rxcnt = 0; + info->mon_data.txcnt = 0; + spin_unlock_irq(&info->slock); + return 0; + + case MOXA_ASPP_OQUEUE:{ + int len, lsr; + + len = mxser_chars_in_buffer(tty); + spin_lock_irq(&info->slock); + lsr = inb(info->ioaddr + UART_LSR) & UART_LSR_THRE; + spin_unlock_irq(&info->slock); + len += (lsr ? 0 : 1); + + return put_user(len, (int __user *)argp); + } + case MOXA_ASPP_MON: { + int mcr, status; + + spin_lock_irq(&info->slock); + status = mxser_get_msr(info->ioaddr, 1, tty->index); + mxser_check_modem_status(tty, info, status); + + mcr = inb(info->ioaddr + UART_MCR); + spin_unlock_irq(&info->slock); + + if (mcr & MOXA_MUST_MCR_XON_FLAG) + info->mon_data.hold_reason &= ~NPPI_NOTIFY_XOFFHOLD; + else + info->mon_data.hold_reason |= NPPI_NOTIFY_XOFFHOLD; + + if (mcr & MOXA_MUST_MCR_TX_XON) + info->mon_data.hold_reason &= ~NPPI_NOTIFY_XOFFXENT; + else + info->mon_data.hold_reason |= NPPI_NOTIFY_XOFFXENT; + + if (tty->hw_stopped) + info->mon_data.hold_reason |= NPPI_NOTIFY_CTSHOLD; + else + info->mon_data.hold_reason &= ~NPPI_NOTIFY_CTSHOLD; + + if (copy_to_user(argp, &info->mon_data, + sizeof(struct mxser_mon))) + return -EFAULT; + + return 0; + } + case MOXA_ASPP_LSTATUS: { + if (put_user(info->err_shadow, (unsigned char __user *)argp)) + return -EFAULT; + + info->err_shadow = 0; + return 0; + } + case MOXA_SET_BAUD_METHOD: { + int method; + + if (get_user(method, (int __user *)argp)) + return -EFAULT; + mxser_set_baud_method[tty->index] = method; + return put_user(method, (int __user *)argp); + } + default: + return -ENOIOCTLCMD; + } + return 0; +} + + /* + * Get counter of input serial line interrupts (DCD,RI,DSR,CTS) + * Return: write counters to the user passed counter struct + * NB: both 1->0 and 0->1 transitions are counted except for + * RI where only 0->1 is counted. 
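+ *
+ * An illustrative user-space sketch (not part of this driver), assuming
+ * "fd" is an already-opened port; TIOCMIWAIT, TIOCGICOUNT and struct
+ * serial_icounter_struct come from the serial ioctl headers. The first
+ * ioctl sleeps until DCD changes state, the second reads the counters:
+ *
+ *	struct serial_icounter_struct ic;
+ *
+ *	if (ioctl(fd, TIOCMIWAIT, TIOCM_CD) == 0 &&
+ *	    ioctl(fd, TIOCGICOUNT, &ic) == 0)
+ *		printf("DCD transitions so far: %d\n", ic.dcd);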
+ */ + +static int mxser_get_icount(struct tty_struct *tty, + struct serial_icounter_struct *icount) + +{ + struct mxser_port *info = tty->driver_data; + struct async_icount cnow; + unsigned long flags; + + spin_lock_irqsave(&info->slock, flags); + cnow = info->icount; + spin_unlock_irqrestore(&info->slock, flags); + + icount->frame = cnow.frame; + icount->brk = cnow.brk; + icount->overrun = cnow.overrun; + icount->buf_overrun = cnow.buf_overrun; + icount->parity = cnow.parity; + icount->rx = cnow.rx; + icount->tx = cnow.tx; + icount->cts = cnow.cts; + icount->dsr = cnow.dsr; + icount->rng = cnow.rng; + icount->dcd = cnow.dcd; + return 0; +} + +static void mxser_stoprx(struct tty_struct *tty) +{ + struct mxser_port *info = tty->driver_data; + + info->ldisc_stop_rx = 1; + if (I_IXOFF(tty)) { + if (info->board->chip_flag) { + info->IER &= ~MOXA_MUST_RECV_ISR; + outb(info->IER, info->ioaddr + UART_IER); + } else { + info->x_char = STOP_CHAR(tty); + outb(0, info->ioaddr + UART_IER); + info->IER |= UART_IER_THRI; + outb(info->IER, info->ioaddr + UART_IER); + } + } + + if (tty->termios->c_cflag & CRTSCTS) { + info->MCR &= ~UART_MCR_RTS; + outb(info->MCR, info->ioaddr + UART_MCR); + } +} + +/* + * This routine is called by the upper-layer tty layer to signal that + * incoming characters should be throttled. + */ +static void mxser_throttle(struct tty_struct *tty) +{ + mxser_stoprx(tty); +} + +static void mxser_unthrottle(struct tty_struct *tty) +{ + struct mxser_port *info = tty->driver_data; + + /* startrx */ + info->ldisc_stop_rx = 0; + if (I_IXOFF(tty)) { + if (info->x_char) + info->x_char = 0; + else { + if (info->board->chip_flag) { + info->IER |= MOXA_MUST_RECV_ISR; + outb(info->IER, info->ioaddr + UART_IER); + } else { + info->x_char = START_CHAR(tty); + outb(0, info->ioaddr + UART_IER); + info->IER |= UART_IER_THRI; + outb(info->IER, info->ioaddr + UART_IER); + } + } + } + + if (tty->termios->c_cflag & CRTSCTS) { + info->MCR |= UART_MCR_RTS; + outb(info->MCR, info->ioaddr + UART_MCR); + } +} + +/* + * mxser_stop() and mxser_start() + * + * This routines are called before setting or resetting tty->stopped. + * They enable or disable transmitter interrupts, as necessary. 
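+ *
+ * mxser_start() re-enables the transmit interrupt with the same two-step
+ * idiom used throughout this file: IER is first written with THRI
+ * cleared and then rewritten with THRI set, presumably to force the UART
+ * to raise a fresh transmitter-empty interrupt even when THRE is already
+ * asserted. The recurring pattern (names as used elsewhere in mxser.c):
+ *
+ *	outb(info->IER & ~UART_IER_THRI, info->ioaddr + UART_IER);
+ *	info->IER |= UART_IER_THRI;
+ *	outb(info->IER, info->ioaddr + UART_IER);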
+ */ +static void mxser_stop(struct tty_struct *tty) +{ + struct mxser_port *info = tty->driver_data; + unsigned long flags; + + spin_lock_irqsave(&info->slock, flags); + if (info->IER & UART_IER_THRI) { + info->IER &= ~UART_IER_THRI; + outb(info->IER, info->ioaddr + UART_IER); + } + spin_unlock_irqrestore(&info->slock, flags); +} + +static void mxser_start(struct tty_struct *tty) +{ + struct mxser_port *info = tty->driver_data; + unsigned long flags; + + spin_lock_irqsave(&info->slock, flags); + if (info->xmit_cnt && info->port.xmit_buf) { + outb(info->IER & ~UART_IER_THRI, info->ioaddr + UART_IER); + info->IER |= UART_IER_THRI; + outb(info->IER, info->ioaddr + UART_IER); + } + spin_unlock_irqrestore(&info->slock, flags); +} + +static void mxser_set_termios(struct tty_struct *tty, struct ktermios *old_termios) +{ + struct mxser_port *info = tty->driver_data; + unsigned long flags; + + spin_lock_irqsave(&info->slock, flags); + mxser_change_speed(tty, old_termios); + spin_unlock_irqrestore(&info->slock, flags); + + if ((old_termios->c_cflag & CRTSCTS) && + !(tty->termios->c_cflag & CRTSCTS)) { + tty->hw_stopped = 0; + mxser_start(tty); + } + + /* Handle sw stopped */ + if ((old_termios->c_iflag & IXON) && + !(tty->termios->c_iflag & IXON)) { + tty->stopped = 0; + + if (info->board->chip_flag) { + spin_lock_irqsave(&info->slock, flags); + mxser_disable_must_rx_software_flow_control( + info->ioaddr); + spin_unlock_irqrestore(&info->slock, flags); + } + + mxser_start(tty); + } +} + +/* + * mxser_wait_until_sent() --- wait until the transmitter is empty + */ +static void mxser_wait_until_sent(struct tty_struct *tty, int timeout) +{ + struct mxser_port *info = tty->driver_data; + unsigned long orig_jiffies, char_time; + unsigned long flags; + int lsr; + + if (info->type == PORT_UNKNOWN) + return; + + if (info->xmit_fifo_size == 0) + return; /* Just in case.... */ + + orig_jiffies = jiffies; + /* + * Set the check interval to be 1/5 of the estimated time to + * send a single character, and make it at least 1. The check + * interval should also be less than the timeout. + * + * Note: we have to use pretty tight timings here to satisfy + * the NIST-PCTS. + */ + char_time = (info->timeout - HZ / 50) / info->xmit_fifo_size; + char_time = char_time / 5; + if (char_time == 0) + char_time = 1; + if (timeout && timeout < char_time) + char_time = timeout; + /* + * If the transmitter hasn't cleared in twice the approximate + * amount of time to send the entire FIFO, it probably won't + * ever clear. This assumes the UART isn't doing flow + * control, which is currently the case. Hence, if it ever + * takes longer than info->timeout, this is probably due to a + * UART bug of some kind. So, we clamp the timeout parameter at + * 2*info->timeout. 
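+ *
+ * A worked example with assumed values (HZ=1000, baud_base=921600,
+ * 16-byte FIFO, 9600 bps so the divisor from mxser_set_baud() is 96):
+ * info->timeout = 16 * HZ * 10 * 96 / 921600 + HZ / 50 = 16 + 20 = 36
+ * jiffies; char_time = (36 - 20) / 16 = 1, then 1 / 5 = 0, bumped back
+ * up to the 1-jiffy minimum; and the clamp below caps any caller-supplied
+ * timeout at 2 * 36 = 72 jiffies.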
+ */ + if (!timeout || timeout > 2 * info->timeout) + timeout = 2 * info->timeout; +#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT + printk(KERN_DEBUG "In rs_wait_until_sent(%d) check=%lu...", + timeout, char_time); + printk("jiff=%lu...", jiffies); +#endif + spin_lock_irqsave(&info->slock, flags); + while (!((lsr = inb(info->ioaddr + UART_LSR)) & UART_LSR_TEMT)) { +#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT + printk("lsr = %d (jiff=%lu)...", lsr, jiffies); +#endif + spin_unlock_irqrestore(&info->slock, flags); + schedule_timeout_interruptible(char_time); + spin_lock_irqsave(&info->slock, flags); + if (signal_pending(current)) + break; + if (timeout && time_after(jiffies, orig_jiffies + timeout)) + break; + } + spin_unlock_irqrestore(&info->slock, flags); + set_current_state(TASK_RUNNING); + +#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT + printk("lsr = %d (jiff=%lu)...done\n", lsr, jiffies); +#endif +} + +/* + * This routine is called by tty_hangup() when a hangup is signaled. + */ +static void mxser_hangup(struct tty_struct *tty) +{ + struct mxser_port *info = tty->driver_data; + + mxser_flush_buffer(tty); + tty_port_hangup(&info->port); +} + +/* + * mxser_rs_break() --- routine which turns the break handling on or off + */ +static int mxser_rs_break(struct tty_struct *tty, int break_state) +{ + struct mxser_port *info = tty->driver_data; + unsigned long flags; + + spin_lock_irqsave(&info->slock, flags); + if (break_state == -1) + outb(inb(info->ioaddr + UART_LCR) | UART_LCR_SBC, + info->ioaddr + UART_LCR); + else + outb(inb(info->ioaddr + UART_LCR) & ~UART_LCR_SBC, + info->ioaddr + UART_LCR); + spin_unlock_irqrestore(&info->slock, flags); + return 0; +} + +static void mxser_receive_chars(struct tty_struct *tty, + struct mxser_port *port, int *status) +{ + unsigned char ch, gdl; + int ignored = 0; + int cnt = 0; + int recv_room; + int max = 256; + + recv_room = tty->receive_room; + if (recv_room == 0 && !port->ldisc_stop_rx) + mxser_stoprx(tty); + if (port->board->chip_flag != MOXA_OTHER_UART) { + + if (*status & UART_LSR_SPECIAL) + goto intr_old; + if (port->board->chip_flag == MOXA_MUST_MU860_HWID && + (*status & MOXA_MUST_LSR_RERR)) + goto intr_old; + if (*status & MOXA_MUST_LSR_RERR) + goto intr_old; + + gdl = inb(port->ioaddr + MOXA_MUST_GDL_REGISTER); + + if (port->board->chip_flag == MOXA_MUST_MU150_HWID) + gdl &= MOXA_MUST_GDL_MASK; + if (gdl >= recv_room) { + if (!port->ldisc_stop_rx) + mxser_stoprx(tty); + } + while (gdl--) { + ch = inb(port->ioaddr + UART_RX); + tty_insert_flip_char(tty, ch, 0); + cnt++; + } + goto end_intr; + } +intr_old: + + do { + if (max-- < 0) + break; + + ch = inb(port->ioaddr + UART_RX); + if (port->board->chip_flag && (*status & UART_LSR_OE)) + outb(0x23, port->ioaddr + UART_FCR); + *status &= port->read_status_mask; + if (*status & port->ignore_status_mask) { + if (++ignored > 100) + break; + } else { + char flag = 0; + if (*status & UART_LSR_SPECIAL) { + if (*status & UART_LSR_BI) { + flag = TTY_BREAK; + port->icount.brk++; + + if (port->port.flags & ASYNC_SAK) + do_SAK(tty); + } else if (*status & UART_LSR_PE) { + flag = TTY_PARITY; + port->icount.parity++; + } else if (*status & UART_LSR_FE) { + flag = TTY_FRAME; + port->icount.frame++; + } else if (*status & UART_LSR_OE) { + flag = TTY_OVERRUN; + port->icount.overrun++; + } else + flag = TTY_BREAK; + } + tty_insert_flip_char(tty, ch, flag); + cnt++; + if (cnt >= recv_room) { + if (!port->ldisc_stop_rx) + mxser_stoprx(tty); + break; + } + + } + + if (port->board->chip_flag) + break; + + *status = inb(port->ioaddr + 
UART_LSR); + } while (*status & UART_LSR_DR); + +end_intr: + mxvar_log.rxcnt[tty->index] += cnt; + port->mon_data.rxcnt += cnt; + port->mon_data.up_rxcnt += cnt; + + /* + * We are called from an interrupt context with &port->slock + * being held. Drop it temporarily in order to prevent + * recursive locking. + */ + spin_unlock(&port->slock); + tty_flip_buffer_push(tty); + spin_lock(&port->slock); +} + +static void mxser_transmit_chars(struct tty_struct *tty, struct mxser_port *port) +{ + int count, cnt; + + if (port->x_char) { + outb(port->x_char, port->ioaddr + UART_TX); + port->x_char = 0; + mxvar_log.txcnt[tty->index]++; + port->mon_data.txcnt++; + port->mon_data.up_txcnt++; + port->icount.tx++; + return; + } + + if (port->port.xmit_buf == NULL) + return; + + if (port->xmit_cnt <= 0 || tty->stopped || + (tty->hw_stopped && + (port->type != PORT_16550A) && + (!port->board->chip_flag))) { + port->IER &= ~UART_IER_THRI; + outb(port->IER, port->ioaddr + UART_IER); + return; + } + + cnt = port->xmit_cnt; + count = port->xmit_fifo_size; + do { + outb(port->port.xmit_buf[port->xmit_tail++], + port->ioaddr + UART_TX); + port->xmit_tail = port->xmit_tail & (SERIAL_XMIT_SIZE - 1); + if (--port->xmit_cnt <= 0) + break; + } while (--count > 0); + mxvar_log.txcnt[tty->index] += (cnt - port->xmit_cnt); + + port->mon_data.txcnt += (cnt - port->xmit_cnt); + port->mon_data.up_txcnt += (cnt - port->xmit_cnt); + port->icount.tx += (cnt - port->xmit_cnt); + + if (port->xmit_cnt < WAKEUP_CHARS) + tty_wakeup(tty); + + if (port->xmit_cnt <= 0) { + port->IER &= ~UART_IER_THRI; + outb(port->IER, port->ioaddr + UART_IER); + } +} + +/* + * This is the serial driver's generic interrupt routine + */ +static irqreturn_t mxser_interrupt(int irq, void *dev_id) +{ + int status, iir, i; + struct mxser_board *brd = NULL; + struct mxser_port *port; + int max, irqbits, bits, msr; + unsigned int int_cnt, pass_counter = 0; + int handled = IRQ_NONE; + struct tty_struct *tty; + + for (i = 0; i < MXSER_BOARDS; i++) + if (dev_id == &mxser_boards[i]) { + brd = dev_id; + break; + } + + if (i == MXSER_BOARDS) + goto irq_stop; + if (brd == NULL) + goto irq_stop; + max = brd->info->nports; + while (pass_counter++ < MXSER_ISR_PASS_LIMIT) { + irqbits = inb(brd->vector) & brd->vector_mask; + if (irqbits == brd->vector_mask) + break; + + handled = IRQ_HANDLED; + for (i = 0, bits = 1; i < max; i++, irqbits |= bits, bits <<= 1) { + if (irqbits == brd->vector_mask) + break; + if (bits & irqbits) + continue; + port = &brd->ports[i]; + + int_cnt = 0; + spin_lock(&port->slock); + do { + iir = inb(port->ioaddr + UART_IIR); + if (iir & UART_IIR_NO_INT) + break; + iir &= MOXA_MUST_IIR_MASK; + tty = tty_port_tty_get(&port->port); + if (!tty || + (port->port.flags & ASYNC_CLOSING) || + !(port->port.flags & + ASYNC_INITIALIZED)) { + status = inb(port->ioaddr + UART_LSR); + outb(0x27, port->ioaddr + UART_FCR); + inb(port->ioaddr + UART_MSR); + tty_kref_put(tty); + break; + } + + status = inb(port->ioaddr + UART_LSR); + + if (status & UART_LSR_PE) + port->err_shadow |= NPPI_NOTIFY_PARITY; + if (status & UART_LSR_FE) + port->err_shadow |= NPPI_NOTIFY_FRAMING; + if (status & UART_LSR_OE) + port->err_shadow |= + NPPI_NOTIFY_HW_OVERRUN; + if (status & UART_LSR_BI) + port->err_shadow |= NPPI_NOTIFY_BREAK; + + if (port->board->chip_flag) { + if (iir == MOXA_MUST_IIR_GDA || + iir == MOXA_MUST_IIR_RDA || + iir == MOXA_MUST_IIR_RTO || + iir == MOXA_MUST_IIR_LSR) + mxser_receive_chars(tty, port, + &status); + + } else { + status &= port->read_status_mask; + if 
(status & UART_LSR_DR) + mxser_receive_chars(tty, port, + &status); + } + msr = inb(port->ioaddr + UART_MSR); + if (msr & UART_MSR_ANY_DELTA) + mxser_check_modem_status(tty, port, msr); + + if (port->board->chip_flag) { + if (iir == 0x02 && (status & + UART_LSR_THRE)) + mxser_transmit_chars(tty, port); + } else { + if (status & UART_LSR_THRE) + mxser_transmit_chars(tty, port); + } + tty_kref_put(tty); + } while (int_cnt++ < MXSER_ISR_PASS_LIMIT); + spin_unlock(&port->slock); + } + } + +irq_stop: + return handled; +} + +static const struct tty_operations mxser_ops = { + .open = mxser_open, + .close = mxser_close, + .write = mxser_write, + .put_char = mxser_put_char, + .flush_chars = mxser_flush_chars, + .write_room = mxser_write_room, + .chars_in_buffer = mxser_chars_in_buffer, + .flush_buffer = mxser_flush_buffer, + .ioctl = mxser_ioctl, + .throttle = mxser_throttle, + .unthrottle = mxser_unthrottle, + .set_termios = mxser_set_termios, + .stop = mxser_stop, + .start = mxser_start, + .hangup = mxser_hangup, + .break_ctl = mxser_rs_break, + .wait_until_sent = mxser_wait_until_sent, + .tiocmget = mxser_tiocmget, + .tiocmset = mxser_tiocmset, + .get_icount = mxser_get_icount, +}; + +struct tty_port_operations mxser_port_ops = { + .carrier_raised = mxser_carrier_raised, + .dtr_rts = mxser_dtr_rts, + .activate = mxser_activate, + .shutdown = mxser_shutdown_port, +}; + +/* + * The MOXA Smartio/Industio serial driver boot-time initialization code! + */ + +static void mxser_release_ISA_res(struct mxser_board *brd) +{ + free_irq(brd->irq, brd); + release_region(brd->ports[0].ioaddr, 8 * brd->info->nports); + release_region(brd->vector, 1); +} + +static int __devinit mxser_initbrd(struct mxser_board *brd, + struct pci_dev *pdev) +{ + struct mxser_port *info; + unsigned int i; + int retval; + + printk(KERN_INFO "mxser: max. 
baud rate = %d bps\n", + brd->ports[0].max_baud); + + for (i = 0; i < brd->info->nports; i++) { + info = &brd->ports[i]; + tty_port_init(&info->port); + info->port.ops = &mxser_port_ops; + info->board = brd; + info->stop_rx = 0; + info->ldisc_stop_rx = 0; + + /* Enhance mode enabled here */ + if (brd->chip_flag != MOXA_OTHER_UART) + mxser_enable_must_enchance_mode(info->ioaddr); + + info->port.flags = ASYNC_SHARE_IRQ; + info->type = brd->uart_type; + + process_txrx_fifo(info); + + info->custom_divisor = info->baud_base * 16; + info->port.close_delay = 5 * HZ / 10; + info->port.closing_wait = 30 * HZ; + info->normal_termios = mxvar_sdriver->init_termios; + memset(&info->mon_data, 0, sizeof(struct mxser_mon)); + info->err_shadow = 0; + spin_lock_init(&info->slock); + + /* before set INT ISR, disable all int */ + outb(inb(info->ioaddr + UART_IER) & 0xf0, + info->ioaddr + UART_IER); + } + + retval = request_irq(brd->irq, mxser_interrupt, IRQF_SHARED, "mxser", + brd); + if (retval) + printk(KERN_ERR "Board %s: Request irq failed, IRQ (%d) may " + "conflict with another device.\n", + brd->info->name, brd->irq); + + return retval; +} + +static int __init mxser_get_ISA_conf(int cap, struct mxser_board *brd) +{ + int id, i, bits; + unsigned short regs[16], irq; + unsigned char scratch, scratch2; + + brd->chip_flag = MOXA_OTHER_UART; + + id = mxser_read_register(cap, regs); + switch (id) { + case C168_ASIC_ID: + brd->info = &mxser_cards[0]; + break; + case C104_ASIC_ID: + brd->info = &mxser_cards[1]; + break; + case CI104J_ASIC_ID: + brd->info = &mxser_cards[2]; + break; + case C102_ASIC_ID: + brd->info = &mxser_cards[5]; + break; + case CI132_ASIC_ID: + brd->info = &mxser_cards[6]; + break; + case CI134_ASIC_ID: + brd->info = &mxser_cards[7]; + break; + default: + return 0; + } + + irq = 0; + /* some ISA cards have 2 ports, but we want to see them as 4-port (why?) + Flag-hack checks if configuration should be read as 2-port here. 
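+	   The checks below appear to require that every port was given the
+	   same 4-bit IRQ code in regs[9]/regs[10]. For example, a 4-port
+	   card set to IRQ 10 must read back 0xAAAA: starting from
+	   regs[9] & 0xF000 = 0xA000, the first fold gives 0xA000 | 0x0A00 =
+	   0xAA00, the second gives 0xAA00 | 0x00AA = 0xAAAA, which has to
+	   equal regs[9]; brd->irq then becomes 0xA000 >> 12, i.e. 10.
+	   (Illustrative reading of the code, not taken from the vendor
+	   documentation.)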
*/ + if (brd->info->nports == 2 || (brd->info->flags & MXSER_HAS2)) { + irq = regs[9] & 0xF000; + irq = irq | (irq >> 4); + if (irq != (regs[9] & 0xFF00)) + goto err_irqconflict; + } else if (brd->info->nports == 4) { + irq = regs[9] & 0xF000; + irq = irq | (irq >> 4); + irq = irq | (irq >> 8); + if (irq != regs[9]) + goto err_irqconflict; + } else if (brd->info->nports == 8) { + irq = regs[9] & 0xF000; + irq = irq | (irq >> 4); + irq = irq | (irq >> 8); + if ((irq != regs[9]) || (irq != regs[10])) + goto err_irqconflict; + } + + if (!irq) { + printk(KERN_ERR "mxser: interrupt number unset\n"); + return -EIO; + } + brd->irq = ((int)(irq & 0xF000) >> 12); + for (i = 0; i < 8; i++) + brd->ports[i].ioaddr = (int) regs[i + 1] & 0xFFF8; + if ((regs[12] & 0x80) == 0) { + printk(KERN_ERR "mxser: invalid interrupt vector\n"); + return -EIO; + } + brd->vector = (int)regs[11]; /* interrupt vector */ + if (id == 1) + brd->vector_mask = 0x00FF; + else + brd->vector_mask = 0x000F; + for (i = 7, bits = 0x0100; i >= 0; i--, bits <<= 1) { + if (regs[12] & bits) { + brd->ports[i].baud_base = 921600; + brd->ports[i].max_baud = 921600; + } else { + brd->ports[i].baud_base = 115200; + brd->ports[i].max_baud = 115200; + } + } + scratch2 = inb(cap + UART_LCR) & (~UART_LCR_DLAB); + outb(scratch2 | UART_LCR_DLAB, cap + UART_LCR); + outb(0, cap + UART_EFR); /* EFR is the same as FCR */ + outb(scratch2, cap + UART_LCR); + outb(UART_FCR_ENABLE_FIFO, cap + UART_FCR); + scratch = inb(cap + UART_IIR); + + if (scratch & 0xC0) + brd->uart_type = PORT_16550A; + else + brd->uart_type = PORT_16450; + if (!request_region(brd->ports[0].ioaddr, 8 * brd->info->nports, + "mxser(IO)")) { + printk(KERN_ERR "mxser: can't request ports I/O region: " + "0x%.8lx-0x%.8lx\n", + brd->ports[0].ioaddr, brd->ports[0].ioaddr + + 8 * brd->info->nports - 1); + return -EIO; + } + if (!request_region(brd->vector, 1, "mxser(vector)")) { + release_region(brd->ports[0].ioaddr, 8 * brd->info->nports); + printk(KERN_ERR "mxser: can't request interrupt vector region: " + "0x%.8lx-0x%.8lx\n", + brd->ports[0].ioaddr, brd->ports[0].ioaddr + + 8 * brd->info->nports - 1); + return -EIO; + } + return brd->info->nports; + +err_irqconflict: + printk(KERN_ERR "mxser: invalid interrupt number\n"); + return -EIO; +} + +static int __devinit mxser_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ +#ifdef CONFIG_PCI + struct mxser_board *brd; + unsigned int i, j; + unsigned long ioaddress; + int retval = -EINVAL; + + for (i = 0; i < MXSER_BOARDS; i++) + if (mxser_boards[i].info == NULL) + break; + + if (i >= MXSER_BOARDS) { + dev_err(&pdev->dev, "too many boards found (maximum %d), board " + "not configured\n", MXSER_BOARDS); + goto err; + } + + brd = &mxser_boards[i]; + brd->idx = i * MXSER_PORTS_PER_BOARD; + dev_info(&pdev->dev, "found MOXA %s board (BusNo=%d, DevNo=%d)\n", + mxser_cards[ent->driver_data].name, + pdev->bus->number, PCI_SLOT(pdev->devfn)); + + retval = pci_enable_device(pdev); + if (retval) { + dev_err(&pdev->dev, "PCI enable failed\n"); + goto err; + } + + /* io address */ + ioaddress = pci_resource_start(pdev, 2); + retval = pci_request_region(pdev, 2, "mxser(IO)"); + if (retval) + goto err_dis; + + brd->info = &mxser_cards[ent->driver_data]; + for (i = 0; i < brd->info->nports; i++) + brd->ports[i].ioaddr = ioaddress + 8 * i; + + /* vector */ + ioaddress = pci_resource_start(pdev, 3); + retval = pci_request_region(pdev, 3, "mxser(vector)"); + if (retval) + goto err_zero; + brd->vector = ioaddress; + + /* irq */ + brd->irq = 
pdev->irq; + + brd->chip_flag = CheckIsMoxaMust(brd->ports[0].ioaddr); + brd->uart_type = PORT_16550A; + brd->vector_mask = 0; + + for (i = 0; i < brd->info->nports; i++) { + for (j = 0; j < UART_INFO_NUM; j++) { + if (Gpci_uart_info[j].type == brd->chip_flag) { + brd->ports[i].max_baud = + Gpci_uart_info[j].max_baud; + + /* exception....CP-102 */ + if (brd->info->flags & MXSER_HIGHBAUD) + brd->ports[i].max_baud = 921600; + break; + } + } + } + + if (brd->chip_flag == MOXA_MUST_MU860_HWID) { + for (i = 0; i < brd->info->nports; i++) { + if (i < 4) + brd->ports[i].opmode_ioaddr = ioaddress + 4; + else + brd->ports[i].opmode_ioaddr = ioaddress + 0x0c; + } + outb(0, ioaddress + 4); /* default set to RS232 mode */ + outb(0, ioaddress + 0x0c); /* default set to RS232 mode */ + } + + for (i = 0; i < brd->info->nports; i++) { + brd->vector_mask |= (1 << i); + brd->ports[i].baud_base = 921600; + } + + /* mxser_initbrd will hook ISR. */ + retval = mxser_initbrd(brd, pdev); + if (retval) + goto err_rel3; + + for (i = 0; i < brd->info->nports; i++) + tty_register_device(mxvar_sdriver, brd->idx + i, &pdev->dev); + + pci_set_drvdata(pdev, brd); + + return 0; +err_rel3: + pci_release_region(pdev, 3); +err_zero: + brd->info = NULL; + pci_release_region(pdev, 2); +err_dis: + pci_disable_device(pdev); +err: + return retval; +#else + return -ENODEV; +#endif +} + +static void __devexit mxser_remove(struct pci_dev *pdev) +{ +#ifdef CONFIG_PCI + struct mxser_board *brd = pci_get_drvdata(pdev); + unsigned int i; + + for (i = 0; i < brd->info->nports; i++) + tty_unregister_device(mxvar_sdriver, brd->idx + i); + + free_irq(pdev->irq, brd); + pci_release_region(pdev, 2); + pci_release_region(pdev, 3); + pci_disable_device(pdev); + brd->info = NULL; +#endif +} + +static struct pci_driver mxser_driver = { + .name = "mxser", + .id_table = mxser_pcibrds, + .probe = mxser_probe, + .remove = __devexit_p(mxser_remove) +}; + +static int __init mxser_module_init(void) +{ + struct mxser_board *brd; + unsigned int b, i, m; + int retval; + + mxvar_sdriver = alloc_tty_driver(MXSER_PORTS + 1); + if (!mxvar_sdriver) + return -ENOMEM; + + printk(KERN_INFO "MOXA Smartio/Industio family driver version %s\n", + MXSER_VERSION); + + /* Initialize the tty_driver structure */ + mxvar_sdriver->owner = THIS_MODULE; + mxvar_sdriver->magic = TTY_DRIVER_MAGIC; + mxvar_sdriver->name = "ttyMI"; + mxvar_sdriver->major = ttymajor; + mxvar_sdriver->minor_start = 0; + mxvar_sdriver->num = MXSER_PORTS + 1; + mxvar_sdriver->type = TTY_DRIVER_TYPE_SERIAL; + mxvar_sdriver->subtype = SERIAL_TYPE_NORMAL; + mxvar_sdriver->init_termios = tty_std_termios; + mxvar_sdriver->init_termios.c_cflag = B9600|CS8|CREAD|HUPCL|CLOCAL; + mxvar_sdriver->flags = TTY_DRIVER_REAL_RAW|TTY_DRIVER_DYNAMIC_DEV; + tty_set_operations(mxvar_sdriver, &mxser_ops); + + retval = tty_register_driver(mxvar_sdriver); + if (retval) { + printk(KERN_ERR "Couldn't install MOXA Smartio/Industio family " + "tty driver !\n"); + goto err_put; + } + + /* Start finding ISA boards here */ + for (m = 0, b = 0; b < MXSER_BOARDS; b++) { + if (!ioaddr[b]) + continue; + + brd = &mxser_boards[m]; + retval = mxser_get_ISA_conf(ioaddr[b], brd); + if (retval <= 0) { + brd->info = NULL; + continue; + } + + printk(KERN_INFO "mxser: found MOXA %s board (CAP=0x%lx)\n", + brd->info->name, ioaddr[b]); + + /* mxser_initbrd will hook ISR. 
*/ + if (mxser_initbrd(brd, NULL) < 0) { + brd->info = NULL; + continue; + } + + brd->idx = m * MXSER_PORTS_PER_BOARD; + for (i = 0; i < brd->info->nports; i++) + tty_register_device(mxvar_sdriver, brd->idx + i, NULL); + + m++; + } + + retval = pci_register_driver(&mxser_driver); + if (retval) { + printk(KERN_ERR "mxser: can't register pci driver\n"); + if (!m) { + retval = -ENODEV; + goto err_unr; + } /* else: we have some ISA cards under control */ + } + + return 0; +err_unr: + tty_unregister_driver(mxvar_sdriver); +err_put: + put_tty_driver(mxvar_sdriver); + return retval; +} + +static void __exit mxser_module_exit(void) +{ + unsigned int i, j; + + pci_unregister_driver(&mxser_driver); + + for (i = 0; i < MXSER_BOARDS; i++) /* ISA remains */ + if (mxser_boards[i].info != NULL) + for (j = 0; j < mxser_boards[i].info->nports; j++) + tty_unregister_device(mxvar_sdriver, + mxser_boards[i].idx + j); + tty_unregister_driver(mxvar_sdriver); + put_tty_driver(mxvar_sdriver); + + for (i = 0; i < MXSER_BOARDS; i++) + if (mxser_boards[i].info != NULL) + mxser_release_ISA_res(&mxser_boards[i]); +} + +module_init(mxser_module_init); +module_exit(mxser_module_exit); diff --git a/drivers/tty/mxser.h b/drivers/tty/mxser.h new file mode 100644 index 00000000000..41878a69203 --- /dev/null +++ b/drivers/tty/mxser.h @@ -0,0 +1,150 @@ +#ifndef _MXSER_H +#define _MXSER_H + +/* + * Semi-public control interfaces + */ + +/* + * MOXA ioctls + */ + +#define MOXA 0x400 +#define MOXA_GETDATACOUNT (MOXA + 23) +#define MOXA_DIAGNOSE (MOXA + 50) +#define MOXA_CHKPORTENABLE (MOXA + 60) +#define MOXA_HighSpeedOn (MOXA + 61) +#define MOXA_GET_MAJOR (MOXA + 63) +#define MOXA_GETMSTATUS (MOXA + 65) +#define MOXA_SET_OP_MODE (MOXA + 66) +#define MOXA_GET_OP_MODE (MOXA + 67) + +#define RS232_MODE 0 +#define RS485_2WIRE_MODE 1 +#define RS422_MODE 2 +#define RS485_4WIRE_MODE 3 +#define OP_MODE_MASK 3 + +#define MOXA_SDS_RSTICOUNTER (MOXA + 69) +#define MOXA_ASPP_OQUEUE (MOXA + 70) +#define MOXA_ASPP_MON (MOXA + 73) +#define MOXA_ASPP_LSTATUS (MOXA + 74) +#define MOXA_ASPP_MON_EXT (MOXA + 75) +#define MOXA_SET_BAUD_METHOD (MOXA + 76) + +/* --------------------------------------------------- */ + +#define NPPI_NOTIFY_PARITY 0x01 +#define NPPI_NOTIFY_FRAMING 0x02 +#define NPPI_NOTIFY_HW_OVERRUN 0x04 +#define NPPI_NOTIFY_SW_OVERRUN 0x08 +#define NPPI_NOTIFY_BREAK 0x10 + +#define NPPI_NOTIFY_CTSHOLD 0x01 /* Tx hold by CTS low */ +#define NPPI_NOTIFY_DSRHOLD 0x02 /* Tx hold by DSR low */ +#define NPPI_NOTIFY_XOFFHOLD 0x08 /* Tx hold by Xoff received */ +#define NPPI_NOTIFY_XOFFXENT 0x10 /* Xoff Sent */ + +/* follow just for Moxa Must chip define. */ +/* */ +/* when LCR register (offset 0x03) write following value, */ +/* the Must chip will enter enchance mode. And write value */ +/* on EFR (offset 0x02) bit 6,7 to change bank. 
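+ *
+ * A rough sketch of the sequence the bank-switch helpers in mxser.c are
+ * expected to follow (illustrative only; "base" stands for a port's I/O
+ * base address and "efr"/"old_lcr" are local temporaries):
+ *
+ *	old_lcr = inb(base + UART_LCR);
+ *	outb(MOXA_MUST_ENTER_ENCHANCE, base + UART_LCR);
+ *	efr = inb(base + MOXA_MUST_EFR_REGISTER);
+ *	efr &= ~MOXA_MUST_EFR_BANK_MASK;
+ *	efr |= MOXA_MUST_EFR_BANK0;
+ *	outb(efr, base + MOXA_MUST_EFR_REGISTER);
+ *	... program the banked XON1/XOFF1 or baud registers ...
+ *	outb(old_lcr, base + UART_LCR);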
*/ +#define MOXA_MUST_ENTER_ENCHANCE 0xBF + +/* when enhance mode enable, access on general bank register */ +#define MOXA_MUST_GDL_REGISTER 0x07 +#define MOXA_MUST_GDL_MASK 0x7F +#define MOXA_MUST_GDL_HAS_BAD_DATA 0x80 + +#define MOXA_MUST_LSR_RERR 0x80 /* error in receive FIFO */ +/* enchance register bank select and enchance mode setting register */ +/* when LCR register equal to 0xBF */ +#define MOXA_MUST_EFR_REGISTER 0x02 +/* enchance mode enable */ +#define MOXA_MUST_EFR_EFRB_ENABLE 0x10 +/* enchance reister bank set 0, 1, 2 */ +#define MOXA_MUST_EFR_BANK0 0x00 +#define MOXA_MUST_EFR_BANK1 0x40 +#define MOXA_MUST_EFR_BANK2 0x80 +#define MOXA_MUST_EFR_BANK3 0xC0 +#define MOXA_MUST_EFR_BANK_MASK 0xC0 + +/* set XON1 value register, when LCR=0xBF and change to bank0 */ +#define MOXA_MUST_XON1_REGISTER 0x04 + +/* set XON2 value register, when LCR=0xBF and change to bank0 */ +#define MOXA_MUST_XON2_REGISTER 0x05 + +/* set XOFF1 value register, when LCR=0xBF and change to bank0 */ +#define MOXA_MUST_XOFF1_REGISTER 0x06 + +/* set XOFF2 value register, when LCR=0xBF and change to bank0 */ +#define MOXA_MUST_XOFF2_REGISTER 0x07 + +#define MOXA_MUST_RBRTL_REGISTER 0x04 +#define MOXA_MUST_RBRTH_REGISTER 0x05 +#define MOXA_MUST_RBRTI_REGISTER 0x06 +#define MOXA_MUST_THRTL_REGISTER 0x07 +#define MOXA_MUST_ENUM_REGISTER 0x04 +#define MOXA_MUST_HWID_REGISTER 0x05 +#define MOXA_MUST_ECR_REGISTER 0x06 +#define MOXA_MUST_CSR_REGISTER 0x07 + +/* good data mode enable */ +#define MOXA_MUST_FCR_GDA_MODE_ENABLE 0x20 +/* only good data put into RxFIFO */ +#define MOXA_MUST_FCR_GDA_ONLY_ENABLE 0x10 + +/* enable CTS interrupt */ +#define MOXA_MUST_IER_ECTSI 0x80 +/* enable RTS interrupt */ +#define MOXA_MUST_IER_ERTSI 0x40 +/* enable Xon/Xoff interrupt */ +#define MOXA_MUST_IER_XINT 0x20 +/* enable GDA interrupt */ +#define MOXA_MUST_IER_EGDAI 0x10 + +#define MOXA_MUST_RECV_ISR (UART_IER_RDI | MOXA_MUST_IER_EGDAI) + +/* GDA interrupt pending */ +#define MOXA_MUST_IIR_GDA 0x1C +#define MOXA_MUST_IIR_RDA 0x04 +#define MOXA_MUST_IIR_RTO 0x0C +#define MOXA_MUST_IIR_LSR 0x06 + +/* recieved Xon/Xoff or specical interrupt pending */ +#define MOXA_MUST_IIR_XSC 0x10 + +/* RTS/CTS change state interrupt pending */ +#define MOXA_MUST_IIR_RTSCTS 0x20 +#define MOXA_MUST_IIR_MASK 0x3E + +#define MOXA_MUST_MCR_XON_FLAG 0x40 +#define MOXA_MUST_MCR_XON_ANY 0x80 +#define MOXA_MUST_MCR_TX_XON 0x08 + +/* software flow control on chip mask value */ +#define MOXA_MUST_EFR_SF_MASK 0x0F +/* send Xon1/Xoff1 */ +#define MOXA_MUST_EFR_SF_TX1 0x08 +/* send Xon2/Xoff2 */ +#define MOXA_MUST_EFR_SF_TX2 0x04 +/* send Xon1,Xon2/Xoff1,Xoff2 */ +#define MOXA_MUST_EFR_SF_TX12 0x0C +/* don't send Xon/Xoff */ +#define MOXA_MUST_EFR_SF_TX_NO 0x00 +/* Tx software flow control mask */ +#define MOXA_MUST_EFR_SF_TX_MASK 0x0C +/* don't receive Xon/Xoff */ +#define MOXA_MUST_EFR_SF_RX_NO 0x00 +/* receive Xon1/Xoff1 */ +#define MOXA_MUST_EFR_SF_RX1 0x02 +/* receive Xon2/Xoff2 */ +#define MOXA_MUST_EFR_SF_RX2 0x01 +/* receive Xon1,Xon2/Xoff1,Xoff2 */ +#define MOXA_MUST_EFR_SF_RX12 0x03 +/* Rx software flow control mask */ +#define MOXA_MUST_EFR_SF_RX_MASK 0x03 + +#endif diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 44b8412a04e..176f63256b3 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -1250,8 +1250,7 @@ static void gsm_control_response(struct gsm_mux *gsm, unsigned int command, static void gsm_control_transmit(struct gsm_mux *gsm, struct gsm_control *ctrl) { - struct gsm_msg *msg = gsm_data_alloc(gsm, 0, ctrl->len + 1, - 
gsm->ftype|PF); + struct gsm_msg *msg = gsm_data_alloc(gsm, 0, ctrl->len + 1, gsm->ftype); if (msg == NULL) return; msg->data[0] = (ctrl->cmd << 1) | 2 | EA; /* command */ @@ -2414,6 +2413,7 @@ static int gsmld_config(struct tty_struct *tty, struct gsm_mux *gsm, gsm->initiator = c->initiator; gsm->mru = c->mru; + gsm->mtu = c->mtu; gsm->encoding = c->encapsulation; gsm->adaption = c->adaption; gsm->n2 = c->n2; @@ -2648,13 +2648,13 @@ static void gsmtty_wait_until_sent(struct tty_struct *tty, int timeout) to do here */ } -static int gsmtty_tiocmget(struct tty_struct *tty, struct file *filp) +static int gsmtty_tiocmget(struct tty_struct *tty) { struct gsm_dlci *dlci = tty->driver_data; return dlci->modem_rx; } -static int gsmtty_tiocmset(struct tty_struct *tty, struct file *filp, +static int gsmtty_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct gsm_dlci *dlci = tty->driver_data; @@ -2671,7 +2671,7 @@ static int gsmtty_tiocmset(struct tty_struct *tty, struct file *filp, } -static int gsmtty_ioctl(struct tty_struct *tty, struct file *filp, +static int gsmtty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { return -ENOIOCTLCMD; diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c index 47d32281032..cea56033b34 100644 --- a/drivers/tty/n_hdlc.c +++ b/drivers/tty/n_hdlc.c @@ -97,7 +97,6 @@ #include <linux/slab.h> #include <linux/tty.h> #include <linux/errno.h> -#include <linux/smp_lock.h> #include <linux/string.h> /* used in new tty drivers */ #include <linux/signal.h> /* used in new tty drivers */ #include <linux/if.h> @@ -581,8 +580,9 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file, __u8 __user *buf, size_t nr) { struct n_hdlc *n_hdlc = tty2n_hdlc(tty); - int ret; + int ret = 0; struct n_hdlc_buf *rbuf; + DECLARE_WAITQUEUE(wait, current); if (debuglevel >= DEBUG_LEVEL_INFO) printk("%s(%d)n_hdlc_tty_read() called\n",__FILE__,__LINE__); @@ -598,57 +598,55 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file, return -EFAULT; } - tty_lock(); + add_wait_queue(&tty->read_wait, &wait); for (;;) { if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) { - tty_unlock(); - return -EIO; + ret = -EIO; + break; } + if (tty_hung_up_p(file)) + break; - n_hdlc = tty2n_hdlc (tty); - if (!n_hdlc || n_hdlc->magic != HDLC_MAGIC || - tty != n_hdlc->tty) { - tty_unlock(); - return 0; - } + set_current_state(TASK_INTERRUPTIBLE); rbuf = n_hdlc_buf_get(&n_hdlc->rx_buf_list); - if (rbuf) + if (rbuf) { + if (rbuf->count > nr) { + /* too large for caller's buffer */ + ret = -EOVERFLOW; + } else { + if (copy_to_user(buf, rbuf->buf, rbuf->count)) + ret = -EFAULT; + else + ret = rbuf->count; + } + + if (n_hdlc->rx_free_buf_list.count > + DEFAULT_RX_BUF_COUNT) + kfree(rbuf); + else + n_hdlc_buf_put(&n_hdlc->rx_free_buf_list, rbuf); break; + } /* no data */ if (file->f_flags & O_NONBLOCK) { - tty_unlock(); - return -EAGAIN; + ret = -EAGAIN; + break; } - - interruptible_sleep_on (&tty->read_wait); + + schedule(); + if (signal_pending(current)) { - tty_unlock(); - return -EINTR; + ret = -EINTR; + break; } } - - if (rbuf->count > nr) - /* frame too large for caller's buffer (discard frame) */ - ret = -EOVERFLOW; - else { - /* Copy the data to the caller's buffer */ - if (copy_to_user(buf, rbuf->buf, rbuf->count)) - ret = -EFAULT; - else - ret = rbuf->count; - } - - /* return HDLC buffer to free list unless the free list */ - /* count has exceeded the default value, in which case the */ - /* buffer is freed back to the OS to conserve 
memory */ - if (n_hdlc->rx_free_buf_list.count > DEFAULT_RX_BUF_COUNT) - kfree(rbuf); - else - n_hdlc_buf_put(&n_hdlc->rx_free_buf_list,rbuf); - tty_unlock(); + + remove_wait_queue(&tty->read_wait, &wait); + __set_current_state(TASK_RUNNING); + return ret; } /* end of n_hdlc_tty_read() */ @@ -691,14 +689,15 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file, count = maxframe; } - tty_lock(); - add_wait_queue(&tty->write_wait, &wait); - set_current_state(TASK_INTERRUPTIBLE); + + for (;;) { + set_current_state(TASK_INTERRUPTIBLE); - /* Allocate transmit buffer */ - /* sleep until transmit buffer available */ - while (!(tbuf = n_hdlc_buf_get(&n_hdlc->tx_free_buf_list))) { + tbuf = n_hdlc_buf_get(&n_hdlc->tx_free_buf_list); + if (tbuf) + break; + if (file->f_flags & O_NONBLOCK) { error = -EAGAIN; break; @@ -719,7 +718,7 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file, } } - set_current_state(TASK_RUNNING); + __set_current_state(TASK_RUNNING); remove_wait_queue(&tty->write_wait, &wait); if (!error) { @@ -731,7 +730,7 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file, n_hdlc_buf_put(&n_hdlc->tx_buf_list,tbuf); n_hdlc_send_frames(n_hdlc,tty); } - tty_unlock(); + return error; } /* end of n_hdlc_tty_write() */ diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c index 88dda0c45ee..5c6c31459a2 100644 --- a/drivers/tty/n_r3964.c +++ b/drivers/tty/n_r3964.c @@ -57,7 +57,6 @@ #include <linux/ioport.h> #include <linux/in.h> #include <linux/slab.h> -#include <linux/smp_lock.h> #include <linux/tty.h> #include <linux/errno.h> #include <linux/string.h> /* used in new tty drivers */ diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c new file mode 100644 index 00000000000..f4f11164efe --- /dev/null +++ b/drivers/tty/nozomi.c @@ -0,0 +1,1991 @@ +/* + * nozomi.c -- HSDPA driver Broadband Wireless Data Card - Globe Trotter + * + * Written by: Ulf Jakobsson, + * Jan Åkerfeldt, + * Stefan Thomasson, + * + * Maintained by: Paul Hardwick (p.hardwick@option.com) + * + * Patches: + * Locking code changes for Vodafone by Sphere Systems Ltd, + * Andrew Bird (ajb@spheresystems.co.uk ) + * & Phil Sanderson + * + * Source has been ported from an implementation made by Filip Aben @ Option + * + * -------------------------------------------------------------------------- + * + * Copyright (c) 2005,2006 Option Wireless Sweden AB + * Copyright (c) 2006 Sphere Systems Ltd + * Copyright (c) 2006 Option Wireless n/v + * All rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + * -------------------------------------------------------------------------- + */ + +/* Enable this to have a lot of debug printouts */ +#define DEBUG + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/ioport.h> +#include <linux/tty.h> +#include <linux/tty_driver.h> +#include <linux/tty_flip.h> +#include <linux/sched.h> +#include <linux/serial.h> +#include <linux/interrupt.h> +#include <linux/kmod.h> +#include <linux/init.h> +#include <linux/kfifo.h> +#include <linux/uaccess.h> +#include <linux/slab.h> +#include <asm/byteorder.h> + +#include <linux/delay.h> + + +#define VERSION_STRING DRIVER_DESC " 2.1d (build date: " \ + __DATE__ " " __TIME__ ")" + +/* Macros definitions */ + +/* Default debug printout level */ +#define NOZOMI_DEBUG_LEVEL 0x00 + +#define P_BUF_SIZE 128 +#define NFO(_err_flag_, args...) \ +do { \ + char tmp[P_BUF_SIZE]; \ + snprintf(tmp, sizeof(tmp), ##args); \ + printk(_err_flag_ "[%d] %s(): %s\n", __LINE__, \ + __func__, tmp); \ +} while (0) + +#define DBG1(args...) D_(0x01, ##args) +#define DBG2(args...) D_(0x02, ##args) +#define DBG3(args...) D_(0x04, ##args) +#define DBG4(args...) D_(0x08, ##args) +#define DBG5(args...) D_(0x10, ##args) +#define DBG6(args...) D_(0x20, ##args) +#define DBG7(args...) D_(0x40, ##args) +#define DBG8(args...) D_(0x80, ##args) + +#ifdef DEBUG +/* Do we need this settable at runtime? */ +static int debug = NOZOMI_DEBUG_LEVEL; + +#define D(lvl, args...) do \ + {if (lvl & debug) NFO(KERN_DEBUG, ##args); } \ + while (0) +#define D_(lvl, args...) D(lvl, ##args) + +/* These printouts are always printed */ + +#else +static int debug; +#define D_(lvl, args...) +#endif + +/* TODO: rewrite to optimize macros... */ + +#define TMP_BUF_MAX 256 + +#define DUMP(buf__,len__) \ + do { \ + char tbuf[TMP_BUF_MAX] = {0};\ + if (len__ > 1) {\ + snprintf(tbuf, len__ > TMP_BUF_MAX ? 
TMP_BUF_MAX : len__, "%s", buf__);\ + if (tbuf[len__-2] == '\r') {\ + tbuf[len__-2] = 'r';\ + } \ + DBG1("SENDING: '%s' (%d+n)", tbuf, len__);\ + } else {\ + DBG1("SENDING: '%s' (%d)", tbuf, len__);\ + } \ +} while (0) + +/* Defines */ +#define NOZOMI_NAME "nozomi" +#define NOZOMI_NAME_TTY "nozomi_tty" +#define DRIVER_DESC "Nozomi driver" + +#define NTTY_TTY_MAXMINORS 256 +#define NTTY_FIFO_BUFFER_SIZE 8192 + +/* Must be power of 2 */ +#define FIFO_BUFFER_SIZE_UL 8192 + +/* Size of tmp send buffer to card */ +#define SEND_BUF_MAX 1024 +#define RECEIVE_BUF_MAX 4 + + +#define R_IIR 0x0000 /* Interrupt Identity Register */ +#define R_FCR 0x0000 /* Flow Control Register */ +#define R_IER 0x0004 /* Interrupt Enable Register */ + +#define CONFIG_MAGIC 0xEFEFFEFE +#define TOGGLE_VALID 0x0000 + +/* Definition of interrupt tokens */ +#define MDM_DL1 0x0001 +#define MDM_UL1 0x0002 +#define MDM_DL2 0x0004 +#define MDM_UL2 0x0008 +#define DIAG_DL1 0x0010 +#define DIAG_DL2 0x0020 +#define DIAG_UL 0x0040 +#define APP1_DL 0x0080 +#define APP1_UL 0x0100 +#define APP2_DL 0x0200 +#define APP2_UL 0x0400 +#define CTRL_DL 0x0800 +#define CTRL_UL 0x1000 +#define RESET 0x8000 + +#define MDM_DL (MDM_DL1 | MDM_DL2) +#define MDM_UL (MDM_UL1 | MDM_UL2) +#define DIAG_DL (DIAG_DL1 | DIAG_DL2) + +/* modem signal definition */ +#define CTRL_DSR 0x0001 +#define CTRL_DCD 0x0002 +#define CTRL_RI 0x0004 +#define CTRL_CTS 0x0008 + +#define CTRL_DTR 0x0001 +#define CTRL_RTS 0x0002 + +#define MAX_PORT 4 +#define NOZOMI_MAX_PORTS 5 +#define NOZOMI_MAX_CARDS (NTTY_TTY_MAXMINORS / MAX_PORT) + +/* Type definitions */ + +/* + * There are two types of nozomi cards, + * one with 2048 memory and with 8192 memory + */ +enum card_type { + F32_2 = 2048, /* 512 bytes downlink + uplink * 2 -> 2048 */ + F32_8 = 8192, /* 3072 bytes downl. + 1024 bytes uplink * 2 -> 8192 */ +}; + +/* Initialization states a card can be in */ +enum card_state { + NOZOMI_STATE_UKNOWN = 0, + NOZOMI_STATE_ENABLED = 1, /* pci device enabled */ + NOZOMI_STATE_ALLOCATED = 2, /* config setup done */ + NOZOMI_STATE_READY = 3, /* flowcontrols received */ +}; + +/* Two different toggle channels exist */ +enum channel_type { + CH_A = 0, + CH_B = 1, +}; + +/* Port definition for the card regarding flow control */ +enum ctrl_port_type { + CTRL_CMD = 0, + CTRL_MDM = 1, + CTRL_DIAG = 2, + CTRL_APP1 = 3, + CTRL_APP2 = 4, + CTRL_ERROR = -1, +}; + +/* Ports that the nozomi has */ +enum port_type { + PORT_MDM = 0, + PORT_DIAG = 1, + PORT_APP1 = 2, + PORT_APP2 = 3, + PORT_CTRL = 4, + PORT_ERROR = -1, +}; + +#ifdef __BIG_ENDIAN +/* Big endian */ + +struct toggles { + unsigned int enabled:5; /* + * Toggle fields are valid if enabled is 0, + * else A-channels must always be used. 
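The A/B "toggle" fields above describe a simple double-buffering scheme: for the modem and diagnostic downlink channels the card exposes two alternating buffers, and the driver keeps a per-port toggle selecting which one to service next, flipping it after every consumed buffer (see handle_data_dl() further down). The sketch below is a minimal user-space model of that flip only; struct dl_state and consume_one() are invented names, not part of the driver.

#include <stdint.h>
#include <stdio.h>

enum channel_type { CH_A = 0, CH_B = 1 };

/* Per-port double-buffer state: which downlink channel to service next. */
struct dl_state {
	uint8_t toggle;		/* 0 -> channel A next, 1 -> channel B next */
};

/* Consume one downlink buffer and flip to the other channel,
 * roughly as handle_data_dl() does for the modem and diag ports. */
static enum channel_type consume_one(struct dl_state *s)
{
	enum channel_type ch = s->toggle ? CH_B : CH_A;

	s->toggle = !s->toggle;
	return ch;
}

int main(void)
{
	struct dl_state mdm = { .toggle = 0 };
	int i;

	for (i = 0; i < 4; i++)
		printf("service channel %c\n",
		       consume_one(&mdm) == CH_A ? 'A' : 'B');
	return 0;
}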
+ */ + unsigned int diag_dl:1; + unsigned int mdm_dl:1; + unsigned int mdm_ul:1; +} __attribute__ ((packed)); + +/* Configuration table to read at startup of card */ +/* Is for now only needed during initialization phase */ +struct config_table { + u32 signature; + u16 product_information; + u16 version; + u8 pad3[3]; + struct toggles toggle; + u8 pad1[4]; + u16 dl_mdm_len1; /* + * If this is 64, it can hold + * 60 bytes + 4 that is length field + */ + u16 dl_start; + + u16 dl_diag_len1; + u16 dl_mdm_len2; /* + * If this is 64, it can hold + * 60 bytes + 4 that is length field + */ + u16 dl_app1_len; + + u16 dl_diag_len2; + u16 dl_ctrl_len; + u16 dl_app2_len; + u8 pad2[16]; + u16 ul_mdm_len1; + u16 ul_start; + u16 ul_diag_len; + u16 ul_mdm_len2; + u16 ul_app1_len; + u16 ul_app2_len; + u16 ul_ctrl_len; +} __attribute__ ((packed)); + +/* This stores all control downlink flags */ +struct ctrl_dl { + u8 port; + unsigned int reserved:4; + unsigned int CTS:1; + unsigned int RI:1; + unsigned int DCD:1; + unsigned int DSR:1; +} __attribute__ ((packed)); + +/* This stores all control uplink flags */ +struct ctrl_ul { + u8 port; + unsigned int reserved:6; + unsigned int RTS:1; + unsigned int DTR:1; +} __attribute__ ((packed)); + +#else +/* Little endian */ + +/* This represents the toggle information */ +struct toggles { + unsigned int mdm_ul:1; + unsigned int mdm_dl:1; + unsigned int diag_dl:1; + unsigned int enabled:5; /* + * Toggle fields are valid if enabled is 0, + * else A-channels must always be used. + */ +} __attribute__ ((packed)); + +/* Configuration table to read at startup of card */ +struct config_table { + u32 signature; + u16 version; + u16 product_information; + struct toggles toggle; + u8 pad1[7]; + u16 dl_start; + u16 dl_mdm_len1; /* + * If this is 64, it can hold + * 60 bytes + 4 that is length field + */ + u16 dl_mdm_len2; + u16 dl_diag_len1; + u16 dl_diag_len2; + u16 dl_app1_len; + u16 dl_app2_len; + u16 dl_ctrl_len; + u8 pad2[16]; + u16 ul_start; + u16 ul_mdm_len2; + u16 ul_mdm_len1; + u16 ul_diag_len; + u16 ul_app1_len; + u16 ul_app2_len; + u16 ul_ctrl_len; +} __attribute__ ((packed)); + +/* This stores all control downlink flags */ +struct ctrl_dl { + unsigned int DSR:1; + unsigned int DCD:1; + unsigned int RI:1; + unsigned int CTS:1; + unsigned int reserverd:4; + u8 port; +} __attribute__ ((packed)); + +/* This stores all control uplink flags */ +struct ctrl_ul { + unsigned int DTR:1; + unsigned int RTS:1; + unsigned int reserved:6; + u8 port; +} __attribute__ ((packed)); +#endif + +/* This holds all information that is needed regarding a port */ +struct port { + struct tty_port port; + u8 update_flow_control; + struct ctrl_ul ctrl_ul; + struct ctrl_dl ctrl_dl; + struct kfifo fifo_ul; + void __iomem *dl_addr[2]; + u32 dl_size[2]; + u8 toggle_dl; + void __iomem *ul_addr[2]; + u32 ul_size[2]; + u8 toggle_ul; + u16 token_dl; + + /* mutex to ensure one access patch to this port */ + struct mutex tty_sem; + wait_queue_head_t tty_wait; + struct async_icount tty_icount; + + struct nozomi *dc; +}; + +/* Private data one for each card in the system */ +struct nozomi { + void __iomem *base_addr; + unsigned long flip; + + /* Pointers to registers */ + void __iomem *reg_iir; + void __iomem *reg_fcr; + void __iomem *reg_ier; + + u16 last_ier; + enum card_type card_type; + struct config_table config_table; /* Configuration table */ + struct pci_dev *pdev; + struct port port[NOZOMI_MAX_PORTS]; + u8 *send_buf; + + spinlock_t spin_mutex; /* secures access to registers and tty */ + + 
unsigned int index_start; + enum card_state state; + u32 open_ttys; +}; + +/* This is a data packet that is read or written to/from card */ +struct buffer { + u32 size; /* size is the length of the data buffer */ + u8 *data; +} __attribute__ ((packed)); + +/* Global variables */ +static const struct pci_device_id nozomi_pci_tbl[] __devinitconst = { + {PCI_DEVICE(0x1931, 0x000c)}, /* Nozomi HSDPA */ + {}, +}; + +MODULE_DEVICE_TABLE(pci, nozomi_pci_tbl); + +static struct nozomi *ndevs[NOZOMI_MAX_CARDS]; +static struct tty_driver *ntty_driver; + +static const struct tty_port_operations noz_tty_port_ops; + +/* + * find card by tty_index + */ +static inline struct nozomi *get_dc_by_tty(const struct tty_struct *tty) +{ + return tty ? ndevs[tty->index / MAX_PORT] : NULL; +} + +static inline struct port *get_port_by_tty(const struct tty_struct *tty) +{ + struct nozomi *ndev = get_dc_by_tty(tty); + return ndev ? &ndev->port[tty->index % MAX_PORT] : NULL; +} + +/* + * TODO: + * -Optimize + * -Rewrite cleaner + */ + +static void read_mem32(u32 *buf, const void __iomem *mem_addr_start, + u32 size_bytes) +{ + u32 i = 0; + const u32 __iomem *ptr = mem_addr_start; + u16 *buf16; + + if (unlikely(!ptr || !buf)) + goto out; + + /* shortcut for extremely often used cases */ + switch (size_bytes) { + case 2: /* 2 bytes */ + buf16 = (u16 *) buf; + *buf16 = __le16_to_cpu(readw(ptr)); + goto out; + break; + case 4: /* 4 bytes */ + *(buf) = __le32_to_cpu(readl(ptr)); + goto out; + break; + } + + while (i < size_bytes) { + if (size_bytes - i == 2) { + /* Handle 2 bytes in the end */ + buf16 = (u16 *) buf; + *(buf16) = __le16_to_cpu(readw(ptr)); + i += 2; + } else { + /* Read 4 bytes */ + *(buf) = __le32_to_cpu(readl(ptr)); + i += 4; + } + buf++; + ptr++; + } +out: + return; +} + +/* + * TODO: + * -Optimize + * -Rewrite cleaner + */ +static u32 write_mem32(void __iomem *mem_addr_start, const u32 *buf, + u32 size_bytes) +{ + u32 i = 0; + u32 __iomem *ptr = mem_addr_start; + const u16 *buf16; + + if (unlikely(!ptr || !buf)) + return 0; + + /* shortcut for extremely often used cases */ + switch (size_bytes) { + case 2: /* 2 bytes */ + buf16 = (const u16 *)buf; + writew(__cpu_to_le16(*buf16), ptr); + return 2; + break; + case 1: /* + * also needs to write 4 bytes in this case + * so falling through.. + */ + case 4: /* 4 bytes */ + writel(__cpu_to_le32(*buf), ptr); + return 4; + break; + } + + while (i < size_bytes) { + if (size_bytes - i == 2) { + /* 2 bytes */ + buf16 = (const u16 *)buf; + writew(__cpu_to_le16(*buf16), ptr); + i += 2; + } else { + /* 4 bytes */ + writel(__cpu_to_le32(*buf), ptr); + i += 4; + } + buf++; + ptr++; + } + return i; +} + +/* Setup pointers to different channels and also setup buffer sizes. */ +static void setup_memory(struct nozomi *dc) +{ + void __iomem *offset = dc->base_addr + dc->config_table.dl_start; + /* The length reported is including the length field of 4 bytes, + * hence subtract with 4. 
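setup_memory() below carves the card's shared memory into consecutive per-port windows: each window starts where the previous one ended, and because the first four bytes of every window carry the payload length, the usable size is the configured length minus 4. The following user-space sketch models just that running-offset computation; struct cfg, carve() and the 64-byte lengths are invented for illustration and only cover the first two downlink ports.

#include <stdio.h>

/* Simplified config table: only the modem and diag downlink windows. */
struct cfg {
	unsigned dl_start;
	unsigned dl_mdm_len1, dl_mdm_len2;
	unsigned dl_diag_len1, dl_diag_len2;
};

struct window { unsigned addr, size; };

/*
 * Each window starts where the previous one ended; the first 4 bytes of
 * every window hold the payload length, so the usable size is len - 4.
 */
static void carve(const struct cfg *c, struct window mdm[2], struct window diag[2])
{
	unsigned off = c->dl_start;
	const unsigned hdr = 4;

	mdm[0]  = (struct window){ off, c->dl_mdm_len1  - hdr }; off += c->dl_mdm_len1;
	mdm[1]  = (struct window){ off, c->dl_mdm_len2  - hdr }; off += c->dl_mdm_len2;
	diag[0] = (struct window){ off, c->dl_diag_len1 - hdr }; off += c->dl_diag_len1;
	diag[1] = (struct window){ off, c->dl_diag_len2 - hdr };
}

int main(void)
{
	struct cfg c = { .dl_start = 0x20, .dl_mdm_len1 = 64, .dl_mdm_len2 = 64,
			 .dl_diag_len1 = 64, .dl_diag_len2 = 64 };
	struct window mdm[2], diag[2];

	carve(&c, mdm, diag);
	printf("MDM A @0x%x (%u bytes), MDM B @0x%x (%u bytes)\n",
	       mdm[0].addr, mdm[0].size, mdm[1].addr, mdm[1].size);
	return 0;
}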
+ */ + const u16 buff_offset = 4; + + /* Modem port dl configuration */ + dc->port[PORT_MDM].dl_addr[CH_A] = offset; + dc->port[PORT_MDM].dl_addr[CH_B] = + (offset += dc->config_table.dl_mdm_len1); + dc->port[PORT_MDM].dl_size[CH_A] = + dc->config_table.dl_mdm_len1 - buff_offset; + dc->port[PORT_MDM].dl_size[CH_B] = + dc->config_table.dl_mdm_len2 - buff_offset; + + /* Diag port dl configuration */ + dc->port[PORT_DIAG].dl_addr[CH_A] = + (offset += dc->config_table.dl_mdm_len2); + dc->port[PORT_DIAG].dl_size[CH_A] = + dc->config_table.dl_diag_len1 - buff_offset; + dc->port[PORT_DIAG].dl_addr[CH_B] = + (offset += dc->config_table.dl_diag_len1); + dc->port[PORT_DIAG].dl_size[CH_B] = + dc->config_table.dl_diag_len2 - buff_offset; + + /* App1 port dl configuration */ + dc->port[PORT_APP1].dl_addr[CH_A] = + (offset += dc->config_table.dl_diag_len2); + dc->port[PORT_APP1].dl_size[CH_A] = + dc->config_table.dl_app1_len - buff_offset; + + /* App2 port dl configuration */ + dc->port[PORT_APP2].dl_addr[CH_A] = + (offset += dc->config_table.dl_app1_len); + dc->port[PORT_APP2].dl_size[CH_A] = + dc->config_table.dl_app2_len - buff_offset; + + /* Ctrl dl configuration */ + dc->port[PORT_CTRL].dl_addr[CH_A] = + (offset += dc->config_table.dl_app2_len); + dc->port[PORT_CTRL].dl_size[CH_A] = + dc->config_table.dl_ctrl_len - buff_offset; + + offset = dc->base_addr + dc->config_table.ul_start; + + /* Modem Port ul configuration */ + dc->port[PORT_MDM].ul_addr[CH_A] = offset; + dc->port[PORT_MDM].ul_size[CH_A] = + dc->config_table.ul_mdm_len1 - buff_offset; + dc->port[PORT_MDM].ul_addr[CH_B] = + (offset += dc->config_table.ul_mdm_len1); + dc->port[PORT_MDM].ul_size[CH_B] = + dc->config_table.ul_mdm_len2 - buff_offset; + + /* Diag port ul configuration */ + dc->port[PORT_DIAG].ul_addr[CH_A] = + (offset += dc->config_table.ul_mdm_len2); + dc->port[PORT_DIAG].ul_size[CH_A] = + dc->config_table.ul_diag_len - buff_offset; + + /* App1 port ul configuration */ + dc->port[PORT_APP1].ul_addr[CH_A] = + (offset += dc->config_table.ul_diag_len); + dc->port[PORT_APP1].ul_size[CH_A] = + dc->config_table.ul_app1_len - buff_offset; + + /* App2 port ul configuration */ + dc->port[PORT_APP2].ul_addr[CH_A] = + (offset += dc->config_table.ul_app1_len); + dc->port[PORT_APP2].ul_size[CH_A] = + dc->config_table.ul_app2_len - buff_offset; + + /* Ctrl ul configuration */ + dc->port[PORT_CTRL].ul_addr[CH_A] = + (offset += dc->config_table.ul_app2_len); + dc->port[PORT_CTRL].ul_size[CH_A] = + dc->config_table.ul_ctrl_len - buff_offset; +} + +/* Dump config table under initalization phase */ +#ifdef DEBUG +static void dump_table(const struct nozomi *dc) +{ + DBG3("signature: 0x%08X", dc->config_table.signature); + DBG3("version: 0x%04X", dc->config_table.version); + DBG3("product_information: 0x%04X", \ + dc->config_table.product_information); + DBG3("toggle enabled: %d", dc->config_table.toggle.enabled); + DBG3("toggle up_mdm: %d", dc->config_table.toggle.mdm_ul); + DBG3("toggle dl_mdm: %d", dc->config_table.toggle.mdm_dl); + DBG3("toggle dl_dbg: %d", dc->config_table.toggle.diag_dl); + + DBG3("dl_start: 0x%04X", dc->config_table.dl_start); + DBG3("dl_mdm_len0: 0x%04X, %d", dc->config_table.dl_mdm_len1, + dc->config_table.dl_mdm_len1); + DBG3("dl_mdm_len1: 0x%04X, %d", dc->config_table.dl_mdm_len2, + dc->config_table.dl_mdm_len2); + DBG3("dl_diag_len0: 0x%04X, %d", dc->config_table.dl_diag_len1, + dc->config_table.dl_diag_len1); + DBG3("dl_diag_len1: 0x%04X, %d", dc->config_table.dl_diag_len2, + dc->config_table.dl_diag_len2); + 
DBG3("dl_app1_len: 0x%04X, %d", dc->config_table.dl_app1_len, + dc->config_table.dl_app1_len); + DBG3("dl_app2_len: 0x%04X, %d", dc->config_table.dl_app2_len, + dc->config_table.dl_app2_len); + DBG3("dl_ctrl_len: 0x%04X, %d", dc->config_table.dl_ctrl_len, + dc->config_table.dl_ctrl_len); + DBG3("ul_start: 0x%04X, %d", dc->config_table.ul_start, + dc->config_table.ul_start); + DBG3("ul_mdm_len[0]: 0x%04X, %d", dc->config_table.ul_mdm_len1, + dc->config_table.ul_mdm_len1); + DBG3("ul_mdm_len[1]: 0x%04X, %d", dc->config_table.ul_mdm_len2, + dc->config_table.ul_mdm_len2); + DBG3("ul_diag_len: 0x%04X, %d", dc->config_table.ul_diag_len, + dc->config_table.ul_diag_len); + DBG3("ul_app1_len: 0x%04X, %d", dc->config_table.ul_app1_len, + dc->config_table.ul_app1_len); + DBG3("ul_app2_len: 0x%04X, %d", dc->config_table.ul_app2_len, + dc->config_table.ul_app2_len); + DBG3("ul_ctrl_len: 0x%04X, %d", dc->config_table.ul_ctrl_len, + dc->config_table.ul_ctrl_len); +} +#else +static inline void dump_table(const struct nozomi *dc) { } +#endif + +/* + * Read configuration table from card under intalization phase + * Returns 1 if ok, else 0 + */ +static int nozomi_read_config_table(struct nozomi *dc) +{ + read_mem32((u32 *) &dc->config_table, dc->base_addr + 0, + sizeof(struct config_table)); + + if (dc->config_table.signature != CONFIG_MAGIC) { + dev_err(&dc->pdev->dev, "ConfigTable Bad! 0x%08X != 0x%08X\n", + dc->config_table.signature, CONFIG_MAGIC); + return 0; + } + + if ((dc->config_table.version == 0) + || (dc->config_table.toggle.enabled == TOGGLE_VALID)) { + int i; + DBG1("Second phase, configuring card"); + + setup_memory(dc); + + dc->port[PORT_MDM].toggle_ul = dc->config_table.toggle.mdm_ul; + dc->port[PORT_MDM].toggle_dl = dc->config_table.toggle.mdm_dl; + dc->port[PORT_DIAG].toggle_dl = dc->config_table.toggle.diag_dl; + DBG1("toggle ports: MDM UL:%d MDM DL:%d, DIAG DL:%d", + dc->port[PORT_MDM].toggle_ul, + dc->port[PORT_MDM].toggle_dl, dc->port[PORT_DIAG].toggle_dl); + + dump_table(dc); + + for (i = PORT_MDM; i < MAX_PORT; i++) { + memset(&dc->port[i].ctrl_dl, 0, sizeof(struct ctrl_dl)); + memset(&dc->port[i].ctrl_ul, 0, sizeof(struct ctrl_ul)); + } + + /* Enable control channel */ + dc->last_ier = dc->last_ier | CTRL_DL; + writew(dc->last_ier, dc->reg_ier); + + dc->state = NOZOMI_STATE_ALLOCATED; + dev_info(&dc->pdev->dev, "Initialization OK!\n"); + return 1; + } + + if ((dc->config_table.version > 0) + && (dc->config_table.toggle.enabled != TOGGLE_VALID)) { + u32 offset = 0; + DBG1("First phase: pushing upload buffers, clearing download"); + + dev_info(&dc->pdev->dev, "Version of card: %d\n", + dc->config_table.version); + + /* Here we should disable all I/O over F32. 
*/ + setup_memory(dc); + + /* + * We should send ALL channel pair tokens back along + * with reset token + */ + + /* push upload modem buffers */ + write_mem32(dc->port[PORT_MDM].ul_addr[CH_A], + (u32 *) &offset, 4); + write_mem32(dc->port[PORT_MDM].ul_addr[CH_B], + (u32 *) &offset, 4); + + writew(MDM_UL | DIAG_DL | MDM_DL, dc->reg_fcr); + + DBG1("First phase done"); + } + + return 1; +} + +/* Enable uplink interrupts */ +static void enable_transmit_ul(enum port_type port, struct nozomi *dc) +{ + static const u16 mask[] = {MDM_UL, DIAG_UL, APP1_UL, APP2_UL, CTRL_UL}; + + if (port < NOZOMI_MAX_PORTS) { + dc->last_ier |= mask[port]; + writew(dc->last_ier, dc->reg_ier); + } else { + dev_err(&dc->pdev->dev, "Called with wrong port?\n"); + } +} + +/* Disable uplink interrupts */ +static void disable_transmit_ul(enum port_type port, struct nozomi *dc) +{ + static const u16 mask[] = + {~MDM_UL, ~DIAG_UL, ~APP1_UL, ~APP2_UL, ~CTRL_UL}; + + if (port < NOZOMI_MAX_PORTS) { + dc->last_ier &= mask[port]; + writew(dc->last_ier, dc->reg_ier); + } else { + dev_err(&dc->pdev->dev, "Called with wrong port?\n"); + } +} + +/* Enable downlink interrupts */ +static void enable_transmit_dl(enum port_type port, struct nozomi *dc) +{ + static const u16 mask[] = {MDM_DL, DIAG_DL, APP1_DL, APP2_DL, CTRL_DL}; + + if (port < NOZOMI_MAX_PORTS) { + dc->last_ier |= mask[port]; + writew(dc->last_ier, dc->reg_ier); + } else { + dev_err(&dc->pdev->dev, "Called with wrong port?\n"); + } +} + +/* Disable downlink interrupts */ +static void disable_transmit_dl(enum port_type port, struct nozomi *dc) +{ + static const u16 mask[] = + {~MDM_DL, ~DIAG_DL, ~APP1_DL, ~APP2_DL, ~CTRL_DL}; + + if (port < NOZOMI_MAX_PORTS) { + dc->last_ier &= mask[port]; + writew(dc->last_ier, dc->reg_ier); + } else { + dev_err(&dc->pdev->dev, "Called with wrong port?\n"); + } +} + +/* + * Return 1 - send buffer to card and ack. + * Return 0 - don't ack, don't send buffer to card. + */ +static int send_data(enum port_type index, struct nozomi *dc) +{ + u32 size = 0; + struct port *port = &dc->port[index]; + const u8 toggle = port->toggle_ul; + void __iomem *addr = port->ul_addr[toggle]; + const u32 ul_size = port->ul_size[toggle]; + struct tty_struct *tty = tty_port_tty_get(&port->port); + + /* Get data from tty and place in buf for now */ + size = kfifo_out(&port->fifo_ul, dc->send_buf, + ul_size < SEND_BUF_MAX ? ul_size : SEND_BUF_MAX); + + if (size == 0) { + DBG4("No more data to send, disable link:"); + tty_kref_put(tty); + return 0; + } + + /* DUMP(buf, size); */ + + /* Write length + data */ + write_mem32(addr, (u32 *) &size, 4); + write_mem32(addr + 4, (u32 *) dc->send_buf, size); + + if (tty) + tty_wakeup(tty); + + tty_kref_put(tty); + return 1; +} + +/* If all data has been read, return 1, else 0 */ +static int receive_data(enum port_type index, struct nozomi *dc) +{ + u8 buf[RECEIVE_BUF_MAX] = { 0 }; + int size; + u32 offset = 4; + struct port *port = &dc->port[index]; + void __iomem *addr = port->dl_addr[port->toggle_dl]; + struct tty_struct *tty = tty_port_tty_get(&port->port); + int i, ret; + + if (unlikely(!tty)) { + DBG1("tty not open for port: %d?", index); + return 1; + } + + read_mem32((u32 *) &size, addr, 4); + /* DBG1( "%d bytes port: %d", size, index); */ + + if (test_bit(TTY_THROTTLED, &tty->flags)) { + DBG1("No room in tty, don't read data, don't ack interrupt, " + "disable interrupt"); + + /* disable interrupt in downlink... 
*/ + disable_transmit_dl(index, dc); + ret = 0; + goto put; + } + + if (unlikely(size == 0)) { + dev_err(&dc->pdev->dev, "size == 0?\n"); + ret = 1; + goto put; + } + + while (size > 0) { + read_mem32((u32 *) buf, addr + offset, RECEIVE_BUF_MAX); + + if (size == 1) { + tty_insert_flip_char(tty, buf[0], TTY_NORMAL); + size = 0; + } else if (size < RECEIVE_BUF_MAX) { + size -= tty_insert_flip_string(tty, (char *) buf, size); + } else { + i = tty_insert_flip_string(tty, \ + (char *) buf, RECEIVE_BUF_MAX); + size -= i; + offset += i; + } + } + + set_bit(index, &dc->flip); + ret = 1; +put: + tty_kref_put(tty); + return ret; +} + +/* Debug for interrupts */ +#ifdef DEBUG +static char *interrupt2str(u16 interrupt) +{ + static char buf[TMP_BUF_MAX]; + char *p = buf; + + interrupt & MDM_DL1 ? p += snprintf(p, TMP_BUF_MAX, "MDM_DL1 ") : NULL; + interrupt & MDM_DL2 ? p += snprintf(p, TMP_BUF_MAX - (p - buf), + "MDM_DL2 ") : NULL; + + interrupt & MDM_UL1 ? p += snprintf(p, TMP_BUF_MAX - (p - buf), + "MDM_UL1 ") : NULL; + interrupt & MDM_UL2 ? p += snprintf(p, TMP_BUF_MAX - (p - buf), + "MDM_UL2 ") : NULL; + + interrupt & DIAG_DL1 ? p += snprintf(p, TMP_BUF_MAX - (p - buf), + "DIAG_DL1 ") : NULL; + interrupt & DIAG_DL2 ? p += snprintf(p, TMP_BUF_MAX - (p - buf), + "DIAG_DL2 ") : NULL; + + interrupt & DIAG_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf), + "DIAG_UL ") : NULL; + + interrupt & APP1_DL ? p += snprintf(p, TMP_BUF_MAX - (p - buf), + "APP1_DL ") : NULL; + interrupt & APP2_DL ? p += snprintf(p, TMP_BUF_MAX - (p - buf), + "APP2_DL ") : NULL; + + interrupt & APP1_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf), + "APP1_UL ") : NULL; + interrupt & APP2_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf), + "APP2_UL ") : NULL; + + interrupt & CTRL_DL ? p += snprintf(p, TMP_BUF_MAX - (p - buf), + "CTRL_DL ") : NULL; + interrupt & CTRL_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf), + "CTRL_UL ") : NULL; + + interrupt & RESET ? 
p += snprintf(p, TMP_BUF_MAX - (p - buf), + "RESET ") : NULL; + + return buf; +} +#endif + +/* + * Receive flow control + * Return 1 - If ok, else 0 + */ +static int receive_flow_control(struct nozomi *dc) +{ + enum port_type port = PORT_MDM; + struct ctrl_dl ctrl_dl; + struct ctrl_dl old_ctrl; + u16 enable_ier = 0; + + read_mem32((u32 *) &ctrl_dl, dc->port[PORT_CTRL].dl_addr[CH_A], 2); + + switch (ctrl_dl.port) { + case CTRL_CMD: + DBG1("The Base Band sends this value as a response to a " + "request for IMSI detach sent over the control " + "channel uplink (see section 7.6.1)."); + break; + case CTRL_MDM: + port = PORT_MDM; + enable_ier = MDM_DL; + break; + case CTRL_DIAG: + port = PORT_DIAG; + enable_ier = DIAG_DL; + break; + case CTRL_APP1: + port = PORT_APP1; + enable_ier = APP1_DL; + break; + case CTRL_APP2: + port = PORT_APP2; + enable_ier = APP2_DL; + if (dc->state == NOZOMI_STATE_ALLOCATED) { + /* + * After card initialization the flow control + * received for APP2 is always the last + */ + dc->state = NOZOMI_STATE_READY; + dev_info(&dc->pdev->dev, "Device READY!\n"); + } + break; + default: + dev_err(&dc->pdev->dev, + "ERROR: flow control received for non-existing port\n"); + return 0; + }; + + DBG1("0x%04X->0x%04X", *((u16 *)&dc->port[port].ctrl_dl), + *((u16 *)&ctrl_dl)); + + old_ctrl = dc->port[port].ctrl_dl; + dc->port[port].ctrl_dl = ctrl_dl; + + if (old_ctrl.CTS == 1 && ctrl_dl.CTS == 0) { + DBG1("Disable interrupt (0x%04X) on port: %d", + enable_ier, port); + disable_transmit_ul(port, dc); + + } else if (old_ctrl.CTS == 0 && ctrl_dl.CTS == 1) { + + if (kfifo_len(&dc->port[port].fifo_ul)) { + DBG1("Enable interrupt (0x%04X) on port: %d", + enable_ier, port); + DBG1("Data in buffer [%d], enable transmit! ", + kfifo_len(&dc->port[port].fifo_ul)); + enable_transmit_ul(port, dc); + } else { + DBG1("No data in buffer..."); + } + } + + if (*(u16 *)&old_ctrl == *(u16 *)&ctrl_dl) { + DBG1(" No change in mctrl"); + return 1; + } + /* Update statistics */ + if (old_ctrl.CTS != ctrl_dl.CTS) + dc->port[port].tty_icount.cts++; + if (old_ctrl.DSR != ctrl_dl.DSR) + dc->port[port].tty_icount.dsr++; + if (old_ctrl.RI != ctrl_dl.RI) + dc->port[port].tty_icount.rng++; + if (old_ctrl.DCD != ctrl_dl.DCD) + dc->port[port].tty_icount.dcd++; + + wake_up_interruptible(&dc->port[port].tty_wait); + + DBG1("port: %d DCD(%d), CTS(%d), RI(%d), DSR(%d)", + port, + dc->port[port].tty_icount.dcd, dc->port[port].tty_icount.cts, + dc->port[port].tty_icount.rng, dc->port[port].tty_icount.dsr); + + return 1; +} + +static enum ctrl_port_type port2ctrl(enum port_type port, + const struct nozomi *dc) +{ + switch (port) { + case PORT_MDM: + return CTRL_MDM; + case PORT_DIAG: + return CTRL_DIAG; + case PORT_APP1: + return CTRL_APP1; + case PORT_APP2: + return CTRL_APP2; + default: + dev_err(&dc->pdev->dev, + "ERROR: send flow control " \ + "received for non-existing port\n"); + }; + return CTRL_ERROR; +} + +/* + * Send flow control, can only update one channel at a time + * Return 0 - If we have updated all flow control + * Return 1 - If we need to update more flow control, ack current enable more + */ +static int send_flow_control(struct nozomi *dc) +{ + u32 i, more_flow_control_to_be_updated = 0; + u16 *ctrl; + + for (i = PORT_MDM; i < MAX_PORT; i++) { + if (dc->port[i].update_flow_control) { + if (more_flow_control_to_be_updated) { + /* We have more flow control to be updated */ + return 1; + } + dc->port[i].ctrl_ul.port = port2ctrl(i, dc); + ctrl = (u16 *)&dc->port[i].ctrl_ul; + 
write_mem32(dc->port[PORT_CTRL].ul_addr[0], \ + (u32 *) ctrl, 2); + dc->port[i].update_flow_control = 0; + more_flow_control_to_be_updated = 1; + } + } + return 0; +} + +/* + * Handle downlink data, ports that are handled are modem and diagnostics + * Return 1 - ok + * Return 0 - toggle fields are out of sync + */ +static int handle_data_dl(struct nozomi *dc, enum port_type port, u8 *toggle, + u16 read_iir, u16 mask1, u16 mask2) +{ + if (*toggle == 0 && read_iir & mask1) { + if (receive_data(port, dc)) { + writew(mask1, dc->reg_fcr); + *toggle = !(*toggle); + } + + if (read_iir & mask2) { + if (receive_data(port, dc)) { + writew(mask2, dc->reg_fcr); + *toggle = !(*toggle); + } + } + } else if (*toggle == 1 && read_iir & mask2) { + if (receive_data(port, dc)) { + writew(mask2, dc->reg_fcr); + *toggle = !(*toggle); + } + + if (read_iir & mask1) { + if (receive_data(port, dc)) { + writew(mask1, dc->reg_fcr); + *toggle = !(*toggle); + } + } + } else { + dev_err(&dc->pdev->dev, "port out of sync!, toggle:%d\n", + *toggle); + return 0; + } + return 1; +} + +/* + * Handle uplink data, this is currently for the modem port + * Return 1 - ok + * Return 0 - toggle field are out of sync + */ +static int handle_data_ul(struct nozomi *dc, enum port_type port, u16 read_iir) +{ + u8 *toggle = &(dc->port[port].toggle_ul); + + if (*toggle == 0 && read_iir & MDM_UL1) { + dc->last_ier &= ~MDM_UL; + writew(dc->last_ier, dc->reg_ier); + if (send_data(port, dc)) { + writew(MDM_UL1, dc->reg_fcr); + dc->last_ier = dc->last_ier | MDM_UL; + writew(dc->last_ier, dc->reg_ier); + *toggle = !*toggle; + } + + if (read_iir & MDM_UL2) { + dc->last_ier &= ~MDM_UL; + writew(dc->last_ier, dc->reg_ier); + if (send_data(port, dc)) { + writew(MDM_UL2, dc->reg_fcr); + dc->last_ier = dc->last_ier | MDM_UL; + writew(dc->last_ier, dc->reg_ier); + *toggle = !*toggle; + } + } + + } else if (*toggle == 1 && read_iir & MDM_UL2) { + dc->last_ier &= ~MDM_UL; + writew(dc->last_ier, dc->reg_ier); + if (send_data(port, dc)) { + writew(MDM_UL2, dc->reg_fcr); + dc->last_ier = dc->last_ier | MDM_UL; + writew(dc->last_ier, dc->reg_ier); + *toggle = !*toggle; + } + + if (read_iir & MDM_UL1) { + dc->last_ier &= ~MDM_UL; + writew(dc->last_ier, dc->reg_ier); + if (send_data(port, dc)) { + writew(MDM_UL1, dc->reg_fcr); + dc->last_ier = dc->last_ier | MDM_UL; + writew(dc->last_ier, dc->reg_ier); + *toggle = !*toggle; + } + } + } else { + writew(read_iir & MDM_UL, dc->reg_fcr); + dev_err(&dc->pdev->dev, "port out of sync!\n"); + return 0; + } + return 1; +} + +static irqreturn_t interrupt_handler(int irq, void *dev_id) +{ + struct nozomi *dc = dev_id; + unsigned int a; + u16 read_iir; + + if (!dc) + return IRQ_NONE; + + spin_lock(&dc->spin_mutex); + read_iir = readw(dc->reg_iir); + + /* Card removed */ + if (read_iir == (u16)-1) + goto none; + /* + * Just handle interrupt enabled in IER + * (by masking with dc->last_ier) + */ + read_iir &= dc->last_ier; + + if (read_iir == 0) + goto none; + + + DBG4("%s irq:0x%04X, prev:0x%04X", interrupt2str(read_iir), read_iir, + dc->last_ier); + + if (read_iir & RESET) { + if (unlikely(!nozomi_read_config_table(dc))) { + dc->last_ier = 0x0; + writew(dc->last_ier, dc->reg_ier); + dev_err(&dc->pdev->dev, "Could not read status from " + "card, we should disable interface\n"); + } else { + writew(RESET, dc->reg_fcr); + } + /* No more useful info if this was the reset interrupt. 
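The interrupt handler that follows uses a read-mask-ack pattern: the raw IIR value is masked with the shadow IER so that only enabled sources are handled, an all-ones read means the card has been removed, and each handled token is acknowledged by writing it back to the FCR. The sketch below models one pass of that dispatch in plain C; handle_tokens() and the fcr variable are stand-ins, and only a few of the tokens are shown.

#include <stdint.h>
#include <stdio.h>

#define MDM_DL1 0x0001
#define CTRL_DL 0x0800
#define RESET   0x8000

/* Stands in for writew(token, dc->reg_fcr). */
static uint16_t fcr;

static int handle_tokens(uint16_t iir, uint16_t last_ier)
{
	uint16_t pending;

	if (iir == 0xffff)		/* all-ones read: card removed */
		return -1;

	pending = iir & last_ier;	/* ignore sources we did not enable */

	if (pending & RESET) {
		fcr |= RESET;		/* ack reset; nothing else is useful */
		return 0;
	}
	if (pending & CTRL_DL)
		fcr |= CTRL_DL;		/* ack after consuming flow control */
	if (pending & MDM_DL1)
		fcr |= MDM_DL1;		/* ack after draining the A buffer */
	return 0;
}

int main(void)
{
	handle_tokens(CTRL_DL | MDM_DL1, CTRL_DL);
	printf("acked tokens: 0x%04x\n", fcr);	/* only CTRL_DL was enabled */
	return 0;
}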
*/ + goto exit_handler; + } + if (read_iir & CTRL_UL) { + DBG1("CTRL_UL"); + dc->last_ier &= ~CTRL_UL; + writew(dc->last_ier, dc->reg_ier); + if (send_flow_control(dc)) { + writew(CTRL_UL, dc->reg_fcr); + dc->last_ier = dc->last_ier | CTRL_UL; + writew(dc->last_ier, dc->reg_ier); + } + } + if (read_iir & CTRL_DL) { + receive_flow_control(dc); + writew(CTRL_DL, dc->reg_fcr); + } + if (read_iir & MDM_DL) { + if (!handle_data_dl(dc, PORT_MDM, + &(dc->port[PORT_MDM].toggle_dl), read_iir, + MDM_DL1, MDM_DL2)) { + dev_err(&dc->pdev->dev, "MDM_DL out of sync!\n"); + goto exit_handler; + } + } + if (read_iir & MDM_UL) { + if (!handle_data_ul(dc, PORT_MDM, read_iir)) { + dev_err(&dc->pdev->dev, "MDM_UL out of sync!\n"); + goto exit_handler; + } + } + if (read_iir & DIAG_DL) { + if (!handle_data_dl(dc, PORT_DIAG, + &(dc->port[PORT_DIAG].toggle_dl), read_iir, + DIAG_DL1, DIAG_DL2)) { + dev_err(&dc->pdev->dev, "DIAG_DL out of sync!\n"); + goto exit_handler; + } + } + if (read_iir & DIAG_UL) { + dc->last_ier &= ~DIAG_UL; + writew(dc->last_ier, dc->reg_ier); + if (send_data(PORT_DIAG, dc)) { + writew(DIAG_UL, dc->reg_fcr); + dc->last_ier = dc->last_ier | DIAG_UL; + writew(dc->last_ier, dc->reg_ier); + } + } + if (read_iir & APP1_DL) { + if (receive_data(PORT_APP1, dc)) + writew(APP1_DL, dc->reg_fcr); + } + if (read_iir & APP1_UL) { + dc->last_ier &= ~APP1_UL; + writew(dc->last_ier, dc->reg_ier); + if (send_data(PORT_APP1, dc)) { + writew(APP1_UL, dc->reg_fcr); + dc->last_ier = dc->last_ier | APP1_UL; + writew(dc->last_ier, dc->reg_ier); + } + } + if (read_iir & APP2_DL) { + if (receive_data(PORT_APP2, dc)) + writew(APP2_DL, dc->reg_fcr); + } + if (read_iir & APP2_UL) { + dc->last_ier &= ~APP2_UL; + writew(dc->last_ier, dc->reg_ier); + if (send_data(PORT_APP2, dc)) { + writew(APP2_UL, dc->reg_fcr); + dc->last_ier = dc->last_ier | APP2_UL; + writew(dc->last_ier, dc->reg_ier); + } + } + +exit_handler: + spin_unlock(&dc->spin_mutex); + for (a = 0; a < NOZOMI_MAX_PORTS; a++) { + struct tty_struct *tty; + if (test_and_clear_bit(a, &dc->flip)) { + tty = tty_port_tty_get(&dc->port[a].port); + if (tty) + tty_flip_buffer_push(tty); + tty_kref_put(tty); + } + } + return IRQ_HANDLED; +none: + spin_unlock(&dc->spin_mutex); + return IRQ_NONE; +} + +static void nozomi_get_card_type(struct nozomi *dc) +{ + int i; + u32 size = 0; + + for (i = 0; i < 6; i++) + size += pci_resource_len(dc->pdev, i); + + /* Assume card type F32_8 if no match */ + dc->card_type = size == 2048 ? 
F32_2 : F32_8; + + dev_info(&dc->pdev->dev, "Card type is: %d\n", dc->card_type); +} + +static void nozomi_setup_private_data(struct nozomi *dc) +{ + void __iomem *offset = dc->base_addr + dc->card_type / 2; + unsigned int i; + + dc->reg_fcr = (void __iomem *)(offset + R_FCR); + dc->reg_iir = (void __iomem *)(offset + R_IIR); + dc->reg_ier = (void __iomem *)(offset + R_IER); + dc->last_ier = 0; + dc->flip = 0; + + dc->port[PORT_MDM].token_dl = MDM_DL; + dc->port[PORT_DIAG].token_dl = DIAG_DL; + dc->port[PORT_APP1].token_dl = APP1_DL; + dc->port[PORT_APP2].token_dl = APP2_DL; + + for (i = 0; i < MAX_PORT; i++) + init_waitqueue_head(&dc->port[i].tty_wait); +} + +static ssize_t card_type_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + const struct nozomi *dc = pci_get_drvdata(to_pci_dev(dev)); + + return sprintf(buf, "%d\n", dc->card_type); +} +static DEVICE_ATTR(card_type, S_IRUGO, card_type_show, NULL); + +static ssize_t open_ttys_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + const struct nozomi *dc = pci_get_drvdata(to_pci_dev(dev)); + + return sprintf(buf, "%u\n", dc->open_ttys); +} +static DEVICE_ATTR(open_ttys, S_IRUGO, open_ttys_show, NULL); + +static void make_sysfs_files(struct nozomi *dc) +{ + if (device_create_file(&dc->pdev->dev, &dev_attr_card_type)) + dev_err(&dc->pdev->dev, + "Could not create sysfs file for card_type\n"); + if (device_create_file(&dc->pdev->dev, &dev_attr_open_ttys)) + dev_err(&dc->pdev->dev, + "Could not create sysfs file for open_ttys\n"); +} + +static void remove_sysfs_files(struct nozomi *dc) +{ + device_remove_file(&dc->pdev->dev, &dev_attr_card_type); + device_remove_file(&dc->pdev->dev, &dev_attr_open_ttys); +} + +/* Allocate memory for one device */ +static int __devinit nozomi_card_init(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + resource_size_t start; + int ret; + struct nozomi *dc = NULL; + int ndev_idx; + int i; + + dev_dbg(&pdev->dev, "Init, new card found\n"); + + for (ndev_idx = 0; ndev_idx < ARRAY_SIZE(ndevs); ndev_idx++) + if (!ndevs[ndev_idx]) + break; + + if (ndev_idx >= ARRAY_SIZE(ndevs)) { + dev_err(&pdev->dev, "no free tty range for this card left\n"); + ret = -EIO; + goto err; + } + + dc = kzalloc(sizeof(struct nozomi), GFP_KERNEL); + if (unlikely(!dc)) { + dev_err(&pdev->dev, "Could not allocate memory\n"); + ret = -ENOMEM; + goto err_free; + } + + dc->pdev = pdev; + + ret = pci_enable_device(dc->pdev); + if (ret) { + dev_err(&pdev->dev, "Failed to enable PCI Device\n"); + goto err_free; + } + + ret = pci_request_regions(dc->pdev, NOZOMI_NAME); + if (ret) { + dev_err(&pdev->dev, "I/O address 0x%04x already in use\n", + (int) /* nozomi_private.io_addr */ 0); + goto err_disable_device; + } + + start = pci_resource_start(dc->pdev, 0); + if (start == 0) { + dev_err(&pdev->dev, "No I/O address for card detected\n"); + ret = -ENODEV; + goto err_rel_regs; + } + + /* Find out what card type it is */ + nozomi_get_card_type(dc); + + dc->base_addr = ioremap_nocache(start, dc->card_type); + if (!dc->base_addr) { + dev_err(&pdev->dev, "Unable to map card MMIO\n"); + ret = -ENODEV; + goto err_rel_regs; + } + + dc->send_buf = kmalloc(SEND_BUF_MAX, GFP_KERNEL); + if (!dc->send_buf) { + dev_err(&pdev->dev, "Could not allocate send buffer?\n"); + ret = -ENOMEM; + goto err_free_sbuf; + } + + for (i = PORT_MDM; i < MAX_PORT; i++) { + if (kfifo_alloc(&dc->port[i].fifo_ul, + FIFO_BUFFER_SIZE_UL, GFP_ATOMIC)) { + dev_err(&pdev->dev, + "Could not allocate kfifo buffer\n"); + ret = -ENOMEM; 
+ goto err_free_kfifo; + } + } + + spin_lock_init(&dc->spin_mutex); + + nozomi_setup_private_data(dc); + + /* Disable all interrupts */ + dc->last_ier = 0; + writew(dc->last_ier, dc->reg_ier); + + ret = request_irq(pdev->irq, &interrupt_handler, IRQF_SHARED, + NOZOMI_NAME, dc); + if (unlikely(ret)) { + dev_err(&pdev->dev, "can't request irq %d\n", pdev->irq); + goto err_free_kfifo; + } + + DBG1("base_addr: %p", dc->base_addr); + + make_sysfs_files(dc); + + dc->index_start = ndev_idx * MAX_PORT; + ndevs[ndev_idx] = dc; + + pci_set_drvdata(pdev, dc); + + /* Enable RESET interrupt */ + dc->last_ier = RESET; + iowrite16(dc->last_ier, dc->reg_ier); + + dc->state = NOZOMI_STATE_ENABLED; + + for (i = 0; i < MAX_PORT; i++) { + struct device *tty_dev; + struct port *port = &dc->port[i]; + port->dc = dc; + mutex_init(&port->tty_sem); + tty_port_init(&port->port); + port->port.ops = &noz_tty_port_ops; + tty_dev = tty_register_device(ntty_driver, dc->index_start + i, + &pdev->dev); + + if (IS_ERR(tty_dev)) { + ret = PTR_ERR(tty_dev); + dev_err(&pdev->dev, "Could not allocate tty?\n"); + goto err_free_tty; + } + } + + return 0; + +err_free_tty: + for (i = dc->index_start; i < dc->index_start + MAX_PORT; ++i) + tty_unregister_device(ntty_driver, i); +err_free_kfifo: + for (i = 0; i < MAX_PORT; i++) + kfifo_free(&dc->port[i].fifo_ul); +err_free_sbuf: + kfree(dc->send_buf); + iounmap(dc->base_addr); +err_rel_regs: + pci_release_regions(pdev); +err_disable_device: + pci_disable_device(pdev); +err_free: + kfree(dc); +err: + return ret; +} + +static void __devexit tty_exit(struct nozomi *dc) +{ + unsigned int i; + + DBG1(" "); + + for (i = 0; i < MAX_PORT; ++i) { + struct tty_struct *tty = tty_port_tty_get(&dc->port[i].port); + if (tty && list_empty(&tty->hangup_work.entry)) + tty_hangup(tty); + tty_kref_put(tty); + } + /* Racy below - surely should wait for scheduled work to be done or + complete off a hangup method ? */ + while (dc->open_ttys) + msleep(1); + for (i = dc->index_start; i < dc->index_start + MAX_PORT; ++i) + tty_unregister_device(ntty_driver, i); +} + +/* Deallocate memory for one device */ +static void __devexit nozomi_card_exit(struct pci_dev *pdev) +{ + int i; + struct ctrl_ul ctrl; + struct nozomi *dc = pci_get_drvdata(pdev); + + /* Disable all interrupts */ + dc->last_ier = 0; + writew(dc->last_ier, dc->reg_ier); + + tty_exit(dc); + + /* Send 0x0001, command card to resend the reset token. */ + /* This is to get the reset when the module is reloaded. */ + ctrl.port = 0x00; + ctrl.reserved = 0; + ctrl.RTS = 0; + ctrl.DTR = 1; + DBG1("sending flow control 0x%04X", *((u16 *)&ctrl)); + + /* Setup dc->reg addresses to we can use defines here */ + write_mem32(dc->port[PORT_CTRL].ul_addr[0], (u32 *)&ctrl, 2); + writew(CTRL_UL, dc->reg_fcr); /* push the token to the card. 
*/ + + remove_sysfs_files(dc); + + free_irq(pdev->irq, dc); + + for (i = 0; i < MAX_PORT; i++) + kfifo_free(&dc->port[i].fifo_ul); + + kfree(dc->send_buf); + + iounmap(dc->base_addr); + + pci_release_regions(pdev); + + pci_disable_device(pdev); + + ndevs[dc->index_start / MAX_PORT] = NULL; + + kfree(dc); +} + +static void set_rts(const struct tty_struct *tty, int rts) +{ + struct port *port = get_port_by_tty(tty); + + port->ctrl_ul.RTS = rts; + port->update_flow_control = 1; + enable_transmit_ul(PORT_CTRL, get_dc_by_tty(tty)); +} + +static void set_dtr(const struct tty_struct *tty, int dtr) +{ + struct port *port = get_port_by_tty(tty); + + DBG1("SETTING DTR index: %d, dtr: %d", tty->index, dtr); + + port->ctrl_ul.DTR = dtr; + port->update_flow_control = 1; + enable_transmit_ul(PORT_CTRL, get_dc_by_tty(tty)); +} + +/* + * ---------------------------------------------------------------------------- + * TTY code + * ---------------------------------------------------------------------------- + */ + +static int ntty_install(struct tty_driver *driver, struct tty_struct *tty) +{ + struct port *port = get_port_by_tty(tty); + struct nozomi *dc = get_dc_by_tty(tty); + int ret; + if (!port || !dc || dc->state != NOZOMI_STATE_READY) + return -ENODEV; + ret = tty_init_termios(tty); + if (ret == 0) { + tty_driver_kref_get(driver); + tty->count++; + tty->driver_data = port; + driver->ttys[tty->index] = tty; + } + return ret; +} + +static void ntty_cleanup(struct tty_struct *tty) +{ + tty->driver_data = NULL; +} + +static int ntty_activate(struct tty_port *tport, struct tty_struct *tty) +{ + struct port *port = container_of(tport, struct port, port); + struct nozomi *dc = port->dc; + unsigned long flags; + + DBG1("open: %d", port->token_dl); + spin_lock_irqsave(&dc->spin_mutex, flags); + dc->last_ier = dc->last_ier | port->token_dl; + writew(dc->last_ier, dc->reg_ier); + dc->open_ttys++; + spin_unlock_irqrestore(&dc->spin_mutex, flags); + printk("noz: activated %d: %p\n", tty->index, tport); + return 0; +} + +static int ntty_open(struct tty_struct *tty, struct file *filp) +{ + struct port *port = tty->driver_data; + return tty_port_open(&port->port, tty, filp); +} + +static void ntty_shutdown(struct tty_port *tport) +{ + struct port *port = container_of(tport, struct port, port); + struct nozomi *dc = port->dc; + unsigned long flags; + + DBG1("close: %d", port->token_dl); + spin_lock_irqsave(&dc->spin_mutex, flags); + dc->last_ier &= ~(port->token_dl); + writew(dc->last_ier, dc->reg_ier); + dc->open_ttys--; + spin_unlock_irqrestore(&dc->spin_mutex, flags); + printk("noz: shutdown %p\n", tport); +} + +static void ntty_close(struct tty_struct *tty, struct file *filp) +{ + struct port *port = tty->driver_data; + if (port) + tty_port_close(&port->port, tty, filp); +} + +static void ntty_hangup(struct tty_struct *tty) +{ + struct port *port = tty->driver_data; + tty_port_hangup(&port->port); +} + +/* + * called when the userspace process writes to the tty (/dev/noz*). + * Data is inserted into a fifo, which is then read and transfered to the modem. 
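The write path described in that comment is a producer/consumer split: ntty_write() pushes bytes into the per-port kfifo and enables the uplink interrupt, while send_data() later drains at most one uplink window into send_buf and copies it to the card. Below is a minimal single-threaded model of that hand-off, using an invented fixed-size byte FIFO in place of the kfifo; FIFO_SZ and UL_WINDOW are illustrative constants. Decoupling the two halves this way keeps write() fast regardless of how small the card's uplink window is.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FIFO_SZ   64		/* stands in for FIFO_BUFFER_SIZE_UL */
#define UL_WINDOW 16		/* stands in for ul_size[toggle] */

/* Minimal byte FIFO standing in for the per-port kfifo. */
struct fifo {
	uint8_t buf[FIFO_SZ];
	size_t len;
};

/* write() side: queue what fits; the caller would then enable the UL irq. */
static size_t fifo_in(struct fifo *f, const uint8_t *p, size_t n)
{
	size_t room = FIFO_SZ - f->len;

	n = n < room ? n : room;
	memcpy(f->buf + f->len, p, n);
	f->len += n;
	return n;
}

/* IRQ side: drain at most one uplink window, as send_data() does. */
static size_t fifo_out(struct fifo *f, uint8_t *p, size_t n)
{
	n = n < f->len ? n : f->len;
	memcpy(p, f->buf, n);
	memmove(f->buf, f->buf + n, f->len - n);
	f->len -= n;
	return n;
}

int main(void)
{
	struct fifo ul = { .len = 0 };
	uint8_t window[UL_WINDOW];
	size_t sent;

	fifo_in(&ul, (const uint8_t *)"ATD*99***1#\r", 12);	/* write() side */
	sent = fifo_out(&ul, window, sizeof(window));		/* IRQ side */
	printf("sent %zu bytes to the card window\n", sent);
	return 0;
}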
+ */ +static int ntty_write(struct tty_struct *tty, const unsigned char *buffer, + int count) +{ + int rval = -EINVAL; + struct nozomi *dc = get_dc_by_tty(tty); + struct port *port = tty->driver_data; + unsigned long flags; + + /* DBG1( "WRITEx: %d, index = %d", count, index); */ + + if (!dc || !port) + return -ENODEV; + + mutex_lock(&port->tty_sem); + + if (unlikely(!port->port.count)) { + DBG1(" "); + goto exit; + } + + rval = kfifo_in(&port->fifo_ul, (unsigned char *)buffer, count); + + /* notify card */ + if (unlikely(dc == NULL)) { + DBG1("No device context?"); + goto exit; + } + + spin_lock_irqsave(&dc->spin_mutex, flags); + /* CTS is only valid on the modem channel */ + if (port == &(dc->port[PORT_MDM])) { + if (port->ctrl_dl.CTS) { + DBG4("Enable interrupt"); + enable_transmit_ul(tty->index % MAX_PORT, dc); + } else { + dev_err(&dc->pdev->dev, + "CTS not active on modem port?\n"); + } + } else { + enable_transmit_ul(tty->index % MAX_PORT, dc); + } + spin_unlock_irqrestore(&dc->spin_mutex, flags); + +exit: + mutex_unlock(&port->tty_sem); + return rval; +} + +/* + * Calculate how much is left in device + * This method is called by the upper tty layer. + * #according to sources N_TTY.c it expects a value >= 0 and + * does not check for negative values. + * + * If the port is unplugged report lots of room and let the bits + * dribble away so we don't block anything. + */ +static int ntty_write_room(struct tty_struct *tty) +{ + struct port *port = tty->driver_data; + int room = 4096; + const struct nozomi *dc = get_dc_by_tty(tty); + + if (dc) { + mutex_lock(&port->tty_sem); + if (port->port.count) + room = kfifo_avail(&port->fifo_ul); + mutex_unlock(&port->tty_sem); + } + return room; +} + +/* Gets io control parameters */ +static int ntty_tiocmget(struct tty_struct *tty) +{ + const struct port *port = tty->driver_data; + const struct ctrl_dl *ctrl_dl = &port->ctrl_dl; + const struct ctrl_ul *ctrl_ul = &port->ctrl_ul; + + /* Note: these could change under us but it is not clear this + matters if so */ + return (ctrl_ul->RTS ? TIOCM_RTS : 0) | + (ctrl_ul->DTR ? TIOCM_DTR : 0) | + (ctrl_dl->DCD ? TIOCM_CAR : 0) | + (ctrl_dl->RI ? TIOCM_RNG : 0) | + (ctrl_dl->DSR ? TIOCM_DSR : 0) | + (ctrl_dl->CTS ? 
TIOCM_CTS : 0); +} + +/* Sets io controls parameters */ +static int ntty_tiocmset(struct tty_struct *tty, + unsigned int set, unsigned int clear) +{ + struct nozomi *dc = get_dc_by_tty(tty); + unsigned long flags; + + spin_lock_irqsave(&dc->spin_mutex, flags); + if (set & TIOCM_RTS) + set_rts(tty, 1); + else if (clear & TIOCM_RTS) + set_rts(tty, 0); + + if (set & TIOCM_DTR) + set_dtr(tty, 1); + else if (clear & TIOCM_DTR) + set_dtr(tty, 0); + spin_unlock_irqrestore(&dc->spin_mutex, flags); + + return 0; +} + +static int ntty_cflags_changed(struct port *port, unsigned long flags, + struct async_icount *cprev) +{ + const struct async_icount cnow = port->tty_icount; + int ret; + + ret = ((flags & TIOCM_RNG) && (cnow.rng != cprev->rng)) || + ((flags & TIOCM_DSR) && (cnow.dsr != cprev->dsr)) || + ((flags & TIOCM_CD) && (cnow.dcd != cprev->dcd)) || + ((flags & TIOCM_CTS) && (cnow.cts != cprev->cts)); + + *cprev = cnow; + + return ret; +} + +static int ntty_tiocgicount(struct tty_struct *tty, + struct serial_icounter_struct *icount) +{ + struct port *port = tty->driver_data; + const struct async_icount cnow = port->tty_icount; + + icount->cts = cnow.cts; + icount->dsr = cnow.dsr; + icount->rng = cnow.rng; + icount->dcd = cnow.dcd; + icount->rx = cnow.rx; + icount->tx = cnow.tx; + icount->frame = cnow.frame; + icount->overrun = cnow.overrun; + icount->parity = cnow.parity; + icount->brk = cnow.brk; + icount->buf_overrun = cnow.buf_overrun; + return 0; +} + +static int ntty_ioctl(struct tty_struct *tty, + unsigned int cmd, unsigned long arg) +{ + struct port *port = tty->driver_data; + int rval = -ENOIOCTLCMD; + + DBG1("******** IOCTL, cmd: %d", cmd); + + switch (cmd) { + case TIOCMIWAIT: { + struct async_icount cprev = port->tty_icount; + + rval = wait_event_interruptible(port->tty_wait, + ntty_cflags_changed(port, arg, &cprev)); + break; + } + default: + DBG1("ERR: 0x%08X, %d", cmd, cmd); + break; + }; + + return rval; +} + +/* + * Called by the upper tty layer when tty buffers are ready + * to receive data again after a call to throttle. + */ +static void ntty_unthrottle(struct tty_struct *tty) +{ + struct nozomi *dc = get_dc_by_tty(tty); + unsigned long flags; + + DBG1("UNTHROTTLE"); + spin_lock_irqsave(&dc->spin_mutex, flags); + enable_transmit_dl(tty->index % MAX_PORT, dc); + set_rts(tty, 1); + + spin_unlock_irqrestore(&dc->spin_mutex, flags); +} + +/* + * Called by the upper tty layer when the tty buffers are almost full. + * The driver should stop send more data. 
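Flow control therefore works in two halves: throttling drops RTS so the card stops transmitting (the downlink interrupt has already been left disabled by receive_data() once the tty was marked throttled), and unthrottling re-enables the downlink interrupt and raises RTS again. A small model of that pairing follows; struct modem_port and the helper names are invented, and the interrupt-mask handling is collapsed into the helpers for clarity.

#include <stdint.h>
#include <stdio.h>

#define MDM_DL1 0x0001
#define MDM_DL2 0x0004
#define MDM_DL  (MDM_DL1 | MDM_DL2)

/* Modelled port state: RTS as seen by the card and the shadow IER bits. */
struct modem_port {
	int rts;
	uint16_t ier;
};

/* tty layer is running out of room: drop RTS; the downlink interrupt stays
 * off (in the driver it is receive_data() that clears it when throttled). */
static void throttle(struct modem_port *p)
{
	p->rts = 0;
	p->ier &= ~MDM_DL;
}

/* tty layer has room again: re-enable downlink interrupts and raise RTS. */
static void unthrottle(struct modem_port *p)
{
	p->ier |= MDM_DL;
	p->rts = 1;
}

int main(void)
{
	struct modem_port mdm = { .rts = 1, .ier = MDM_DL };

	throttle(&mdm);
	printf("throttled:   rts=%d ier=0x%04x\n", mdm.rts, mdm.ier);
	unthrottle(&mdm);
	printf("unthrottled: rts=%d ier=0x%04x\n", mdm.rts, mdm.ier);
	return 0;
}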
+ */ +static void ntty_throttle(struct tty_struct *tty) +{ + struct nozomi *dc = get_dc_by_tty(tty); + unsigned long flags; + + DBG1("THROTTLE"); + spin_lock_irqsave(&dc->spin_mutex, flags); + set_rts(tty, 0); + spin_unlock_irqrestore(&dc->spin_mutex, flags); +} + +/* Returns number of chars in buffer, called by tty layer */ +static s32 ntty_chars_in_buffer(struct tty_struct *tty) +{ + struct port *port = tty->driver_data; + struct nozomi *dc = get_dc_by_tty(tty); + s32 rval = 0; + + if (unlikely(!dc || !port)) { + goto exit_in_buffer; + } + + if (unlikely(!port->port.count)) { + dev_err(&dc->pdev->dev, "No tty open?\n"); + goto exit_in_buffer; + } + + rval = kfifo_len(&port->fifo_ul); + +exit_in_buffer: + return rval; +} + +static const struct tty_port_operations noz_tty_port_ops = { + .activate = ntty_activate, + .shutdown = ntty_shutdown, +}; + +static const struct tty_operations tty_ops = { + .ioctl = ntty_ioctl, + .open = ntty_open, + .close = ntty_close, + .hangup = ntty_hangup, + .write = ntty_write, + .write_room = ntty_write_room, + .unthrottle = ntty_unthrottle, + .throttle = ntty_throttle, + .chars_in_buffer = ntty_chars_in_buffer, + .tiocmget = ntty_tiocmget, + .tiocmset = ntty_tiocmset, + .get_icount = ntty_tiocgicount, + .install = ntty_install, + .cleanup = ntty_cleanup, +}; + +/* Module initialization */ +static struct pci_driver nozomi_driver = { + .name = NOZOMI_NAME, + .id_table = nozomi_pci_tbl, + .probe = nozomi_card_init, + .remove = __devexit_p(nozomi_card_exit), +}; + +static __init int nozomi_init(void) +{ + int ret; + + printk(KERN_INFO "Initializing %s\n", VERSION_STRING); + + ntty_driver = alloc_tty_driver(NTTY_TTY_MAXMINORS); + if (!ntty_driver) + return -ENOMEM; + + ntty_driver->owner = THIS_MODULE; + ntty_driver->driver_name = NOZOMI_NAME_TTY; + ntty_driver->name = "noz"; + ntty_driver->major = 0; + ntty_driver->type = TTY_DRIVER_TYPE_SERIAL; + ntty_driver->subtype = SERIAL_TYPE_NORMAL; + ntty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV; + ntty_driver->init_termios = tty_std_termios; + ntty_driver->init_termios.c_cflag = B115200 | CS8 | CREAD | \ + HUPCL | CLOCAL; + ntty_driver->init_termios.c_ispeed = 115200; + ntty_driver->init_termios.c_ospeed = 115200; + tty_set_operations(ntty_driver, &tty_ops); + + ret = tty_register_driver(ntty_driver); + if (ret) { + printk(KERN_ERR "Nozomi: failed to register ntty driver\n"); + goto free_tty; + } + + ret = pci_register_driver(&nozomi_driver); + if (ret) { + printk(KERN_ERR "Nozomi: can't register pci driver\n"); + goto unr_tty; + } + + return 0; +unr_tty: + tty_unregister_driver(ntty_driver); +free_tty: + put_tty_driver(ntty_driver); + return ret; +} + +static __exit void nozomi_exit(void) +{ + printk(KERN_INFO "Unloading %s\n", DRIVER_DESC); + pci_unregister_driver(&nozomi_driver); + tty_unregister_driver(ntty_driver); + put_tty_driver(ntty_driver); +} + +module_init(nozomi_init); +module_exit(nozomi_exit); + +module_param(debug, int, S_IRUGO | S_IWUSR); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION(DRIVER_DESC); diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index 923a4858550..210774726ad 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c @@ -23,7 +23,6 @@ #include <linux/major.h> #include <linux/mm.h> #include <linux/init.h> -#include <linux/smp_lock.h> #include <linux/sysctl.h> #include <linux/device.h> #include <linux/uaccess.h> @@ -334,7 +333,7 @@ free_mem_out: return -ENOMEM; } -static int pty_bsd_ioctl(struct tty_struct *tty, struct file *file, +static int 
pty_bsd_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { switch (cmd) { @@ -489,7 +488,7 @@ static struct ctl_table pty_root_table[] = { }; -static int pty_unix98_ioctl(struct tty_struct *tty, struct file *file, +static int pty_unix98_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { switch (cmd) { diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c new file mode 100644 index 00000000000..3780da8ad12 --- /dev/null +++ b/drivers/tty/rocket.c @@ -0,0 +1,3199 @@ +/* + * RocketPort device driver for Linux + * + * Written by Theodore Ts'o, 1995, 1996, 1997, 1998, 1999, 2000. + * + * Copyright (C) 1995, 1996, 1997, 1998, 1999, 2000, 2003 by Comtrol, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/* + * Kernel Synchronization: + * + * This driver has 2 kernel control paths - exception handlers (calls into the driver + * from user mode) and the timer bottom half (tasklet). This is a polled driver, interrupts + * are not used. + * + * Critical data: + * - rp_table[], accessed through passed "info" pointers, is a global (static) array of + * serial port state information and the xmit_buf circular buffer. Protected by + * a per port spinlock. + * - xmit_flags[], an array of ints indexed by line (port) number, indicating that there + * is data to be transmitted. Protected by atomic bit operations. + * - rp_num_ports, int indicating number of open ports, protected by atomic operations. + * + * rp_write() and rp_write_char() functions use a per port semaphore to protect against + * simultaneous access to the same port by more than one process. 
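The xmit_flags[] part of that locking scheme is worth a small illustration: one word per board, one bit per port, set when a write queues data and atomically tested-and-cleared by the polling tasklet. The sketch below is a plain, single-threaded approximation; the real driver uses set_bit()/test_and_clear_bit() and its own port-to-bit mapping, so PORTS_PER_BOARD and the index arithmetic here are assumptions made for illustration.

#include <stdio.h>

#define PORTS_PER_BOARD 32	/* assumed: one bit per port in an unsigned int */

/* Model of xmit_flags[]: one word per board, one bit per port. */
static unsigned int xmit_flags[4];

/* Write side: note that this line now has data to transmit. */
static void mark_has_data(unsigned int line)
{
	xmit_flags[line / PORTS_PER_BOARD] |= 1u << (line % PORTS_PER_BOARD);
}

/* Poll side: consume the flag, equivalent to test_and_clear_bit(). */
static int take_has_data(unsigned int line)
{
	unsigned int *w = &xmit_flags[line / PORTS_PER_BOARD];
	unsigned int bit = 1u << (line % PORTS_PER_BOARD);
	int was_set = !!(*w & bit);

	*w &= ~bit;
	return was_set;
}

int main(void)
{
	mark_has_data(33);			/* port 1 on board 1 */
	printf("poll finds data: %d\n", take_has_data(33));
	printf("second poll:     %d\n", take_has_data(33));
	return 0;
}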
+ */ + +/****** Defines ******/ +#define ROCKET_PARANOIA_CHECK +#define ROCKET_DISABLE_SIMUSAGE + +#undef ROCKET_SOFT_FLOW +#undef ROCKET_DEBUG_OPEN +#undef ROCKET_DEBUG_INTR +#undef ROCKET_DEBUG_WRITE +#undef ROCKET_DEBUG_FLOW +#undef ROCKET_DEBUG_THROTTLE +#undef ROCKET_DEBUG_WAIT_UNTIL_SENT +#undef ROCKET_DEBUG_RECEIVE +#undef ROCKET_DEBUG_HANGUP +#undef REV_PCI_ORDER +#undef ROCKET_DEBUG_IO + +#define POLL_PERIOD HZ/100 /* Polling period .01 seconds (10ms) */ + +/****** Kernel includes ******/ + +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/major.h> +#include <linux/kernel.h> +#include <linux/signal.h> +#include <linux/slab.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/timer.h> +#include <linux/interrupt.h> +#include <linux/tty.h> +#include <linux/tty_driver.h> +#include <linux/tty_flip.h> +#include <linux/serial.h> +#include <linux/string.h> +#include <linux/fcntl.h> +#include <linux/ptrace.h> +#include <linux/mutex.h> +#include <linux/ioport.h> +#include <linux/delay.h> +#include <linux/completion.h> +#include <linux/wait.h> +#include <linux/pci.h> +#include <linux/uaccess.h> +#include <asm/atomic.h> +#include <asm/unaligned.h> +#include <linux/bitops.h> +#include <linux/spinlock.h> +#include <linux/init.h> + +/****** RocketPort includes ******/ + +#include "rocket_int.h" +#include "rocket.h" + +#define ROCKET_VERSION "2.09" +#define ROCKET_DATE "12-June-2003" + +/****** RocketPort Local Variables ******/ + +static void rp_do_poll(unsigned long dummy); + +static struct tty_driver *rocket_driver; + +static struct rocket_version driver_version = { + ROCKET_VERSION, ROCKET_DATE +}; + +static struct r_port *rp_table[MAX_RP_PORTS]; /* The main repository of serial port state information. */ +static unsigned int xmit_flags[NUM_BOARDS]; /* Bit significant, indicates port had data to transmit. */ + /* eg. Bit 0 indicates port 0 has xmit data, ... */ +static atomic_t rp_num_ports_open; /* Number of serial ports open */ +static DEFINE_TIMER(rocket_timer, rp_do_poll, 0, 0); + +static unsigned long board1; /* ISA addresses, retrieved from rocketport.conf */ +static unsigned long board2; +static unsigned long board3; +static unsigned long board4; +static unsigned long controller; +static int support_low_speed; +static unsigned long modem1; +static unsigned long modem2; +static unsigned long modem3; +static unsigned long modem4; +static unsigned long pc104_1[8]; +static unsigned long pc104_2[8]; +static unsigned long pc104_3[8]; +static unsigned long pc104_4[8]; +static unsigned long *pc104[4] = { pc104_1, pc104_2, pc104_3, pc104_4 }; + +static int rp_baud_base[NUM_BOARDS]; /* Board config info (Someday make a per-board structure) */ +static unsigned long rcktpt_io_addr[NUM_BOARDS]; +static int rcktpt_type[NUM_BOARDS]; +static int is_PCI[NUM_BOARDS]; +static rocketModel_t rocketModel[NUM_BOARDS]; +static int max_board; +static const struct tty_port_operations rocket_port_ops; + +/* + * The following arrays define the interrupt bits corresponding to each AIOP. + * These bits are different between the ISA and regular PCI boards and the + * Universal PCI boards. 
+ */ + +static Word_t aiop_intr_bits[AIOP_CTL_SIZE] = { + AIOP_INTR_BIT_0, + AIOP_INTR_BIT_1, + AIOP_INTR_BIT_2, + AIOP_INTR_BIT_3 +}; + +static Word_t upci_aiop_intr_bits[AIOP_CTL_SIZE] = { + UPCI_AIOP_INTR_BIT_0, + UPCI_AIOP_INTR_BIT_1, + UPCI_AIOP_INTR_BIT_2, + UPCI_AIOP_INTR_BIT_3 +}; + +static Byte_t RData[RDATASIZE] = { + 0x00, 0x09, 0xf6, 0x82, + 0x02, 0x09, 0x86, 0xfb, + 0x04, 0x09, 0x00, 0x0a, + 0x06, 0x09, 0x01, 0x0a, + 0x08, 0x09, 0x8a, 0x13, + 0x0a, 0x09, 0xc5, 0x11, + 0x0c, 0x09, 0x86, 0x85, + 0x0e, 0x09, 0x20, 0x0a, + 0x10, 0x09, 0x21, 0x0a, + 0x12, 0x09, 0x41, 0xff, + 0x14, 0x09, 0x82, 0x00, + 0x16, 0x09, 0x82, 0x7b, + 0x18, 0x09, 0x8a, 0x7d, + 0x1a, 0x09, 0x88, 0x81, + 0x1c, 0x09, 0x86, 0x7a, + 0x1e, 0x09, 0x84, 0x81, + 0x20, 0x09, 0x82, 0x7c, + 0x22, 0x09, 0x0a, 0x0a +}; + +static Byte_t RRegData[RREGDATASIZE] = { + 0x00, 0x09, 0xf6, 0x82, /* 00: Stop Rx processor */ + 0x08, 0x09, 0x8a, 0x13, /* 04: Tx software flow control */ + 0x0a, 0x09, 0xc5, 0x11, /* 08: XON char */ + 0x0c, 0x09, 0x86, 0x85, /* 0c: XANY */ + 0x12, 0x09, 0x41, 0xff, /* 10: Rx mask char */ + 0x14, 0x09, 0x82, 0x00, /* 14: Compare/Ignore #0 */ + 0x16, 0x09, 0x82, 0x7b, /* 18: Compare #1 */ + 0x18, 0x09, 0x8a, 0x7d, /* 1c: Compare #2 */ + 0x1a, 0x09, 0x88, 0x81, /* 20: Interrupt #1 */ + 0x1c, 0x09, 0x86, 0x7a, /* 24: Ignore/Replace #1 */ + 0x1e, 0x09, 0x84, 0x81, /* 28: Interrupt #2 */ + 0x20, 0x09, 0x82, 0x7c, /* 2c: Ignore/Replace #2 */ + 0x22, 0x09, 0x0a, 0x0a /* 30: Rx FIFO Enable */ +}; + +static CONTROLLER_T sController[CTL_SIZE] = { + {-1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, + {0, 0, 0, 0}, {-1, -1, -1, -1}, {0, 0, 0, 0}}, + {-1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, + {0, 0, 0, 0}, {-1, -1, -1, -1}, {0, 0, 0, 0}}, + {-1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, + {0, 0, 0, 0}, {-1, -1, -1, -1}, {0, 0, 0, 0}}, + {-1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, + {0, 0, 0, 0}, {-1, -1, -1, -1}, {0, 0, 0, 0}} +}; + +static Byte_t sBitMapClrTbl[8] = { + 0xfe, 0xfd, 0xfb, 0xf7, 0xef, 0xdf, 0xbf, 0x7f +}; + +static Byte_t sBitMapSetTbl[8] = { + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 +}; + +static int sClockPrescale = 0x14; + +/* + * Line number is the ttySIx number (x), the Minor number. We + * assign them sequentially, starting at zero. The following + * array keeps track of the line number assigned to a given board/aiop/channel. 
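Editorial sketch: per the comment above, lineNumbers[] remembers which ttySIx/ttyRx line was handed out for a given board/AIOP/channel, and lines are assigned sequentially as ports are initialised. The simplified userspace model below mimics that bookkeeping; the index packing used here is an assumption for illustration only, the driver's GetLineNumber()/SetLineNumber() helpers own the real mapping.

        /* Simplified model of sequential line-number assignment, keyed by
         * (board, aiop, channel).  The index encoding is illustrative only. */
        #include <stdio.h>

        #define MAX_PORTS_DEMO 128

        static unsigned char line_numbers[MAX_PORTS_DEMO];
        static unsigned long next_line_number;

        static int key(int board, int aiop, int chan)
        {
                /* Assumed packing: 32 ports per board, 8 per AIOP. */
                return (board * 32) + (aiop * 8) + chan;
        }

        static unsigned char set_line_number(int board, int aiop, int chan)
        {
                line_numbers[key(board, aiop, chan)] = next_line_number++;
                return line_numbers[key(board, aiop, chan)];
        }

        static unsigned char get_line_number(int board, int aiop, int chan)
        {
                return line_numbers[key(board, aiop, chan)];
        }

        int main(void)
        {
                set_line_number(0, 0, 0);   /* ttyR0 */
                set_line_number(0, 0, 1);   /* ttyR1 */
                set_line_number(1, 0, 0);   /* ttyR2: next board continues the count */
                printf("board 1, aiop 0, chan 0 -> ttyR%d\n", get_line_number(1, 0, 0));
                return 0;
        }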
+ */ +static unsigned char lineNumbers[MAX_RP_PORTS]; +static unsigned long nextLineNumber; + +/***** RocketPort Static Prototypes *********/ +static int __init init_ISA(int i); +static void rp_wait_until_sent(struct tty_struct *tty, int timeout); +static void rp_flush_buffer(struct tty_struct *tty); +static void rmSpeakerReset(CONTROLLER_T * CtlP, unsigned long model); +static unsigned char GetLineNumber(int ctrl, int aiop, int ch); +static unsigned char SetLineNumber(int ctrl, int aiop, int ch); +static void rp_start(struct tty_struct *tty); +static int sInitChan(CONTROLLER_T * CtlP, CHANNEL_T * ChP, int AiopNum, + int ChanNum); +static void sSetInterfaceMode(CHANNEL_T * ChP, Byte_t mode); +static void sFlushRxFIFO(CHANNEL_T * ChP); +static void sFlushTxFIFO(CHANNEL_T * ChP); +static void sEnInterrupts(CHANNEL_T * ChP, Word_t Flags); +static void sDisInterrupts(CHANNEL_T * ChP, Word_t Flags); +static void sModemReset(CONTROLLER_T * CtlP, int chan, int on); +static void sPCIModemReset(CONTROLLER_T * CtlP, int chan, int on); +static int sWriteTxPrioByte(CHANNEL_T * ChP, Byte_t Data); +static int sPCIInitController(CONTROLLER_T * CtlP, int CtlNum, + ByteIO_t * AiopIOList, int AiopIOListSize, + WordIO_t ConfigIO, int IRQNum, Byte_t Frequency, + int PeriodicOnly, int altChanRingIndicator, + int UPCIRingInd); +static int sInitController(CONTROLLER_T * CtlP, int CtlNum, ByteIO_t MudbacIO, + ByteIO_t * AiopIOList, int AiopIOListSize, + int IRQNum, Byte_t Frequency, int PeriodicOnly); +static int sReadAiopID(ByteIO_t io); +static int sReadAiopNumChan(WordIO_t io); + +MODULE_AUTHOR("Theodore Ts'o"); +MODULE_DESCRIPTION("Comtrol RocketPort driver"); +module_param(board1, ulong, 0); +MODULE_PARM_DESC(board1, "I/O port for (ISA) board #1"); +module_param(board2, ulong, 0); +MODULE_PARM_DESC(board2, "I/O port for (ISA) board #2"); +module_param(board3, ulong, 0); +MODULE_PARM_DESC(board3, "I/O port for (ISA) board #3"); +module_param(board4, ulong, 0); +MODULE_PARM_DESC(board4, "I/O port for (ISA) board #4"); +module_param(controller, ulong, 0); +MODULE_PARM_DESC(controller, "I/O port for (ISA) rocketport controller"); +module_param(support_low_speed, bool, 0); +MODULE_PARM_DESC(support_low_speed, "1 means support 50 baud, 0 means support 460400 baud"); +module_param(modem1, ulong, 0); +MODULE_PARM_DESC(modem1, "1 means (ISA) board #1 is a RocketModem"); +module_param(modem2, ulong, 0); +MODULE_PARM_DESC(modem2, "1 means (ISA) board #2 is a RocketModem"); +module_param(modem3, ulong, 0); +MODULE_PARM_DESC(modem3, "1 means (ISA) board #3 is a RocketModem"); +module_param(modem4, ulong, 0); +MODULE_PARM_DESC(modem4, "1 means (ISA) board #4 is a RocketModem"); +module_param_array(pc104_1, ulong, NULL, 0); +MODULE_PARM_DESC(pc104_1, "set interface types for ISA(PC104) board #1 (e.g. pc104_1=232,232,485,485,..."); +module_param_array(pc104_2, ulong, NULL, 0); +MODULE_PARM_DESC(pc104_2, "set interface types for ISA(PC104) board #2 (e.g. pc104_2=232,232,485,485,..."); +module_param_array(pc104_3, ulong, NULL, 0); +MODULE_PARM_DESC(pc104_3, "set interface types for ISA(PC104) board #3 (e.g. pc104_3=232,232,485,485,..."); +module_param_array(pc104_4, ulong, NULL, 0); +MODULE_PARM_DESC(pc104_4, "set interface types for ISA(PC104) board #4 (e.g. 
pc104_4=232,232,485,485,..."); + +static int rp_init(void); +static void rp_cleanup_module(void); + +module_init(rp_init); +module_exit(rp_cleanup_module); + + +MODULE_LICENSE("Dual BSD/GPL"); + +/*************************************************************************/ +/* Module code starts here */ + +static inline int rocket_paranoia_check(struct r_port *info, + const char *routine) +{ +#ifdef ROCKET_PARANOIA_CHECK + if (!info) + return 1; + if (info->magic != RPORT_MAGIC) { + printk(KERN_WARNING "Warning: bad magic number for rocketport " + "struct in %s\n", routine); + return 1; + } +#endif + return 0; +} + + +/* Serial port receive data function. Called (from timer poll) when an AIOPIC signals + * that receive data is present on a serial port. Pulls data from FIFO, moves it into the + * tty layer. + */ +static void rp_do_receive(struct r_port *info, + struct tty_struct *tty, + CHANNEL_t * cp, unsigned int ChanStatus) +{ + unsigned int CharNStat; + int ToRecv, wRecv, space; + unsigned char *cbuf; + + ToRecv = sGetRxCnt(cp); +#ifdef ROCKET_DEBUG_INTR + printk(KERN_INFO "rp_do_receive(%d)...\n", ToRecv); +#endif + if (ToRecv == 0) + return; + + /* + * if status indicates there are errored characters in the + * FIFO, then enter status mode (a word in FIFO holds + * character and status). + */ + if (ChanStatus & (RXFOVERFL | RXBREAK | RXFRAME | RXPARITY)) { + if (!(ChanStatus & STATMODE)) { +#ifdef ROCKET_DEBUG_RECEIVE + printk(KERN_INFO "Entering STATMODE...\n"); +#endif + ChanStatus |= STATMODE; + sEnRxStatusMode(cp); + } + } + + /* + * if we previously entered status mode, then read down the + * FIFO one word at a time, pulling apart the character and + * the status. Update error counters depending on status + */ + if (ChanStatus & STATMODE) { +#ifdef ROCKET_DEBUG_RECEIVE + printk(KERN_INFO "Ignore %x, read %x...\n", + info->ignore_status_mask, info->read_status_mask); +#endif + while (ToRecv) { + char flag; + + CharNStat = sInW(sGetTxRxDataIO(cp)); +#ifdef ROCKET_DEBUG_RECEIVE + printk(KERN_INFO "%x...\n", CharNStat); +#endif + if (CharNStat & STMBREAKH) + CharNStat &= ~(STMFRAMEH | STMPARITYH); + if (CharNStat & info->ignore_status_mask) { + ToRecv--; + continue; + } + CharNStat &= info->read_status_mask; + if (CharNStat & STMBREAKH) + flag = TTY_BREAK; + else if (CharNStat & STMPARITYH) + flag = TTY_PARITY; + else if (CharNStat & STMFRAMEH) + flag = TTY_FRAME; + else if (CharNStat & STMRCVROVRH) + flag = TTY_OVERRUN; + else + flag = TTY_NORMAL; + tty_insert_flip_char(tty, CharNStat & 0xff, flag); + ToRecv--; + } + + /* + * after we've emptied the FIFO in status mode, turn + * status mode back off + */ + if (sGetRxCnt(cp) == 0) { +#ifdef ROCKET_DEBUG_RECEIVE + printk(KERN_INFO "Status mode off.\n"); +#endif + sDisRxStatusMode(cp); + } + } else { + /* + * we aren't in status mode, so read down the FIFO two + * characters at time by doing repeated word IO + * transfer. + */ + space = tty_prepare_flip_string(tty, &cbuf, ToRecv); + if (space < ToRecv) { +#ifdef ROCKET_DEBUG_RECEIVE + printk(KERN_INFO "rp_do_receive:insufficient space ToRecv=%d space=%d\n", ToRecv, space); +#endif + if (space <= 0) + return; + ToRecv = space; + } + wRecv = ToRecv >> 1; + if (wRecv) + sInStrW(sGetTxRxDataIO(cp), (unsigned short *) cbuf, wRecv); + if (ToRecv & 1) + cbuf[ToRecv - 1] = sInB(sGetTxRxDataIO(cp)); + } + /* Push the data up to the tty layer */ + tty_flip_buffer_push(tty); +} + +/* + * Serial port transmit data function. 
Called from the timer polling loop as a + * result of a bit set in xmit_flags[], indicating data (from the tty layer) is ready + * to be sent out the serial port. Data is buffered in rp_table[line].xmit_buf, it is + * moved to the port's xmit FIFO. *info is critical data, protected by spinlocks. + */ +static void rp_do_transmit(struct r_port *info) +{ + int c; + CHANNEL_t *cp = &info->channel; + struct tty_struct *tty; + unsigned long flags; + +#ifdef ROCKET_DEBUG_INTR + printk(KERN_DEBUG "%s\n", __func__); +#endif + if (!info) + return; + tty = tty_port_tty_get(&info->port); + + if (tty == NULL) { + printk(KERN_WARNING "rp: WARNING %s called with tty==NULL\n", __func__); + clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]); + return; + } + + spin_lock_irqsave(&info->slock, flags); + info->xmit_fifo_room = TXFIFO_SIZE - sGetTxCnt(cp); + + /* Loop sending data to FIFO until done or FIFO full */ + while (1) { + if (tty->stopped || tty->hw_stopped) + break; + c = min(info->xmit_fifo_room, info->xmit_cnt); + c = min(c, XMIT_BUF_SIZE - info->xmit_tail); + if (c <= 0 || info->xmit_fifo_room <= 0) + break; + sOutStrW(sGetTxRxDataIO(cp), (unsigned short *) (info->xmit_buf + info->xmit_tail), c / 2); + if (c & 1) + sOutB(sGetTxRxDataIO(cp), info->xmit_buf[info->xmit_tail + c - 1]); + info->xmit_tail += c; + info->xmit_tail &= XMIT_BUF_SIZE - 1; + info->xmit_cnt -= c; + info->xmit_fifo_room -= c; +#ifdef ROCKET_DEBUG_INTR + printk(KERN_INFO "tx %d chars...\n", c); +#endif + } + + if (info->xmit_cnt == 0) + clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]); + + if (info->xmit_cnt < WAKEUP_CHARS) { + tty_wakeup(tty); +#ifdef ROCKETPORT_HAVE_POLL_WAIT + wake_up_interruptible(&tty->poll_wait); +#endif + } + + spin_unlock_irqrestore(&info->slock, flags); + tty_kref_put(tty); + +#ifdef ROCKET_DEBUG_INTR + printk(KERN_DEBUG "(%d,%d,%d,%d)...\n", info->xmit_cnt, info->xmit_head, + info->xmit_tail, info->xmit_fifo_room); +#endif +} + +/* + * Called when a serial port signals it has read data in it's RX FIFO. + * It checks what interrupts are pending and services them, including + * receiving serial data. + */ +static void rp_handle_port(struct r_port *info) +{ + CHANNEL_t *cp; + struct tty_struct *tty; + unsigned int IntMask, ChanStatus; + + if (!info) + return; + + if ((info->port.flags & ASYNC_INITIALIZED) == 0) { + printk(KERN_WARNING "rp: WARNING: rp_handle_port called with " + "info->flags & NOT_INIT\n"); + return; + } + tty = tty_port_tty_get(&info->port); + if (!tty) { + printk(KERN_WARNING "rp: WARNING: rp_handle_port called with " + "tty==NULL\n"); + return; + } + cp = &info->channel; + + IntMask = sGetChanIntID(cp) & info->intmask; +#ifdef ROCKET_DEBUG_INTR + printk(KERN_INFO "rp_interrupt %02x...\n", IntMask); +#endif + ChanStatus = sGetChanStatus(cp); + if (IntMask & RXF_TRIG) { /* Rx FIFO trigger level */ + rp_do_receive(info, tty, cp, ChanStatus); + } + if (IntMask & DELTA_CD) { /* CD change */ +#if (defined(ROCKET_DEBUG_OPEN) || defined(ROCKET_DEBUG_INTR) || defined(ROCKET_DEBUG_HANGUP)) + printk(KERN_INFO "ttyR%d CD now %s...\n", info->line, + (ChanStatus & CD_ACT) ? "on" : "off"); +#endif + if (!(ChanStatus & CD_ACT) && info->cd_status) { +#ifdef ROCKET_DEBUG_HANGUP + printk(KERN_INFO "CD drop, calling hangup.\n"); +#endif + tty_hangup(tty); + } + info->cd_status = (ChanStatus & CD_ACT) ? 
1 : 0; + wake_up_interruptible(&info->port.open_wait); + } +#ifdef ROCKET_DEBUG_INTR + if (IntMask & DELTA_CTS) { /* CTS change */ + printk(KERN_INFO "CTS change...\n"); + } + if (IntMask & DELTA_DSR) { /* DSR change */ + printk(KERN_INFO "DSR change...\n"); + } +#endif + tty_kref_put(tty); +} + +/* + * The top level polling routine. Repeats every 1/100 HZ (10ms). + */ +static void rp_do_poll(unsigned long dummy) +{ + CONTROLLER_t *ctlp; + int ctrl, aiop, ch, line; + unsigned int xmitmask, i; + unsigned int CtlMask; + unsigned char AiopMask; + Word_t bit; + + /* Walk through all the boards (ctrl's) */ + for (ctrl = 0; ctrl < max_board; ctrl++) { + if (rcktpt_io_addr[ctrl] <= 0) + continue; + + /* Get a ptr to the board's control struct */ + ctlp = sCtlNumToCtlPtr(ctrl); + + /* Get the interrupt status from the board */ +#ifdef CONFIG_PCI + if (ctlp->BusType == isPCI) + CtlMask = sPCIGetControllerIntStatus(ctlp); + else +#endif + CtlMask = sGetControllerIntStatus(ctlp); + + /* Check if any AIOP read bits are set */ + for (aiop = 0; CtlMask; aiop++) { + bit = ctlp->AiopIntrBits[aiop]; + if (CtlMask & bit) { + CtlMask &= ~bit; + AiopMask = sGetAiopIntStatus(ctlp, aiop); + + /* Check if any port read bits are set */ + for (ch = 0; AiopMask; AiopMask >>= 1, ch++) { + if (AiopMask & 1) { + + /* Get the line number (/dev/ttyRx number). */ + /* Read the data from the port. */ + line = GetLineNumber(ctrl, aiop, ch); + rp_handle_port(rp_table[line]); + } + } + } + } + + xmitmask = xmit_flags[ctrl]; + + /* + * xmit_flags contains bit-significant flags, indicating there is data + * to xmit on the port. Bit 0 is port 0 on this board, bit 1 is port + * 1, ... (32 total possible). The variable i has the aiop and ch + * numbers encoded in it (port 0-7 are aiop0, 8-15 are aiop1, etc). + */ + if (xmitmask) { + for (i = 0; i < rocketModel[ctrl].numPorts; i++) { + if (xmitmask & (1 << i)) { + aiop = (i & 0x18) >> 3; + ch = i & 0x07; + line = GetLineNumber(ctrl, aiop, ch); + rp_do_transmit(rp_table[line]); + } + } + } + } + + /* + * Reset the timer so we get called at the next clock tick (10ms). + */ + if (atomic_read(&rp_num_ports_open)) + mod_timer(&rocket_timer, jiffies + POLL_PERIOD); +} + +/* + * Initializes the r_port structure for a port, as well as enabling the port on + * the board. 
+ * Inputs: board, aiop, chan numbers + */ +static void init_r_port(int board, int aiop, int chan, struct pci_dev *pci_dev) +{ + unsigned rocketMode; + struct r_port *info; + int line; + CONTROLLER_T *ctlp; + + /* Get the next available line number */ + line = SetLineNumber(board, aiop, chan); + + ctlp = sCtlNumToCtlPtr(board); + + /* Get a r_port struct for the port, fill it in and save it globally, indexed by line number */ + info = kzalloc(sizeof (struct r_port), GFP_KERNEL); + if (!info) { + printk(KERN_ERR "Couldn't allocate info struct for line #%d\n", + line); + return; + } + + info->magic = RPORT_MAGIC; + info->line = line; + info->ctlp = ctlp; + info->board = board; + info->aiop = aiop; + info->chan = chan; + tty_port_init(&info->port); + info->port.ops = &rocket_port_ops; + init_completion(&info->close_wait); + info->flags &= ~ROCKET_MODE_MASK; + switch (pc104[board][line]) { + case 422: + info->flags |= ROCKET_MODE_RS422; + break; + case 485: + info->flags |= ROCKET_MODE_RS485; + break; + case 232: + default: + info->flags |= ROCKET_MODE_RS232; + break; + } + + info->intmask = RXF_TRIG | TXFIFO_MT | SRC_INT | DELTA_CD | DELTA_CTS | DELTA_DSR; + if (sInitChan(ctlp, &info->channel, aiop, chan) == 0) { + printk(KERN_ERR "RocketPort sInitChan(%d, %d, %d) failed!\n", + board, aiop, chan); + kfree(info); + return; + } + + rocketMode = info->flags & ROCKET_MODE_MASK; + + if ((info->flags & ROCKET_RTS_TOGGLE) || (rocketMode == ROCKET_MODE_RS485)) + sEnRTSToggle(&info->channel); + else + sDisRTSToggle(&info->channel); + + if (ctlp->boardType == ROCKET_TYPE_PC104) { + switch (rocketMode) { + case ROCKET_MODE_RS485: + sSetInterfaceMode(&info->channel, InterfaceModeRS485); + break; + case ROCKET_MODE_RS422: + sSetInterfaceMode(&info->channel, InterfaceModeRS422); + break; + case ROCKET_MODE_RS232: + default: + if (info->flags & ROCKET_RTS_TOGGLE) + sSetInterfaceMode(&info->channel, InterfaceModeRS232T); + else + sSetInterfaceMode(&info->channel, InterfaceModeRS232); + break; + } + } + spin_lock_init(&info->slock); + mutex_init(&info->write_mtx); + rp_table[line] = info; + tty_register_device(rocket_driver, line, pci_dev ? &pci_dev->dev : + NULL); +} + +/* + * Configures a rocketport port according to its termio settings. Called from + * user mode into the driver (exception handler). *info CD manipulation is spinlock protected. 
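Editorial note: configure_r_port(), which follows, derives the programmed divisor from the board's base rate with rounding, divisor = (base + baud/2) / baud - 1, so the achieved rate is base / (divisor + 1). A worked check of that arithmetic, using the 460800 base that the probe code sets for standard-speed boards:

        /* Worked example of the baud divisor arithmetic used by configure_r_port().
         * An rp_baud_base of 460800 matches the default (non-low-speed, non-Plus) setup. */
        #include <stdio.h>

        int main(void)
        {
                int base = 460800;
                int bauds[] = { 9600, 38400, 115200 };
                int i;

                for (i = 0; i < 3; i++) {
                        int baud = bauds[i];
                        int divisor = ((base + (baud >> 1)) / baud) - 1;
                        int actual = base / (divisor + 1);

                        printf("baud %6d -> divisor %3d -> actual %6d\n",
                               baud, divisor, actual);
                }
                return 0;
        }

For 9600 baud this gives divisor 47 and an exact 460800 / 48 = 9600; the rounding term (baud >> 1) keeps the divisor from being truncated a step too low.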
+ */ +static void configure_r_port(struct tty_struct *tty, struct r_port *info, + struct ktermios *old_termios) +{ + unsigned cflag; + unsigned long flags; + unsigned rocketMode; + int bits, baud, divisor; + CHANNEL_t *cp; + struct ktermios *t = tty->termios; + + cp = &info->channel; + cflag = t->c_cflag; + + /* Byte size and parity */ + if ((cflag & CSIZE) == CS8) { + sSetData8(cp); + bits = 10; + } else { + sSetData7(cp); + bits = 9; + } + if (cflag & CSTOPB) { + sSetStop2(cp); + bits++; + } else { + sSetStop1(cp); + } + + if (cflag & PARENB) { + sEnParity(cp); + bits++; + if (cflag & PARODD) { + sSetOddParity(cp); + } else { + sSetEvenParity(cp); + } + } else { + sDisParity(cp); + } + + /* baud rate */ + baud = tty_get_baud_rate(tty); + if (!baud) + baud = 9600; + divisor = ((rp_baud_base[info->board] + (baud >> 1)) / baud) - 1; + if ((divisor >= 8192 || divisor < 0) && old_termios) { + baud = tty_termios_baud_rate(old_termios); + if (!baud) + baud = 9600; + divisor = (rp_baud_base[info->board] / baud) - 1; + } + if (divisor >= 8192 || divisor < 0) { + baud = 9600; + divisor = (rp_baud_base[info->board] / baud) - 1; + } + info->cps = baud / bits; + sSetBaud(cp, divisor); + + /* FIXME: Should really back compute a baud rate from the divisor */ + tty_encode_baud_rate(tty, baud, baud); + + if (cflag & CRTSCTS) { + info->intmask |= DELTA_CTS; + sEnCTSFlowCtl(cp); + } else { + info->intmask &= ~DELTA_CTS; + sDisCTSFlowCtl(cp); + } + if (cflag & CLOCAL) { + info->intmask &= ~DELTA_CD; + } else { + spin_lock_irqsave(&info->slock, flags); + if (sGetChanStatus(cp) & CD_ACT) + info->cd_status = 1; + else + info->cd_status = 0; + info->intmask |= DELTA_CD; + spin_unlock_irqrestore(&info->slock, flags); + } + + /* + * Handle software flow control in the board + */ +#ifdef ROCKET_SOFT_FLOW + if (I_IXON(tty)) { + sEnTxSoftFlowCtl(cp); + if (I_IXANY(tty)) { + sEnIXANY(cp); + } else { + sDisIXANY(cp); + } + sSetTxXONChar(cp, START_CHAR(tty)); + sSetTxXOFFChar(cp, STOP_CHAR(tty)); + } else { + sDisTxSoftFlowCtl(cp); + sDisIXANY(cp); + sClrTxXOFF(cp); + } +#endif + + /* + * Set up ignore/read mask words + */ + info->read_status_mask = STMRCVROVRH | 0xFF; + if (I_INPCK(tty)) + info->read_status_mask |= STMFRAMEH | STMPARITYH; + if (I_BRKINT(tty) || I_PARMRK(tty)) + info->read_status_mask |= STMBREAKH; + + /* + * Characters to ignore + */ + info->ignore_status_mask = 0; + if (I_IGNPAR(tty)) + info->ignore_status_mask |= STMFRAMEH | STMPARITYH; + if (I_IGNBRK(tty)) { + info->ignore_status_mask |= STMBREAKH; + /* + * If we're ignoring parity and break indicators, + * ignore overruns too. (For real raw support). + */ + if (I_IGNPAR(tty)) + info->ignore_status_mask |= STMRCVROVRH; + } + + rocketMode = info->flags & ROCKET_MODE_MASK; + + if ((info->flags & ROCKET_RTS_TOGGLE) + || (rocketMode == ROCKET_MODE_RS485)) + sEnRTSToggle(cp); + else + sDisRTSToggle(cp); + + sSetRTS(&info->channel); + + if (cp->CtlP->boardType == ROCKET_TYPE_PC104) { + switch (rocketMode) { + case ROCKET_MODE_RS485: + sSetInterfaceMode(cp, InterfaceModeRS485); + break; + case ROCKET_MODE_RS422: + sSetInterfaceMode(cp, InterfaceModeRS422); + break; + case ROCKET_MODE_RS232: + default: + if (info->flags & ROCKET_RTS_TOGGLE) + sSetInterfaceMode(cp, InterfaceModeRS232T); + else + sSetInterfaceMode(cp, InterfaceModeRS232); + break; + } + } +} + +static int carrier_raised(struct tty_port *port) +{ + struct r_port *info = container_of(port, struct r_port, port); + return (sGetChanStatusLo(&info->channel) & CD_ACT) ? 
1 : 0; +} + +static void dtr_rts(struct tty_port *port, int on) +{ + struct r_port *info = container_of(port, struct r_port, port); + if (on) { + sSetDTR(&info->channel); + sSetRTS(&info->channel); + } else { + sClrDTR(&info->channel); + sClrRTS(&info->channel); + } +} + +/* + * Exception handler that opens a serial port. Creates xmit_buf storage, fills in + * port's r_port struct. Initializes the port hardware. + */ +static int rp_open(struct tty_struct *tty, struct file *filp) +{ + struct r_port *info; + struct tty_port *port; + int line = 0, retval; + CHANNEL_t *cp; + unsigned long page; + + line = tty->index; + if (line < 0 || line >= MAX_RP_PORTS || ((info = rp_table[line]) == NULL)) + return -ENXIO; + port = &info->port; + + page = __get_free_page(GFP_KERNEL); + if (!page) + return -ENOMEM; + + if (port->flags & ASYNC_CLOSING) { + retval = wait_for_completion_interruptible(&info->close_wait); + free_page(page); + if (retval) + return retval; + return ((port->flags & ASYNC_HUP_NOTIFY) ? -EAGAIN : -ERESTARTSYS); + } + + /* + * We must not sleep from here until the port is marked fully in use. + */ + if (info->xmit_buf) + free_page(page); + else + info->xmit_buf = (unsigned char *) page; + + tty->driver_data = info; + tty_port_tty_set(port, tty); + + if (port->count++ == 0) { + atomic_inc(&rp_num_ports_open); + +#ifdef ROCKET_DEBUG_OPEN + printk(KERN_INFO "rocket mod++ = %d...\n", + atomic_read(&rp_num_ports_open)); +#endif + } +#ifdef ROCKET_DEBUG_OPEN + printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count); +#endif + + /* + * Info->count is now 1; so it's safe to sleep now. + */ + if (!test_bit(ASYNCB_INITIALIZED, &port->flags)) { + cp = &info->channel; + sSetRxTrigger(cp, TRIG_1); + if (sGetChanStatus(cp) & CD_ACT) + info->cd_status = 1; + else + info->cd_status = 0; + sDisRxStatusMode(cp); + sFlushRxFIFO(cp); + sFlushTxFIFO(cp); + + sEnInterrupts(cp, (TXINT_EN | MCINT_EN | RXINT_EN | SRCINT_EN | CHANINT_EN)); + sSetRxTrigger(cp, TRIG_1); + + sGetChanStatus(cp); + sDisRxStatusMode(cp); + sClrTxXOFF(cp); + + sDisCTSFlowCtl(cp); + sDisTxSoftFlowCtl(cp); + + sEnRxFIFO(cp); + sEnTransmit(cp); + + set_bit(ASYNCB_INITIALIZED, &info->port.flags); + + /* + * Set up the tty->alt_speed kludge + */ + if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_HI) + tty->alt_speed = 57600; + if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_VHI) + tty->alt_speed = 115200; + if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_SHI) + tty->alt_speed = 230400; + if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_WARP) + tty->alt_speed = 460800; + + configure_r_port(tty, info, NULL); + if (tty->termios->c_cflag & CBAUD) { + sSetDTR(cp); + sSetRTS(cp); + } + } + /* Starts (or resets) the maint polling loop */ + mod_timer(&rocket_timer, jiffies + POLL_PERIOD); + + retval = tty_port_block_til_ready(port, tty, filp); + if (retval) { +#ifdef ROCKET_DEBUG_OPEN + printk(KERN_INFO "rp_open returning after block_til_ready with %d\n", retval); +#endif + return retval; + } + return 0; +} + +/* + * Exception handler that closes a serial port. info->port.count is considered critical. 
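Editorial note: rp_close(), which follows, waits for the transmit FIFO to drain before dropping DTR, bounding the wait at (pending characters + 1) * HZ / cps jiffies, where cps was estimated in configure_r_port() as baud divided by bits per character. A quick numeric check of that bound; HZ is assumed to be 100 purely for the illustration.

        /* Worked example of the drain-timeout estimate used in rp_close().
         * HZ is assumed to be 100 here for illustration only. */
        #include <stdio.h>

        #define HZ_DEMO 100

        int main(void)
        {
                int baud = 9600, bits = 10;      /* 8N1: start + 8 data + stop */
                int cps = baud / bits;           /* ~960 characters per second */
                int tx_pending = 100;            /* characters still in the FIFO */
                int timeout = (tx_pending + 1) * HZ_DEMO / cps;

                if (timeout == 0)
                        timeout = 1;             /* never wait zero jiffies */
                printf("cps=%d pending=%d -> timeout=%d jiffies (~%d ms)\n",
                       cps, tx_pending, timeout, timeout * 1000 / HZ_DEMO);
                return 0;
        }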
+ */ +static void rp_close(struct tty_struct *tty, struct file *filp) +{ + struct r_port *info = tty->driver_data; + struct tty_port *port = &info->port; + int timeout; + CHANNEL_t *cp; + + if (rocket_paranoia_check(info, "rp_close")) + return; + +#ifdef ROCKET_DEBUG_OPEN + printk(KERN_INFO "rp_close ttyR%d, count = %d\n", info->line, info->port.count); +#endif + + if (tty_port_close_start(port, tty, filp) == 0) + return; + + mutex_lock(&port->mutex); + cp = &info->channel; + /* + * Before we drop DTR, make sure the UART transmitter + * has completely drained; this is especially + * important if there is a transmit FIFO! + */ + timeout = (sGetTxCnt(cp) + 1) * HZ / info->cps; + if (timeout == 0) + timeout = 1; + rp_wait_until_sent(tty, timeout); + clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]); + + sDisTransmit(cp); + sDisInterrupts(cp, (TXINT_EN | MCINT_EN | RXINT_EN | SRCINT_EN | CHANINT_EN)); + sDisCTSFlowCtl(cp); + sDisTxSoftFlowCtl(cp); + sClrTxXOFF(cp); + sFlushRxFIFO(cp); + sFlushTxFIFO(cp); + sClrRTS(cp); + if (C_HUPCL(tty)) + sClrDTR(cp); + + rp_flush_buffer(tty); + + tty_ldisc_flush(tty); + + clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]); + + /* We can't yet use tty_port_close_end as the buffer handling in this + driver is a bit different to the usual */ + + if (port->blocked_open) { + if (port->close_delay) { + msleep_interruptible(jiffies_to_msecs(port->close_delay)); + } + wake_up_interruptible(&port->open_wait); + } else { + if (info->xmit_buf) { + free_page((unsigned long) info->xmit_buf); + info->xmit_buf = NULL; + } + } + spin_lock_irq(&port->lock); + info->port.flags &= ~(ASYNC_INITIALIZED | ASYNC_CLOSING | ASYNC_NORMAL_ACTIVE); + tty->closing = 0; + spin_unlock_irq(&port->lock); + mutex_unlock(&port->mutex); + tty_port_tty_set(port, NULL); + + wake_up_interruptible(&port->close_wait); + complete_all(&info->close_wait); + atomic_dec(&rp_num_ports_open); + +#ifdef ROCKET_DEBUG_OPEN + printk(KERN_INFO "rocket mod-- = %d...\n", + atomic_read(&rp_num_ports_open)); + printk(KERN_INFO "rp_close ttyR%d complete shutdown\n", info->line); +#endif + +} + +static void rp_set_termios(struct tty_struct *tty, + struct ktermios *old_termios) +{ + struct r_port *info = tty->driver_data; + CHANNEL_t *cp; + unsigned cflag; + + if (rocket_paranoia_check(info, "rp_set_termios")) + return; + + cflag = tty->termios->c_cflag; + + /* + * This driver doesn't support CS5 or CS6 + */ + if (((cflag & CSIZE) == CS5) || ((cflag & CSIZE) == CS6)) + tty->termios->c_cflag = + ((cflag & ~CSIZE) | (old_termios->c_cflag & CSIZE)); + /* Or CMSPAR */ + tty->termios->c_cflag &= ~CMSPAR; + + configure_r_port(tty, info, old_termios); + + cp = &info->channel; + + /* Handle transition to B0 status */ + if ((old_termios->c_cflag & CBAUD) && !(tty->termios->c_cflag & CBAUD)) { + sClrDTR(cp); + sClrRTS(cp); + } + + /* Handle transition away from B0 status */ + if (!(old_termios->c_cflag & CBAUD) && (tty->termios->c_cflag & CBAUD)) { + if (!tty->hw_stopped || !(tty->termios->c_cflag & CRTSCTS)) + sSetRTS(cp); + sSetDTR(cp); + } + + if ((old_termios->c_cflag & CRTSCTS) && !(tty->termios->c_cflag & CRTSCTS)) { + tty->hw_stopped = 0; + rp_start(tty); + } +} + +static int rp_break(struct tty_struct *tty, int break_state) +{ + struct r_port *info = tty->driver_data; + unsigned long flags; + + if (rocket_paranoia_check(info, "rp_break")) + return -EINVAL; + + spin_lock_irqsave(&info->slock, flags); + if (break_state == -1) + sSendBreak(&info->channel); + else + 
sClrBreak(&info->channel); + spin_unlock_irqrestore(&info->slock, flags); + return 0; +} + +/* + * sGetChanRI used to be a macro in rocket_int.h. When the functionality for + * the UPCI boards was added, it was decided to make this a function because + * the macro was getting too complicated. All cases except the first one + * (UPCIRingInd) are taken directly from the original macro. + */ +static int sGetChanRI(CHANNEL_T * ChP) +{ + CONTROLLER_t *CtlP = ChP->CtlP; + int ChanNum = ChP->ChanNum; + int RingInd = 0; + + if (CtlP->UPCIRingInd) + RingInd = !(sInB(CtlP->UPCIRingInd) & sBitMapSetTbl[ChanNum]); + else if (CtlP->AltChanRingIndicator) + RingInd = sInB((ByteIO_t) (ChP->ChanStat + 8)) & DSR_ACT; + else if (CtlP->boardType == ROCKET_TYPE_PC104) + RingInd = !(sInB(CtlP->AiopIO[3]) & sBitMapSetTbl[ChanNum]); + + return RingInd; +} + +/********************************************************************************************/ +/* Here are the routines used by rp_ioctl. These are all called from exception handlers. */ + +/* + * Returns the state of the serial modem control lines. These next 2 functions + * are the way kernel versions > 2.5 handle modem control lines rather than IOCTLs. + */ +static int rp_tiocmget(struct tty_struct *tty) +{ + struct r_port *info = tty->driver_data; + unsigned int control, result, ChanStatus; + + ChanStatus = sGetChanStatusLo(&info->channel); + control = info->channel.TxControl[3]; + result = ((control & SET_RTS) ? TIOCM_RTS : 0) | + ((control & SET_DTR) ? TIOCM_DTR : 0) | + ((ChanStatus & CD_ACT) ? TIOCM_CAR : 0) | + (sGetChanRI(&info->channel) ? TIOCM_RNG : 0) | + ((ChanStatus & DSR_ACT) ? TIOCM_DSR : 0) | + ((ChanStatus & CTS_ACT) ? TIOCM_CTS : 0); + + return result; +} + +/* + * Sets the modem control lines + */ +static int rp_tiocmset(struct tty_struct *tty, + unsigned int set, unsigned int clear) +{ + struct r_port *info = tty->driver_data; + + if (set & TIOCM_RTS) + info->channel.TxControl[3] |= SET_RTS; + if (set & TIOCM_DTR) + info->channel.TxControl[3] |= SET_DTR; + if (clear & TIOCM_RTS) + info->channel.TxControl[3] &= ~SET_RTS; + if (clear & TIOCM_DTR) + info->channel.TxControl[3] &= ~SET_DTR; + + out32(info->channel.IndexAddr, info->channel.TxControl); + return 0; +} + +static int get_config(struct r_port *info, struct rocket_config __user *retinfo) +{ + struct rocket_config tmp; + + if (!retinfo) + return -EFAULT; + memset(&tmp, 0, sizeof (tmp)); + mutex_lock(&info->port.mutex); + tmp.line = info->line; + tmp.flags = info->flags; + tmp.close_delay = info->port.close_delay; + tmp.closing_wait = info->port.closing_wait; + tmp.port = rcktpt_io_addr[(info->line >> 5) & 3]; + mutex_unlock(&info->port.mutex); + + if (copy_to_user(retinfo, &tmp, sizeof (*retinfo))) + return -EFAULT; + return 0; +} + +static int set_config(struct tty_struct *tty, struct r_port *info, + struct rocket_config __user *new_info) +{ + struct rocket_config new_serial; + + if (copy_from_user(&new_serial, new_info, sizeof (new_serial))) + return -EFAULT; + + mutex_lock(&info->port.mutex); + if (!capable(CAP_SYS_ADMIN)) + { + if ((new_serial.flags & ~ROCKET_USR_MASK) != (info->flags & ~ROCKET_USR_MASK)) { + mutex_unlock(&info->port.mutex); + return -EPERM; + } + info->flags = ((info->flags & ~ROCKET_USR_MASK) | (new_serial.flags & ROCKET_USR_MASK)); + configure_r_port(tty, info, NULL); + mutex_unlock(&info->port.mutex); + return 0; + } + + info->flags = ((info->flags & ~ROCKET_FLAGS) | (new_serial.flags & ROCKET_FLAGS)); + info->port.close_delay = new_serial.close_delay; + 
info->port.closing_wait = new_serial.closing_wait; + + if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_HI) + tty->alt_speed = 57600; + if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_VHI) + tty->alt_speed = 115200; + if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_SHI) + tty->alt_speed = 230400; + if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_WARP) + tty->alt_speed = 460800; + mutex_unlock(&info->port.mutex); + + configure_r_port(tty, info, NULL); + return 0; +} + +/* + * This function fills in a rocket_ports struct with information + * about what boards/ports are in the system. This info is passed + * to user space. See setrocket.c where the info is used to create + * the /dev/ttyRx ports. + */ +static int get_ports(struct r_port *info, struct rocket_ports __user *retports) +{ + struct rocket_ports tmp; + int board; + + if (!retports) + return -EFAULT; + memset(&tmp, 0, sizeof (tmp)); + tmp.tty_major = rocket_driver->major; + + for (board = 0; board < 4; board++) { + tmp.rocketModel[board].model = rocketModel[board].model; + strcpy(tmp.rocketModel[board].modelString, rocketModel[board].modelString); + tmp.rocketModel[board].numPorts = rocketModel[board].numPorts; + tmp.rocketModel[board].loadrm2 = rocketModel[board].loadrm2; + tmp.rocketModel[board].startingPortNumber = rocketModel[board].startingPortNumber; + } + if (copy_to_user(retports, &tmp, sizeof (*retports))) + return -EFAULT; + return 0; +} + +static int reset_rm2(struct r_port *info, void __user *arg) +{ + int reset; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (copy_from_user(&reset, arg, sizeof (int))) + return -EFAULT; + if (reset) + reset = 1; + + if (rcktpt_type[info->board] != ROCKET_TYPE_MODEMII && + rcktpt_type[info->board] != ROCKET_TYPE_MODEMIII) + return -EINVAL; + + if (info->ctlp->BusType == isISA) + sModemReset(info->ctlp, info->chan, reset); + else + sPCIModemReset(info->ctlp, info->chan, reset); + + return 0; +} + +static int get_version(struct r_port *info, struct rocket_version __user *retvers) +{ + if (copy_to_user(retvers, &driver_version, sizeof (*retvers))) + return -EFAULT; + return 0; +} + +/* IOCTL call handler into the driver */ +static int rp_ioctl(struct tty_struct *tty, + unsigned int cmd, unsigned long arg) +{ + struct r_port *info = tty->driver_data; + void __user *argp = (void __user *)arg; + int ret = 0; + + if (cmd != RCKP_GET_PORTS && rocket_paranoia_check(info, "rp_ioctl")) + return -ENXIO; + + switch (cmd) { + case RCKP_GET_STRUCT: + if (copy_to_user(argp, info, sizeof (struct r_port))) + ret = -EFAULT; + break; + case RCKP_GET_CONFIG: + ret = get_config(info, argp); + break; + case RCKP_SET_CONFIG: + ret = set_config(tty, info, argp); + break; + case RCKP_GET_PORTS: + ret = get_ports(info, argp); + break; + case RCKP_RESET_RM2: + ret = reset_rm2(info, argp); + break; + case RCKP_GET_VERSION: + ret = get_version(info, argp); + break; + default: + ret = -ENOIOCTLCMD; + } + return ret; +} + +static void rp_send_xchar(struct tty_struct *tty, char ch) +{ + struct r_port *info = tty->driver_data; + CHANNEL_t *cp; + + if (rocket_paranoia_check(info, "rp_send_xchar")) + return; + + cp = &info->channel; + if (sGetTxCnt(cp)) + sWriteTxPrioByte(cp, ch); + else + sWriteTxByte(sGetTxRxDataIO(cp), ch); +} + +static void rp_throttle(struct tty_struct *tty) +{ + struct r_port *info = tty->driver_data; + CHANNEL_t *cp; + +#ifdef ROCKET_DEBUG_THROTTLE + printk(KERN_INFO "throttle %s: %d....\n", tty->name, + tty->ldisc.chars_in_buffer(tty)); +#endif + + if (rocket_paranoia_check(info, 
"rp_throttle")) + return; + + cp = &info->channel; + if (I_IXOFF(tty)) + rp_send_xchar(tty, STOP_CHAR(tty)); + + sClrRTS(&info->channel); +} + +static void rp_unthrottle(struct tty_struct *tty) +{ + struct r_port *info = tty->driver_data; + CHANNEL_t *cp; +#ifdef ROCKET_DEBUG_THROTTLE + printk(KERN_INFO "unthrottle %s: %d....\n", tty->name, + tty->ldisc.chars_in_buffer(tty)); +#endif + + if (rocket_paranoia_check(info, "rp_throttle")) + return; + + cp = &info->channel; + if (I_IXOFF(tty)) + rp_send_xchar(tty, START_CHAR(tty)); + + sSetRTS(&info->channel); +} + +/* + * ------------------------------------------------------------ + * rp_stop() and rp_start() + * + * This routines are called before setting or resetting tty->stopped. + * They enable or disable transmitter interrupts, as necessary. + * ------------------------------------------------------------ + */ +static void rp_stop(struct tty_struct *tty) +{ + struct r_port *info = tty->driver_data; + +#ifdef ROCKET_DEBUG_FLOW + printk(KERN_INFO "stop %s: %d %d....\n", tty->name, + info->xmit_cnt, info->xmit_fifo_room); +#endif + + if (rocket_paranoia_check(info, "rp_stop")) + return; + + if (sGetTxCnt(&info->channel)) + sDisTransmit(&info->channel); +} + +static void rp_start(struct tty_struct *tty) +{ + struct r_port *info = tty->driver_data; + +#ifdef ROCKET_DEBUG_FLOW + printk(KERN_INFO "start %s: %d %d....\n", tty->name, + info->xmit_cnt, info->xmit_fifo_room); +#endif + + if (rocket_paranoia_check(info, "rp_stop")) + return; + + sEnTransmit(&info->channel); + set_bit((info->aiop * 8) + info->chan, + (void *) &xmit_flags[info->board]); +} + +/* + * rp_wait_until_sent() --- wait until the transmitter is empty + */ +static void rp_wait_until_sent(struct tty_struct *tty, int timeout) +{ + struct r_port *info = tty->driver_data; + CHANNEL_t *cp; + unsigned long orig_jiffies; + int check_time, exit_time; + int txcnt; + + if (rocket_paranoia_check(info, "rp_wait_until_sent")) + return; + + cp = &info->channel; + + orig_jiffies = jiffies; +#ifdef ROCKET_DEBUG_WAIT_UNTIL_SENT + printk(KERN_INFO "In RP_wait_until_sent(%d) (jiff=%lu)...\n", timeout, + jiffies); + printk(KERN_INFO "cps=%d...\n", info->cps); +#endif + while (1) { + txcnt = sGetTxCnt(cp); + if (!txcnt) { + if (sGetChanStatusLo(cp) & TXSHRMT) + break; + check_time = (HZ / info->cps) / 5; + } else { + check_time = HZ * txcnt / info->cps; + } + if (timeout) { + exit_time = orig_jiffies + timeout - jiffies; + if (exit_time <= 0) + break; + if (exit_time < check_time) + check_time = exit_time; + } + if (check_time == 0) + check_time = 1; +#ifdef ROCKET_DEBUG_WAIT_UNTIL_SENT + printk(KERN_INFO "txcnt = %d (jiff=%lu,check=%d)...\n", txcnt, + jiffies, check_time); +#endif + msleep_interruptible(jiffies_to_msecs(check_time)); + if (signal_pending(current)) + break; + } + __set_current_state(TASK_RUNNING); +#ifdef ROCKET_DEBUG_WAIT_UNTIL_SENT + printk(KERN_INFO "txcnt = %d (jiff=%lu)...done\n", txcnt, jiffies); +#endif +} + +/* + * rp_hangup() --- called by tty_hangup() when a hangup is signaled. 
+ */ +static void rp_hangup(struct tty_struct *tty) +{ + CHANNEL_t *cp; + struct r_port *info = tty->driver_data; + unsigned long flags; + + if (rocket_paranoia_check(info, "rp_hangup")) + return; + +#if (defined(ROCKET_DEBUG_OPEN) || defined(ROCKET_DEBUG_HANGUP)) + printk(KERN_INFO "rp_hangup of ttyR%d...\n", info->line); +#endif + rp_flush_buffer(tty); + spin_lock_irqsave(&info->port.lock, flags); + if (info->port.flags & ASYNC_CLOSING) { + spin_unlock_irqrestore(&info->port.lock, flags); + return; + } + if (info->port.count) + atomic_dec(&rp_num_ports_open); + clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]); + spin_unlock_irqrestore(&info->port.lock, flags); + + tty_port_hangup(&info->port); + + cp = &info->channel; + sDisRxFIFO(cp); + sDisTransmit(cp); + sDisInterrupts(cp, (TXINT_EN | MCINT_EN | RXINT_EN | SRCINT_EN | CHANINT_EN)); + sDisCTSFlowCtl(cp); + sDisTxSoftFlowCtl(cp); + sClrTxXOFF(cp); + clear_bit(ASYNCB_INITIALIZED, &info->port.flags); + + wake_up_interruptible(&info->port.open_wait); +} + +/* + * Exception handler - write char routine. The RocketPort driver uses a + * double-buffering strategy, with the twist that if the in-memory CPU + * buffer is empty, and there's space in the transmit FIFO, the + * writing routines will write directly to transmit FIFO. + * Write buffer and counters protected by spinlocks + */ +static int rp_put_char(struct tty_struct *tty, unsigned char ch) +{ + struct r_port *info = tty->driver_data; + CHANNEL_t *cp; + unsigned long flags; + + if (rocket_paranoia_check(info, "rp_put_char")) + return 0; + + /* + * Grab the port write mutex, locking out other processes that try to + * write to this port + */ + mutex_lock(&info->write_mtx); + +#ifdef ROCKET_DEBUG_WRITE + printk(KERN_INFO "rp_put_char %c...\n", ch); +#endif + + spin_lock_irqsave(&info->slock, flags); + cp = &info->channel; + + if (!tty->stopped && !tty->hw_stopped && info->xmit_fifo_room == 0) + info->xmit_fifo_room = TXFIFO_SIZE - sGetTxCnt(cp); + + if (tty->stopped || tty->hw_stopped || info->xmit_fifo_room == 0 || info->xmit_cnt != 0) { + info->xmit_buf[info->xmit_head++] = ch; + info->xmit_head &= XMIT_BUF_SIZE - 1; + info->xmit_cnt++; + set_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]); + } else { + sOutB(sGetTxRxDataIO(cp), ch); + info->xmit_fifo_room--; + } + spin_unlock_irqrestore(&info->slock, flags); + mutex_unlock(&info->write_mtx); + return 1; +} + +/* + * Exception handler - write routine, called when user app writes to the device. + * A per port write mutex is used to protect from another process writing to + * this port at the same time. This other process could be running on the other CPU + * or get control of the CPU if the copy_from_user() blocks due to a page fault (swapped out). + * Spinlocks protect the info xmit members. 
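Editorial sketch: rp_put_char() above (and rp_do_transmit() earlier) index xmit_buf with head/tail counters masked by XMIT_BUF_SIZE - 1, which only works because the buffer size is a power of two. A minimal standalone ring buffer using the same idiom, including the "keep one slot free" convention that rp_write_room() relies on; the buffer size here is an arbitrary power of two chosen for the demo.

        /* Minimal power-of-two ring buffer using the same head/tail masking idiom
         * as the driver's xmit_buf handling (size chosen arbitrarily for the demo). */
        #include <stdio.h>

        #define BUF_SIZE 16                     /* must be a power of two */

        static unsigned char buf[BUF_SIZE];
        static int head, tail, cnt;

        static int put(unsigned char ch)
        {
                if (cnt >= BUF_SIZE - 1)        /* keep one slot free, as rp_write_room() does */
                        return 0;
                buf[head++] = ch;
                head &= BUF_SIZE - 1;           /* cheap wraparound */
                cnt++;
                return 1;
        }

        static int get(unsigned char *ch)
        {
                if (cnt == 0)
                        return 0;
                *ch = buf[tail++];
                tail &= BUF_SIZE - 1;
                cnt--;
                return 1;
        }

        int main(void)
        {
                unsigned char ch;
                int i;

                for (i = 0; i < 20; i++) {      /* more than BUF_SIZE to show the limit */
                        if (!put('a' + i)) {
                                printf("buffer full after %d puts\n", i);
                                break;
                        }
                }
                while (get(&ch))
                        putchar(ch);
                putchar('\n');
                return 0;
        }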
+ */ +static int rp_write(struct tty_struct *tty, + const unsigned char *buf, int count) +{ + struct r_port *info = tty->driver_data; + CHANNEL_t *cp; + const unsigned char *b; + int c, retval = 0; + unsigned long flags; + + if (count <= 0 || rocket_paranoia_check(info, "rp_write")) + return 0; + + if (mutex_lock_interruptible(&info->write_mtx)) + return -ERESTARTSYS; + +#ifdef ROCKET_DEBUG_WRITE + printk(KERN_INFO "rp_write %d chars...\n", count); +#endif + cp = &info->channel; + + if (!tty->stopped && !tty->hw_stopped && info->xmit_fifo_room < count) + info->xmit_fifo_room = TXFIFO_SIZE - sGetTxCnt(cp); + + /* + * If the write queue for the port is empty, and there is FIFO space, stuff bytes + * into FIFO. Use the write queue for temp storage. + */ + if (!tty->stopped && !tty->hw_stopped && info->xmit_cnt == 0 && info->xmit_fifo_room > 0) { + c = min(count, info->xmit_fifo_room); + b = buf; + + /* Push data into FIFO, 2 bytes at a time */ + sOutStrW(sGetTxRxDataIO(cp), (unsigned short *) b, c / 2); + + /* If there is a byte remaining, write it */ + if (c & 1) + sOutB(sGetTxRxDataIO(cp), b[c - 1]); + + retval += c; + buf += c; + count -= c; + + spin_lock_irqsave(&info->slock, flags); + info->xmit_fifo_room -= c; + spin_unlock_irqrestore(&info->slock, flags); + } + + /* If count is zero, we wrote it all and are done */ + if (!count) + goto end; + + /* Write remaining data into the port's xmit_buf */ + while (1) { + /* Hung up ? */ + if (!test_bit(ASYNCB_NORMAL_ACTIVE, &info->port.flags)) + goto end; + c = min(count, XMIT_BUF_SIZE - info->xmit_cnt - 1); + c = min(c, XMIT_BUF_SIZE - info->xmit_head); + if (c <= 0) + break; + + b = buf; + memcpy(info->xmit_buf + info->xmit_head, b, c); + + spin_lock_irqsave(&info->slock, flags); + info->xmit_head = + (info->xmit_head + c) & (XMIT_BUF_SIZE - 1); + info->xmit_cnt += c; + spin_unlock_irqrestore(&info->slock, flags); + + buf += c; + count -= c; + retval += c; + } + + if ((retval > 0) && !tty->stopped && !tty->hw_stopped) + set_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]); + +end: + if (info->xmit_cnt < WAKEUP_CHARS) { + tty_wakeup(tty); +#ifdef ROCKETPORT_HAVE_POLL_WAIT + wake_up_interruptible(&tty->poll_wait); +#endif + } + mutex_unlock(&info->write_mtx); + return retval; +} + +/* + * Return the number of characters that can be sent. We estimate + * only using the in-memory transmit buffer only, and ignore the + * potential space in the transmit FIFO. + */ +static int rp_write_room(struct tty_struct *tty) +{ + struct r_port *info = tty->driver_data; + int ret; + + if (rocket_paranoia_check(info, "rp_write_room")) + return 0; + + ret = XMIT_BUF_SIZE - info->xmit_cnt - 1; + if (ret < 0) + ret = 0; +#ifdef ROCKET_DEBUG_WRITE + printk(KERN_INFO "rp_write_room returns %d...\n", ret); +#endif + return ret; +} + +/* + * Return the number of characters in the buffer. Again, this only + * counts those characters in the in-memory transmit buffer. + */ +static int rp_chars_in_buffer(struct tty_struct *tty) +{ + struct r_port *info = tty->driver_data; + CHANNEL_t *cp; + + if (rocket_paranoia_check(info, "rp_chars_in_buffer")) + return 0; + + cp = &info->channel; + +#ifdef ROCKET_DEBUG_WRITE + printk(KERN_INFO "rp_chars_in_buffer returns %d...\n", info->xmit_cnt); +#endif + return info->xmit_cnt; +} + +/* + * Flushes the TX fifo for a port, deletes data in the xmit_buf stored in the + * r_port struct for the port. Note that spinlock are used to protect info members, + * do not call this function if the spinlock is already held. 
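Editorial sketch: rp_write() above first stuffs as much as the hardware FIFO will take (only while the soft buffer is empty), then copies the remainder into xmit_buf in chunks bounded both by the free space and by the distance to the buffer's wrap point. The fragment below reproduces just that chunking arithmetic with made-up sizes; as in the driver, the buffer size must be a power of two.

        /* Sketch of the chunking arithmetic in rp_write(): how one write call is
         * split between the transmit FIFO and the circular xmit_buf.  Sizes are
         * hypothetical. */
        #include <stdio.h>

        #define TXFIFO_ROOM_DEMO   200
        #define XMIT_BUF_SIZE_DEMO 4096

        static int min_int(int a, int b) { return a < b ? a : b; }

        int main(void)
        {
                int count = 1000;               /* bytes the caller wants to write */
                int xmit_cnt = 0;               /* soft buffer starts empty */
                int xmit_head = 4090;           /* next free slot, close to the wrap point */
                int c;

                /* Stage 1: soft buffer empty, so push straight at the FIFO. */
                c = min_int(count, TXFIFO_ROOM_DEMO);
                printf("would push %d bytes directly to the FIFO\n", c);
                count -= c;

                /* Stage 2: the rest goes to xmit_buf, limited by free space and by
                 * the wrap point, mirroring the two min() calls in rp_write(). */
                while (count > 0) {
                        c = min_int(count, XMIT_BUF_SIZE_DEMO - xmit_cnt - 1);
                        c = min_int(c, XMIT_BUF_SIZE_DEMO - xmit_head);
                        if (c <= 0)
                                break;          /* buffer full; driver defers to the poll loop */
                        xmit_head = (xmit_head + c) & (XMIT_BUF_SIZE_DEMO - 1);
                        xmit_cnt += c;
                        count -= c;
                        printf("copied %d bytes into xmit_buf (head now %d, cnt %d)\n",
                               c, xmit_head, xmit_cnt);
                }
                printf("%d bytes left unwritten\n", count);
                return 0;
        }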
+ */ +static void rp_flush_buffer(struct tty_struct *tty) +{ + struct r_port *info = tty->driver_data; + CHANNEL_t *cp; + unsigned long flags; + + if (rocket_paranoia_check(info, "rp_flush_buffer")) + return; + + spin_lock_irqsave(&info->slock, flags); + info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; + spin_unlock_irqrestore(&info->slock, flags); + +#ifdef ROCKETPORT_HAVE_POLL_WAIT + wake_up_interruptible(&tty->poll_wait); +#endif + tty_wakeup(tty); + + cp = &info->channel; + sFlushTxFIFO(cp); +} + +#ifdef CONFIG_PCI + +static struct pci_device_id __devinitdata __used rocket_pci_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_ANY_ID) }, + { } +}; +MODULE_DEVICE_TABLE(pci, rocket_pci_ids); + +/* + * Called when a PCI card is found. Retrieves and stores model information, + * init's aiopic and serial port hardware. + * Inputs: i is the board number (0-n) + */ +static __init int register_PCI(int i, struct pci_dev *dev) +{ + int num_aiops, aiop, max_num_aiops, num_chan, chan; + unsigned int aiopio[MAX_AIOPS_PER_BOARD]; + char *str, *board_type; + CONTROLLER_t *ctlp; + + int fast_clock = 0; + int altChanRingIndicator = 0; + int ports_per_aiop = 8; + WordIO_t ConfigIO = 0; + ByteIO_t UPCIRingInd = 0; + + if (!dev || pci_enable_device(dev)) + return 0; + + rcktpt_io_addr[i] = pci_resource_start(dev, 0); + + rcktpt_type[i] = ROCKET_TYPE_NORMAL; + rocketModel[i].loadrm2 = 0; + rocketModel[i].startingPortNumber = nextLineNumber; + + /* Depending on the model, set up some config variables */ + switch (dev->device) { + case PCI_DEVICE_ID_RP4QUAD: + str = "Quadcable"; + max_num_aiops = 1; + ports_per_aiop = 4; + rocketModel[i].model = MODEL_RP4QUAD; + strcpy(rocketModel[i].modelString, "RocketPort 4 port w/quad cable"); + rocketModel[i].numPorts = 4; + break; + case PCI_DEVICE_ID_RP8OCTA: + str = "Octacable"; + max_num_aiops = 1; + rocketModel[i].model = MODEL_RP8OCTA; + strcpy(rocketModel[i].modelString, "RocketPort 8 port w/octa cable"); + rocketModel[i].numPorts = 8; + break; + case PCI_DEVICE_ID_URP8OCTA: + str = "Octacable"; + max_num_aiops = 1; + rocketModel[i].model = MODEL_UPCI_RP8OCTA; + strcpy(rocketModel[i].modelString, "RocketPort UPCI 8 port w/octa cable"); + rocketModel[i].numPorts = 8; + break; + case PCI_DEVICE_ID_RP8INTF: + str = "8"; + max_num_aiops = 1; + rocketModel[i].model = MODEL_RP8INTF; + strcpy(rocketModel[i].modelString, "RocketPort 8 port w/external I/F"); + rocketModel[i].numPorts = 8; + break; + case PCI_DEVICE_ID_URP8INTF: + str = "8"; + max_num_aiops = 1; + rocketModel[i].model = MODEL_UPCI_RP8INTF; + strcpy(rocketModel[i].modelString, "RocketPort UPCI 8 port w/external I/F"); + rocketModel[i].numPorts = 8; + break; + case PCI_DEVICE_ID_RP8J: + str = "8J"; + max_num_aiops = 1; + rocketModel[i].model = MODEL_RP8J; + strcpy(rocketModel[i].modelString, "RocketPort 8 port w/RJ11 connectors"); + rocketModel[i].numPorts = 8; + break; + case PCI_DEVICE_ID_RP4J: + str = "4J"; + max_num_aiops = 1; + ports_per_aiop = 4; + rocketModel[i].model = MODEL_RP4J; + strcpy(rocketModel[i].modelString, "RocketPort 4 port w/RJ45 connectors"); + rocketModel[i].numPorts = 4; + break; + case PCI_DEVICE_ID_RP8SNI: + str = "8 (DB78 Custom)"; + max_num_aiops = 1; + rocketModel[i].model = MODEL_RP8SNI; + strcpy(rocketModel[i].modelString, "RocketPort 8 port w/ custom DB78"); + rocketModel[i].numPorts = 8; + break; + case PCI_DEVICE_ID_RP16SNI: + str = "16 (DB78 Custom)"; + max_num_aiops = 2; + rocketModel[i].model = MODEL_RP16SNI; + strcpy(rocketModel[i].modelString, "RocketPort 16 port w/ 
custom DB78"); + rocketModel[i].numPorts = 16; + break; + case PCI_DEVICE_ID_RP16INTF: + str = "16"; + max_num_aiops = 2; + rocketModel[i].model = MODEL_RP16INTF; + strcpy(rocketModel[i].modelString, "RocketPort 16 port w/external I/F"); + rocketModel[i].numPorts = 16; + break; + case PCI_DEVICE_ID_URP16INTF: + str = "16"; + max_num_aiops = 2; + rocketModel[i].model = MODEL_UPCI_RP16INTF; + strcpy(rocketModel[i].modelString, "RocketPort UPCI 16 port w/external I/F"); + rocketModel[i].numPorts = 16; + break; + case PCI_DEVICE_ID_CRP16INTF: + str = "16"; + max_num_aiops = 2; + rocketModel[i].model = MODEL_CPCI_RP16INTF; + strcpy(rocketModel[i].modelString, "RocketPort Compact PCI 16 port w/external I/F"); + rocketModel[i].numPorts = 16; + break; + case PCI_DEVICE_ID_RP32INTF: + str = "32"; + max_num_aiops = 4; + rocketModel[i].model = MODEL_RP32INTF; + strcpy(rocketModel[i].modelString, "RocketPort 32 port w/external I/F"); + rocketModel[i].numPorts = 32; + break; + case PCI_DEVICE_ID_URP32INTF: + str = "32"; + max_num_aiops = 4; + rocketModel[i].model = MODEL_UPCI_RP32INTF; + strcpy(rocketModel[i].modelString, "RocketPort UPCI 32 port w/external I/F"); + rocketModel[i].numPorts = 32; + break; + case PCI_DEVICE_ID_RPP4: + str = "Plus Quadcable"; + max_num_aiops = 1; + ports_per_aiop = 4; + altChanRingIndicator++; + fast_clock++; + rocketModel[i].model = MODEL_RPP4; + strcpy(rocketModel[i].modelString, "RocketPort Plus 4 port"); + rocketModel[i].numPorts = 4; + break; + case PCI_DEVICE_ID_RPP8: + str = "Plus Octacable"; + max_num_aiops = 2; + ports_per_aiop = 4; + altChanRingIndicator++; + fast_clock++; + rocketModel[i].model = MODEL_RPP8; + strcpy(rocketModel[i].modelString, "RocketPort Plus 8 port"); + rocketModel[i].numPorts = 8; + break; + case PCI_DEVICE_ID_RP2_232: + str = "Plus 2 (RS-232)"; + max_num_aiops = 1; + ports_per_aiop = 2; + altChanRingIndicator++; + fast_clock++; + rocketModel[i].model = MODEL_RP2_232; + strcpy(rocketModel[i].modelString, "RocketPort Plus 2 port RS232"); + rocketModel[i].numPorts = 2; + break; + case PCI_DEVICE_ID_RP2_422: + str = "Plus 2 (RS-422)"; + max_num_aiops = 1; + ports_per_aiop = 2; + altChanRingIndicator++; + fast_clock++; + rocketModel[i].model = MODEL_RP2_422; + strcpy(rocketModel[i].modelString, "RocketPort Plus 2 port RS422"); + rocketModel[i].numPorts = 2; + break; + case PCI_DEVICE_ID_RP6M: + + max_num_aiops = 1; + ports_per_aiop = 6; + str = "6-port"; + + /* If revision is 1, the rocketmodem flash must be loaded. + * If it is 2 it is a "socketed" version. */ + if (dev->revision == 1) { + rcktpt_type[i] = ROCKET_TYPE_MODEMII; + rocketModel[i].loadrm2 = 1; + } else { + rcktpt_type[i] = ROCKET_TYPE_MODEM; + } + + rocketModel[i].model = MODEL_RP6M; + strcpy(rocketModel[i].modelString, "RocketModem 6 port"); + rocketModel[i].numPorts = 6; + break; + case PCI_DEVICE_ID_RP4M: + max_num_aiops = 1; + ports_per_aiop = 4; + str = "4-port"; + if (dev->revision == 1) { + rcktpt_type[i] = ROCKET_TYPE_MODEMII; + rocketModel[i].loadrm2 = 1; + } else { + rcktpt_type[i] = ROCKET_TYPE_MODEM; + } + + rocketModel[i].model = MODEL_RP4M; + strcpy(rocketModel[i].modelString, "RocketModem 4 port"); + rocketModel[i].numPorts = 4; + break; + default: + str = "(unknown/unsupported)"; + max_num_aiops = 0; + break; + } + + /* + * Check for UPCI boards. 
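Editorial sketch: after the model table above, register_PCI() places each AIOP's I/O window at rcktpt_io_addr + aiop * 0x40, whereas the ISA probe (init_ISA(), further down) spaces the windows 0x400 apart. A tiny illustration of that address layout; both base addresses below are invented for the demo.

        /* Sketch of the AIOP I/O window layout computed in register_PCI() (0x40
         * spacing) versus init_ISA() (0x400 spacing).  Base addresses are made up. */
        #include <stdio.h>

        #define MAX_AIOPS_DEMO 4

        int main(void)
        {
                unsigned long pci_base = 0xd000;    /* hypothetical PCI BAR value */
                unsigned long isa_base = 0x180;     /* hypothetical ISA board address */
                int aiop;

                for (aiop = 0; aiop < MAX_AIOPS_DEMO; aiop++)
                        printf("AIOP %d: PCI window 0x%lx, ISA window 0x%lx\n",
                               aiop,
                               pci_base + (aiop * 0x40),
                               isa_base + (aiop * 0x400));
                return 0;
        }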
+ */ + + switch (dev->device) { + case PCI_DEVICE_ID_URP32INTF: + case PCI_DEVICE_ID_URP8INTF: + case PCI_DEVICE_ID_URP16INTF: + case PCI_DEVICE_ID_CRP16INTF: + case PCI_DEVICE_ID_URP8OCTA: + rcktpt_io_addr[i] = pci_resource_start(dev, 2); + ConfigIO = pci_resource_start(dev, 1); + if (dev->device == PCI_DEVICE_ID_URP8OCTA) { + UPCIRingInd = rcktpt_io_addr[i] + _PCI_9030_RING_IND; + + /* + * Check for octa or quad cable. + */ + if (! + (sInW(ConfigIO + _PCI_9030_GPIO_CTRL) & + PCI_GPIO_CTRL_8PORT)) { + str = "Quadcable"; + ports_per_aiop = 4; + rocketModel[i].numPorts = 4; + } + } + break; + case PCI_DEVICE_ID_UPCI_RM3_8PORT: + str = "8 ports"; + max_num_aiops = 1; + rocketModel[i].model = MODEL_UPCI_RM3_8PORT; + strcpy(rocketModel[i].modelString, "RocketModem III 8 port"); + rocketModel[i].numPorts = 8; + rcktpt_io_addr[i] = pci_resource_start(dev, 2); + UPCIRingInd = rcktpt_io_addr[i] + _PCI_9030_RING_IND; + ConfigIO = pci_resource_start(dev, 1); + rcktpt_type[i] = ROCKET_TYPE_MODEMIII; + break; + case PCI_DEVICE_ID_UPCI_RM3_4PORT: + str = "4 ports"; + max_num_aiops = 1; + rocketModel[i].model = MODEL_UPCI_RM3_4PORT; + strcpy(rocketModel[i].modelString, "RocketModem III 4 port"); + rocketModel[i].numPorts = 4; + rcktpt_io_addr[i] = pci_resource_start(dev, 2); + UPCIRingInd = rcktpt_io_addr[i] + _PCI_9030_RING_IND; + ConfigIO = pci_resource_start(dev, 1); + rcktpt_type[i] = ROCKET_TYPE_MODEMIII; + break; + default: + break; + } + + switch (rcktpt_type[i]) { + case ROCKET_TYPE_MODEM: + board_type = "RocketModem"; + break; + case ROCKET_TYPE_MODEMII: + board_type = "RocketModem II"; + break; + case ROCKET_TYPE_MODEMIII: + board_type = "RocketModem III"; + break; + default: + board_type = "RocketPort"; + break; + } + + if (fast_clock) { + sClockPrescale = 0x12; /* mod 2 (divide by 3) */ + rp_baud_base[i] = 921600; + } else { + /* + * If support_low_speed is set, use the slow clock + * prescale, which supports 50 bps + */ + if (support_low_speed) { + /* mod 9 (divide by 10) prescale */ + sClockPrescale = 0x19; + rp_baud_base[i] = 230400; + } else { + /* mod 4 (devide by 5) prescale */ + sClockPrescale = 0x14; + rp_baud_base[i] = 460800; + } + } + + for (aiop = 0; aiop < max_num_aiops; aiop++) + aiopio[aiop] = rcktpt_io_addr[i] + (aiop * 0x40); + ctlp = sCtlNumToCtlPtr(i); + num_aiops = sPCIInitController(ctlp, i, aiopio, max_num_aiops, ConfigIO, 0, FREQ_DIS, 0, altChanRingIndicator, UPCIRingInd); + for (aiop = 0; aiop < max_num_aiops; aiop++) + ctlp->AiopNumChan[aiop] = ports_per_aiop; + + dev_info(&dev->dev, "comtrol PCI controller #%d found at " + "address %04lx, %d AIOP(s) (%s), creating ttyR%d - %ld\n", + i, rcktpt_io_addr[i], num_aiops, rocketModel[i].modelString, + rocketModel[i].startingPortNumber, + rocketModel[i].startingPortNumber + rocketModel[i].numPorts-1); + + if (num_aiops <= 0) { + rcktpt_io_addr[i] = 0; + return (0); + } + is_PCI[i] = 1; + + /* Reset the AIOPIC, init the serial ports */ + for (aiop = 0; aiop < num_aiops; aiop++) { + sResetAiopByNum(ctlp, aiop); + num_chan = ports_per_aiop; + for (chan = 0; chan < num_chan; chan++) + init_r_port(i, aiop, chan, dev); + } + + /* Rocket modems must be reset */ + if ((rcktpt_type[i] == ROCKET_TYPE_MODEM) || + (rcktpt_type[i] == ROCKET_TYPE_MODEMII) || + (rcktpt_type[i] == ROCKET_TYPE_MODEMIII)) { + num_chan = ports_per_aiop; + for (chan = 0; chan < num_chan; chan++) + sPCIModemReset(ctlp, chan, 1); + msleep(500); + for (chan = 0; chan < num_chan; chan++) + sPCIModemReset(ctlp, chan, 0); + msleep(500); + rmSpeakerReset(ctlp, 
rocketModel[i].model); + } + return (1); +} + +/* + * Probes for PCI cards, inits them if found + * Input: board_found = number of ISA boards already found, or the + * starting board number + * Returns: Number of PCI boards found + */ +static int __init init_PCI(int boards_found) +{ + struct pci_dev *dev = NULL; + int count = 0; + + /* Work through the PCI device list, pulling out ours */ + while ((dev = pci_get_device(PCI_VENDOR_ID_RP, PCI_ANY_ID, dev))) { + if (register_PCI(count + boards_found, dev)) + count++; + } + return (count); +} + +#endif /* CONFIG_PCI */ + +/* + * Probes for ISA cards + * Input: i = the board number to look for + * Returns: 1 if board found, 0 else + */ +static int __init init_ISA(int i) +{ + int num_aiops, num_chan = 0, total_num_chan = 0; + int aiop, chan; + unsigned int aiopio[MAX_AIOPS_PER_BOARD]; + CONTROLLER_t *ctlp; + char *type_string; + + /* If io_addr is zero, no board configured */ + if (rcktpt_io_addr[i] == 0) + return (0); + + /* Reserve the IO region */ + if (!request_region(rcktpt_io_addr[i], 64, "Comtrol RocketPort")) { + printk(KERN_ERR "Unable to reserve IO region for configured " + "ISA RocketPort at address 0x%lx, board not " + "installed...\n", rcktpt_io_addr[i]); + rcktpt_io_addr[i] = 0; + return (0); + } + + ctlp = sCtlNumToCtlPtr(i); + + ctlp->boardType = rcktpt_type[i]; + + switch (rcktpt_type[i]) { + case ROCKET_TYPE_PC104: + type_string = "(PC104)"; + break; + case ROCKET_TYPE_MODEM: + type_string = "(RocketModem)"; + break; + case ROCKET_TYPE_MODEMII: + type_string = "(RocketModem II)"; + break; + default: + type_string = ""; + break; + } + + /* + * If support_low_speed is set, use the slow clock prescale, + * which supports 50 bps + */ + if (support_low_speed) { + sClockPrescale = 0x19; /* mod 9 (divide by 10) prescale */ + rp_baud_base[i] = 230400; + } else { + sClockPrescale = 0x14; /* mod 4 (devide by 5) prescale */ + rp_baud_base[i] = 460800; + } + + for (aiop = 0; aiop < MAX_AIOPS_PER_BOARD; aiop++) + aiopio[aiop] = rcktpt_io_addr[i] + (aiop * 0x400); + + num_aiops = sInitController(ctlp, i, controller + (i * 0x400), aiopio, MAX_AIOPS_PER_BOARD, 0, FREQ_DIS, 0); + + if (ctlp->boardType == ROCKET_TYPE_PC104) { + sEnAiop(ctlp, 2); /* only one AIOPIC, but these */ + sEnAiop(ctlp, 3); /* CSels used for other stuff */ + } + + /* If something went wrong initing the AIOP's release the ISA IO memory */ + if (num_aiops <= 0) { + release_region(rcktpt_io_addr[i], 64); + rcktpt_io_addr[i] = 0; + return (0); + } + + rocketModel[i].startingPortNumber = nextLineNumber; + + for (aiop = 0; aiop < num_aiops; aiop++) { + sResetAiopByNum(ctlp, aiop); + sEnAiop(ctlp, aiop); + num_chan = sGetAiopNumChan(ctlp, aiop); + total_num_chan += num_chan; + for (chan = 0; chan < num_chan; chan++) + init_r_port(i, aiop, chan, NULL); + } + is_PCI[i] = 0; + if ((rcktpt_type[i] == ROCKET_TYPE_MODEM) || (rcktpt_type[i] == ROCKET_TYPE_MODEMII)) { + num_chan = sGetAiopNumChan(ctlp, 0); + total_num_chan = num_chan; + for (chan = 0; chan < num_chan; chan++) + sModemReset(ctlp, chan, 1); + msleep(500); + for (chan = 0; chan < num_chan; chan++) + sModemReset(ctlp, chan, 0); + msleep(500); + strcpy(rocketModel[i].modelString, "RocketModem ISA"); + } else { + strcpy(rocketModel[i].modelString, "RocketPort ISA"); + } + rocketModel[i].numPorts = total_num_chan; + rocketModel[i].model = MODEL_ISA; + + printk(KERN_INFO "RocketPort ISA card #%d found at 0x%lx - %d AIOPs %s\n", + i, rcktpt_io_addr[i], num_aiops, type_string); + + printk(KERN_INFO "Installing %s, creating 
/dev/ttyR%d - %ld\n", + rocketModel[i].modelString, + rocketModel[i].startingPortNumber, + rocketModel[i].startingPortNumber + + rocketModel[i].numPorts - 1); + + return (1); +} + +static const struct tty_operations rocket_ops = { + .open = rp_open, + .close = rp_close, + .write = rp_write, + .put_char = rp_put_char, + .write_room = rp_write_room, + .chars_in_buffer = rp_chars_in_buffer, + .flush_buffer = rp_flush_buffer, + .ioctl = rp_ioctl, + .throttle = rp_throttle, + .unthrottle = rp_unthrottle, + .set_termios = rp_set_termios, + .stop = rp_stop, + .start = rp_start, + .hangup = rp_hangup, + .break_ctl = rp_break, + .send_xchar = rp_send_xchar, + .wait_until_sent = rp_wait_until_sent, + .tiocmget = rp_tiocmget, + .tiocmset = rp_tiocmset, +}; + +static const struct tty_port_operations rocket_port_ops = { + .carrier_raised = carrier_raised, + .dtr_rts = dtr_rts, +}; + +/* + * The module "startup" routine; it's run when the module is loaded. + */ +static int __init rp_init(void) +{ + int ret = -ENOMEM, pci_boards_found, isa_boards_found, i; + + printk(KERN_INFO "RocketPort device driver module, version %s, %s\n", + ROCKET_VERSION, ROCKET_DATE); + + rocket_driver = alloc_tty_driver(MAX_RP_PORTS); + if (!rocket_driver) + goto err; + + /* + * If board 1 is non-zero, there is at least one ISA configured. If controller is + * zero, use the default controller IO address of board1 + 0x40. + */ + if (board1) { + if (controller == 0) + controller = board1 + 0x40; + } else { + controller = 0; /* Used as a flag, meaning no ISA boards */ + } + + /* If an ISA card is configured, reserve the 4 byte IO space for the Mudbac controller */ + if (controller && (!request_region(controller, 4, "Comtrol RocketPort"))) { + printk(KERN_ERR "Unable to reserve IO region for first " + "configured ISA RocketPort controller 0x%lx. " + "Driver exiting\n", controller); + ret = -EBUSY; + goto err_tty; + } + + /* Store ISA variable retrieved from command line or .conf file. */ + rcktpt_io_addr[0] = board1; + rcktpt_io_addr[1] = board2; + rcktpt_io_addr[2] = board3; + rcktpt_io_addr[3] = board4; + + rcktpt_type[0] = modem1 ? ROCKET_TYPE_MODEM : ROCKET_TYPE_NORMAL; + rcktpt_type[0] = pc104_1[0] ? ROCKET_TYPE_PC104 : rcktpt_type[0]; + rcktpt_type[1] = modem2 ? ROCKET_TYPE_MODEM : ROCKET_TYPE_NORMAL; + rcktpt_type[1] = pc104_2[0] ? ROCKET_TYPE_PC104 : rcktpt_type[1]; + rcktpt_type[2] = modem3 ? ROCKET_TYPE_MODEM : ROCKET_TYPE_NORMAL; + rcktpt_type[2] = pc104_3[0] ? ROCKET_TYPE_PC104 : rcktpt_type[2]; + rcktpt_type[3] = modem4 ? ROCKET_TYPE_MODEM : ROCKET_TYPE_NORMAL; + rcktpt_type[3] = pc104_4[0] ? ROCKET_TYPE_PC104 : rcktpt_type[3]; + + /* + * Set up the tty driver structure and then register this + * driver with the tty layer. 
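+	 *
+	 *  (This follows the usual tty core pattern: fill in the driver
+	 *  fields, attach the operations table with tty_set_operations(),
+	 *  then call tty_register_driver().  Because TTY_DRIVER_DYNAMIC_DEV
+	 *  is set, per-port device nodes are not created here; they are
+	 *  registered individually as each port is set up, presumably from
+	 *  init_r_port(), which is why rp_cleanup_module() below removes
+	 *  them one at a time with tty_unregister_device().)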
+ */ + + rocket_driver->owner = THIS_MODULE; + rocket_driver->flags = TTY_DRIVER_DYNAMIC_DEV; + rocket_driver->name = "ttyR"; + rocket_driver->driver_name = "Comtrol RocketPort"; + rocket_driver->major = TTY_ROCKET_MAJOR; + rocket_driver->minor_start = 0; + rocket_driver->type = TTY_DRIVER_TYPE_SERIAL; + rocket_driver->subtype = SERIAL_TYPE_NORMAL; + rocket_driver->init_termios = tty_std_termios; + rocket_driver->init_termios.c_cflag = + B9600 | CS8 | CREAD | HUPCL | CLOCAL; + rocket_driver->init_termios.c_ispeed = 9600; + rocket_driver->init_termios.c_ospeed = 9600; +#ifdef ROCKET_SOFT_FLOW + rocket_driver->flags |= TTY_DRIVER_REAL_RAW; +#endif + tty_set_operations(rocket_driver, &rocket_ops); + + ret = tty_register_driver(rocket_driver); + if (ret < 0) { + printk(KERN_ERR "Couldn't install tty RocketPort driver\n"); + goto err_controller; + } + +#ifdef ROCKET_DEBUG_OPEN + printk(KERN_INFO "RocketPort driver is major %d\n", rocket_driver.major); +#endif + + /* + * OK, let's probe each of the controllers looking for boards. Any boards found + * will be initialized here. + */ + isa_boards_found = 0; + pci_boards_found = 0; + + for (i = 0; i < NUM_BOARDS; i++) { + if (init_ISA(i)) + isa_boards_found++; + } + +#ifdef CONFIG_PCI + if (isa_boards_found < NUM_BOARDS) + pci_boards_found = init_PCI(isa_boards_found); +#endif + + max_board = pci_boards_found + isa_boards_found; + + if (max_board == 0) { + printk(KERN_ERR "No rocketport ports found; unloading driver\n"); + ret = -ENXIO; + goto err_ttyu; + } + + return 0; +err_ttyu: + tty_unregister_driver(rocket_driver); +err_controller: + if (controller) + release_region(controller, 4); +err_tty: + put_tty_driver(rocket_driver); +err: + return ret; +} + + +static void rp_cleanup_module(void) +{ + int retval; + int i; + + del_timer_sync(&rocket_timer); + + retval = tty_unregister_driver(rocket_driver); + if (retval) + printk(KERN_ERR "Error %d while trying to unregister " + "rocketport driver\n", -retval); + + for (i = 0; i < MAX_RP_PORTS; i++) + if (rp_table[i]) { + tty_unregister_device(rocket_driver, i); + kfree(rp_table[i]); + } + + put_tty_driver(rocket_driver); + + for (i = 0; i < NUM_BOARDS; i++) { + if (rcktpt_io_addr[i] <= 0 || is_PCI[i]) + continue; + release_region(rcktpt_io_addr[i], 64); + } + if (controller) + release_region(controller, 4); +} + +/*************************************************************************** +Function: sInitController +Purpose: Initialization of controller global registers and controller + structure. +Call: sInitController(CtlP,CtlNum,MudbacIO,AiopIOList,AiopIOListSize, + IRQNum,Frequency,PeriodicOnly) + CONTROLLER_T *CtlP; Ptr to controller structure + int CtlNum; Controller number + ByteIO_t MudbacIO; Mudbac base I/O address. + ByteIO_t *AiopIOList; List of I/O addresses for each AIOP. + This list must be in the order the AIOPs will be found on the + controller. Once an AIOP in the list is not found, it is + assumed that there are no more AIOPs on the controller. + int AiopIOListSize; Number of addresses in AiopIOList + int IRQNum; Interrupt Request number. 
Can be any of the following: + 0: Disable global interrupts + 3: IRQ 3 + 4: IRQ 4 + 5: IRQ 5 + 9: IRQ 9 + 10: IRQ 10 + 11: IRQ 11 + 12: IRQ 12 + 15: IRQ 15 + Byte_t Frequency: A flag identifying the frequency + of the periodic interrupt, can be any one of the following: + FREQ_DIS - periodic interrupt disabled + FREQ_137HZ - 137 Hertz + FREQ_69HZ - 69 Hertz + FREQ_34HZ - 34 Hertz + FREQ_17HZ - 17 Hertz + FREQ_9HZ - 9 Hertz + FREQ_4HZ - 4 Hertz + If IRQNum is set to 0 the Frequency parameter is + overidden, it is forced to a value of FREQ_DIS. + int PeriodicOnly: 1 if all interrupts except the periodic + interrupt are to be blocked. + 0 is both the periodic interrupt and + other channel interrupts are allowed. + If IRQNum is set to 0 the PeriodicOnly parameter is + overidden, it is forced to a value of 0. +Return: int: Number of AIOPs on the controller, or CTLID_NULL if controller + initialization failed. + +Comments: + If periodic interrupts are to be disabled but AIOP interrupts + are allowed, set Frequency to FREQ_DIS and PeriodicOnly to 0. + + If interrupts are to be completely disabled set IRQNum to 0. + + Setting Frequency to FREQ_DIS and PeriodicOnly to 1 is an + invalid combination. + + This function performs initialization of global interrupt modes, + but it does not actually enable global interrupts. To enable + and disable global interrupts use functions sEnGlobalInt() and + sDisGlobalInt(). Enabling of global interrupts is normally not + done until all other initializations are complete. + + Even if interrupts are globally enabled, they must also be + individually enabled for each channel that is to generate + interrupts. + +Warnings: No range checking on any of the parameters is done. + + No context switches are allowed while executing this function. + + After this function all AIOPs on the controller are disabled, + they can be enabled with sEnAiop(). 
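+
+Example: a probe-time call with global interrupts disabled, as made
+   by init_ISA() above (shown here only as a usage sketch):
+
+      num_aiops = sInitController(ctlp, i, controller + (i * 0x400),
+                                  aiopio, MAX_AIOPS_PER_BOARD,
+                                  0, FREQ_DIS, 0);
+
+   Each AIOP reported found is then reset with sResetAiopByNum() and
+   enabled with sEnAiop() before its channels are initialized.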
+*/ +static int sInitController(CONTROLLER_T * CtlP, int CtlNum, ByteIO_t MudbacIO, + ByteIO_t * AiopIOList, int AiopIOListSize, + int IRQNum, Byte_t Frequency, int PeriodicOnly) +{ + int i; + ByteIO_t io; + int done; + + CtlP->AiopIntrBits = aiop_intr_bits; + CtlP->AltChanRingIndicator = 0; + CtlP->CtlNum = CtlNum; + CtlP->CtlID = CTLID_0001; /* controller release 1 */ + CtlP->BusType = isISA; + CtlP->MBaseIO = MudbacIO; + CtlP->MReg1IO = MudbacIO + 1; + CtlP->MReg2IO = MudbacIO + 2; + CtlP->MReg3IO = MudbacIO + 3; +#if 1 + CtlP->MReg2 = 0; /* interrupt disable */ + CtlP->MReg3 = 0; /* no periodic interrupts */ +#else + if (sIRQMap[IRQNum] == 0) { /* interrupts globally disabled */ + CtlP->MReg2 = 0; /* interrupt disable */ + CtlP->MReg3 = 0; /* no periodic interrupts */ + } else { + CtlP->MReg2 = sIRQMap[IRQNum]; /* set IRQ number */ + CtlP->MReg3 = Frequency; /* set frequency */ + if (PeriodicOnly) { /* periodic interrupt only */ + CtlP->MReg3 |= PERIODIC_ONLY; + } + } +#endif + sOutB(CtlP->MReg2IO, CtlP->MReg2); + sOutB(CtlP->MReg3IO, CtlP->MReg3); + sControllerEOI(CtlP); /* clear EOI if warm init */ + /* Init AIOPs */ + CtlP->NumAiop = 0; + for (i = done = 0; i < AiopIOListSize; i++) { + io = AiopIOList[i]; + CtlP->AiopIO[i] = (WordIO_t) io; + CtlP->AiopIntChanIO[i] = io + _INT_CHAN; + sOutB(CtlP->MReg2IO, CtlP->MReg2 | (i & 0x03)); /* AIOP index */ + sOutB(MudbacIO, (Byte_t) (io >> 6)); /* set up AIOP I/O in MUDBAC */ + if (done) + continue; + sEnAiop(CtlP, i); /* enable the AIOP */ + CtlP->AiopID[i] = sReadAiopID(io); /* read AIOP ID */ + if (CtlP->AiopID[i] == AIOPID_NULL) /* if AIOP does not exist */ + done = 1; /* done looking for AIOPs */ + else { + CtlP->AiopNumChan[i] = sReadAiopNumChan((WordIO_t) io); /* num channels in AIOP */ + sOutW((WordIO_t) io + _INDX_ADDR, _CLK_PRE); /* clock prescaler */ + sOutB(io + _INDX_DATA, sClockPrescale); + CtlP->NumAiop++; /* bump count of AIOPs */ + } + sDisAiop(CtlP, i); /* disable AIOP */ + } + + if (CtlP->NumAiop == 0) + return (-1); + else + return (CtlP->NumAiop); +} + +/*************************************************************************** +Function: sPCIInitController +Purpose: Initialization of controller global registers and controller + structure. +Call: sPCIInitController(CtlP,CtlNum,AiopIOList,AiopIOListSize, + IRQNum,Frequency,PeriodicOnly) + CONTROLLER_T *CtlP; Ptr to controller structure + int CtlNum; Controller number + ByteIO_t *AiopIOList; List of I/O addresses for each AIOP. + This list must be in the order the AIOPs will be found on the + controller. Once an AIOP in the list is not found, it is + assumed that there are no more AIOPs on the controller. + int AiopIOListSize; Number of addresses in AiopIOList + int IRQNum; Interrupt Request number. Can be any of the following: + 0: Disable global interrupts + 3: IRQ 3 + 4: IRQ 4 + 5: IRQ 5 + 9: IRQ 9 + 10: IRQ 10 + 11: IRQ 11 + 12: IRQ 12 + 15: IRQ 15 + Byte_t Frequency: A flag identifying the frequency + of the periodic interrupt, can be any one of the following: + FREQ_DIS - periodic interrupt disabled + FREQ_137HZ - 137 Hertz + FREQ_69HZ - 69 Hertz + FREQ_34HZ - 34 Hertz + FREQ_17HZ - 17 Hertz + FREQ_9HZ - 9 Hertz + FREQ_4HZ - 4 Hertz + If IRQNum is set to 0 the Frequency parameter is + overidden, it is forced to a value of FREQ_DIS. + int PeriodicOnly: 1 if all interrupts except the periodic + interrupt are to be blocked. + 0 is both the periodic interrupt and + other channel interrupts are allowed. 
+ If IRQNum is set to 0 the PeriodicOnly parameter is + overidden, it is forced to a value of 0. +Return: int: Number of AIOPs on the controller, or CTLID_NULL if controller + initialization failed. + +Comments: + If periodic interrupts are to be disabled but AIOP interrupts + are allowed, set Frequency to FREQ_DIS and PeriodicOnly to 0. + + If interrupts are to be completely disabled set IRQNum to 0. + + Setting Frequency to FREQ_DIS and PeriodicOnly to 1 is an + invalid combination. + + This function performs initialization of global interrupt modes, + but it does not actually enable global interrupts. To enable + and disable global interrupts use functions sEnGlobalInt() and + sDisGlobalInt(). Enabling of global interrupts is normally not + done until all other initializations are complete. + + Even if interrupts are globally enabled, they must also be + individually enabled for each channel that is to generate + interrupts. + +Warnings: No range checking on any of the parameters is done. + + No context switches are allowed while executing this function. + + After this function all AIOPs on the controller are disabled, + they can be enabled with sEnAiop(). +*/ +static int sPCIInitController(CONTROLLER_T * CtlP, int CtlNum, + ByteIO_t * AiopIOList, int AiopIOListSize, + WordIO_t ConfigIO, int IRQNum, Byte_t Frequency, + int PeriodicOnly, int altChanRingIndicator, + int UPCIRingInd) +{ + int i; + ByteIO_t io; + + CtlP->AltChanRingIndicator = altChanRingIndicator; + CtlP->UPCIRingInd = UPCIRingInd; + CtlP->CtlNum = CtlNum; + CtlP->CtlID = CTLID_0001; /* controller release 1 */ + CtlP->BusType = isPCI; /* controller release 1 */ + + if (ConfigIO) { + CtlP->isUPCI = 1; + CtlP->PCIIO = ConfigIO + _PCI_9030_INT_CTRL; + CtlP->PCIIO2 = ConfigIO + _PCI_9030_GPIO_CTRL; + CtlP->AiopIntrBits = upci_aiop_intr_bits; + } else { + CtlP->isUPCI = 0; + CtlP->PCIIO = + (WordIO_t) ((ByteIO_t) AiopIOList[0] + _PCI_INT_FUNC); + CtlP->AiopIntrBits = aiop_intr_bits; + } + + sPCIControllerEOI(CtlP); /* clear EOI if warm init */ + /* Init AIOPs */ + CtlP->NumAiop = 0; + for (i = 0; i < AiopIOListSize; i++) { + io = AiopIOList[i]; + CtlP->AiopIO[i] = (WordIO_t) io; + CtlP->AiopIntChanIO[i] = io + _INT_CHAN; + + CtlP->AiopID[i] = sReadAiopID(io); /* read AIOP ID */ + if (CtlP->AiopID[i] == AIOPID_NULL) /* if AIOP does not exist */ + break; /* done looking for AIOPs */ + + CtlP->AiopNumChan[i] = sReadAiopNumChan((WordIO_t) io); /* num channels in AIOP */ + sOutW((WordIO_t) io + _INDX_ADDR, _CLK_PRE); /* clock prescaler */ + sOutB(io + _INDX_DATA, sClockPrescale); + CtlP->NumAiop++; /* bump count of AIOPs */ + } + + if (CtlP->NumAiop == 0) + return (-1); + else + return (CtlP->NumAiop); +} + +/*************************************************************************** +Function: sReadAiopID +Purpose: Read the AIOP idenfication number directly from an AIOP. +Call: sReadAiopID(io) + ByteIO_t io: AIOP base I/O address +Return: int: Flag AIOPID_XXXX if a valid AIOP is found, where X + is replace by an identifying number. + Flag AIOPID_NULL if no valid AIOP is found +Warnings: No context switches are allowed while executing this function. 
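+
+Example: the controller init routines above probe the AIOP slots with
+   this function and stop at the first slot that reports AIOPID_NULL
+   (a sketch of the loop in sPCIInitController()):
+
+      CtlP->AiopID[i] = sReadAiopID(io);
+      if (CtlP->AiopID[i] == AIOPID_NULL)
+              break;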
+ +*/ +static int sReadAiopID(ByteIO_t io) +{ + Byte_t AiopID; /* ID byte from AIOP */ + + sOutB(io + _CMD_REG, RESET_ALL); /* reset AIOP */ + sOutB(io + _CMD_REG, 0x0); + AiopID = sInW(io + _CHN_STAT0) & 0x07; + if (AiopID == 0x06) + return (1); + else /* AIOP does not exist */ + return (-1); +} + +/*************************************************************************** +Function: sReadAiopNumChan +Purpose: Read the number of channels available in an AIOP directly from + an AIOP. +Call: sReadAiopNumChan(io) + WordIO_t io: AIOP base I/O address +Return: int: The number of channels available +Comments: The number of channels is determined by write/reads from identical + offsets within the SRAM address spaces for channels 0 and 4. + If the channel 4 space is mirrored to channel 0 it is a 4 channel + AIOP, otherwise it is an 8 channel. +Warnings: No context switches are allowed while executing this function. +*/ +static int sReadAiopNumChan(WordIO_t io) +{ + Word_t x; + static Byte_t R[4] = { 0x00, 0x00, 0x34, 0x12 }; + + /* write to chan 0 SRAM */ + out32((DWordIO_t) io + _INDX_ADDR, R); + sOutW(io + _INDX_ADDR, 0); /* read from SRAM, chan 0 */ + x = sInW(io + _INDX_DATA); + sOutW(io + _INDX_ADDR, 0x4000); /* read from SRAM, chan 4 */ + if (x != sInW(io + _INDX_DATA)) /* if different must be 8 chan */ + return (8); + else + return (4); +} + +/*************************************************************************** +Function: sInitChan +Purpose: Initialization of a channel and channel structure +Call: sInitChan(CtlP,ChP,AiopNum,ChanNum) + CONTROLLER_T *CtlP; Ptr to controller structure + CHANNEL_T *ChP; Ptr to channel structure + int AiopNum; AIOP number within controller + int ChanNum; Channel number within AIOP +Return: int: 1 if initialization succeeded, 0 if it fails because channel + number exceeds number of channels available in AIOP. +Comments: This function must be called before a channel can be used. +Warnings: No range checking on any of the parameters is done. + + No context switches are allowed while executing this function. 
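+
+Example: an illustrative sketch only (the driver does the equivalent
+   during per-port setup, presumably from init_r_port()):
+
+      CHANNEL_t ch;
+
+      if (!sInitChan(CtlP, &ch, AiopNum, ChanNum))
+              printk(KERN_ERR "channel %d not present on AIOP %d\n",
+                     ChanNum, AiopNum);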
+*/ +static int sInitChan(CONTROLLER_T * CtlP, CHANNEL_T * ChP, int AiopNum, + int ChanNum) +{ + int i; + WordIO_t AiopIO; + WordIO_t ChIOOff; + Byte_t *ChR; + Word_t ChOff; + static Byte_t R[4]; + int brd9600; + + if (ChanNum >= CtlP->AiopNumChan[AiopNum]) + return 0; /* exceeds num chans in AIOP */ + + /* Channel, AIOP, and controller identifiers */ + ChP->CtlP = CtlP; + ChP->ChanID = CtlP->AiopID[AiopNum]; + ChP->AiopNum = AiopNum; + ChP->ChanNum = ChanNum; + + /* Global direct addresses */ + AiopIO = CtlP->AiopIO[AiopNum]; + ChP->Cmd = (ByteIO_t) AiopIO + _CMD_REG; + ChP->IntChan = (ByteIO_t) AiopIO + _INT_CHAN; + ChP->IntMask = (ByteIO_t) AiopIO + _INT_MASK; + ChP->IndexAddr = (DWordIO_t) AiopIO + _INDX_ADDR; + ChP->IndexData = AiopIO + _INDX_DATA; + + /* Channel direct addresses */ + ChIOOff = AiopIO + ChP->ChanNum * 2; + ChP->TxRxData = ChIOOff + _TD0; + ChP->ChanStat = ChIOOff + _CHN_STAT0; + ChP->TxRxCount = ChIOOff + _FIFO_CNT0; + ChP->IntID = (ByteIO_t) AiopIO + ChP->ChanNum + _INT_ID0; + + /* Initialize the channel from the RData array */ + for (i = 0; i < RDATASIZE; i += 4) { + R[0] = RData[i]; + R[1] = RData[i + 1] + 0x10 * ChanNum; + R[2] = RData[i + 2]; + R[3] = RData[i + 3]; + out32(ChP->IndexAddr, R); + } + + ChR = ChP->R; + for (i = 0; i < RREGDATASIZE; i += 4) { + ChR[i] = RRegData[i]; + ChR[i + 1] = RRegData[i + 1] + 0x10 * ChanNum; + ChR[i + 2] = RRegData[i + 2]; + ChR[i + 3] = RRegData[i + 3]; + } + + /* Indexed registers */ + ChOff = (Word_t) ChanNum *0x1000; + + if (sClockPrescale == 0x14) + brd9600 = 47; + else + brd9600 = 23; + + ChP->BaudDiv[0] = (Byte_t) (ChOff + _BAUD); + ChP->BaudDiv[1] = (Byte_t) ((ChOff + _BAUD) >> 8); + ChP->BaudDiv[2] = (Byte_t) brd9600; + ChP->BaudDiv[3] = (Byte_t) (brd9600 >> 8); + out32(ChP->IndexAddr, ChP->BaudDiv); + + ChP->TxControl[0] = (Byte_t) (ChOff + _TX_CTRL); + ChP->TxControl[1] = (Byte_t) ((ChOff + _TX_CTRL) >> 8); + ChP->TxControl[2] = 0; + ChP->TxControl[3] = 0; + out32(ChP->IndexAddr, ChP->TxControl); + + ChP->RxControl[0] = (Byte_t) (ChOff + _RX_CTRL); + ChP->RxControl[1] = (Byte_t) ((ChOff + _RX_CTRL) >> 8); + ChP->RxControl[2] = 0; + ChP->RxControl[3] = 0; + out32(ChP->IndexAddr, ChP->RxControl); + + ChP->TxEnables[0] = (Byte_t) (ChOff + _TX_ENBLS); + ChP->TxEnables[1] = (Byte_t) ((ChOff + _TX_ENBLS) >> 8); + ChP->TxEnables[2] = 0; + ChP->TxEnables[3] = 0; + out32(ChP->IndexAddr, ChP->TxEnables); + + ChP->TxCompare[0] = (Byte_t) (ChOff + _TXCMP1); + ChP->TxCompare[1] = (Byte_t) ((ChOff + _TXCMP1) >> 8); + ChP->TxCompare[2] = 0; + ChP->TxCompare[3] = 0; + out32(ChP->IndexAddr, ChP->TxCompare); + + ChP->TxReplace1[0] = (Byte_t) (ChOff + _TXREP1B1); + ChP->TxReplace1[1] = (Byte_t) ((ChOff + _TXREP1B1) >> 8); + ChP->TxReplace1[2] = 0; + ChP->TxReplace1[3] = 0; + out32(ChP->IndexAddr, ChP->TxReplace1); + + ChP->TxReplace2[0] = (Byte_t) (ChOff + _TXREP2); + ChP->TxReplace2[1] = (Byte_t) ((ChOff + _TXREP2) >> 8); + ChP->TxReplace2[2] = 0; + ChP->TxReplace2[3] = 0; + out32(ChP->IndexAddr, ChP->TxReplace2); + + ChP->TxFIFOPtrs = ChOff + _TXF_OUTP; + ChP->TxFIFO = ChOff + _TX_FIFO; + + sOutB(ChP->Cmd, (Byte_t) ChanNum | RESTXFCNT); /* apply reset Tx FIFO count */ + sOutB(ChP->Cmd, (Byte_t) ChanNum); /* remove reset Tx FIFO count */ + sOutW((WordIO_t) ChP->IndexAddr, ChP->TxFIFOPtrs); /* clear Tx in/out ptrs */ + sOutW(ChP->IndexData, 0); + ChP->RxFIFOPtrs = ChOff + _RXF_OUTP; + ChP->RxFIFO = ChOff + _RX_FIFO; + + sOutB(ChP->Cmd, (Byte_t) ChanNum | RESRXFCNT); /* apply reset Rx FIFO count */ + sOutB(ChP->Cmd, (Byte_t) ChanNum); 
/* remove reset Rx FIFO count */ + sOutW((WordIO_t) ChP->IndexAddr, ChP->RxFIFOPtrs); /* clear Rx out ptr */ + sOutW(ChP->IndexData, 0); + sOutW((WordIO_t) ChP->IndexAddr, ChP->RxFIFOPtrs + 2); /* clear Rx in ptr */ + sOutW(ChP->IndexData, 0); + ChP->TxPrioCnt = ChOff + _TXP_CNT; + sOutW((WordIO_t) ChP->IndexAddr, ChP->TxPrioCnt); + sOutB(ChP->IndexData, 0); + ChP->TxPrioPtr = ChOff + _TXP_PNTR; + sOutW((WordIO_t) ChP->IndexAddr, ChP->TxPrioPtr); + sOutB(ChP->IndexData, 0); + ChP->TxPrioBuf = ChOff + _TXP_BUF; + sEnRxProcessor(ChP); /* start the Rx processor */ + + return 1; +} + +/*************************************************************************** +Function: sStopRxProcessor +Purpose: Stop the receive processor from processing a channel. +Call: sStopRxProcessor(ChP) + CHANNEL_T *ChP; Ptr to channel structure + +Comments: The receive processor can be started again with sStartRxProcessor(). + This function causes the receive processor to skip over the + stopped channel. It does not stop it from processing other channels. + +Warnings: No context switches are allowed while executing this function. + + Do not leave the receive processor stopped for more than one + character time. + + After calling this function a delay of 4 uS is required to ensure + that the receive processor is no longer processing this channel. +*/ +static void sStopRxProcessor(CHANNEL_T * ChP) +{ + Byte_t R[4]; + + R[0] = ChP->R[0]; + R[1] = ChP->R[1]; + R[2] = 0x0a; + R[3] = ChP->R[3]; + out32(ChP->IndexAddr, R); +} + +/*************************************************************************** +Function: sFlushRxFIFO +Purpose: Flush the Rx FIFO +Call: sFlushRxFIFO(ChP) + CHANNEL_T *ChP; Ptr to channel structure +Return: void +Comments: To prevent data from being enqueued or dequeued in the Tx FIFO + while it is being flushed the receive processor is stopped + and the transmitter is disabled. After these operations a + 4 uS delay is done before clearing the pointers to allow + the receive processor to stop. These items are handled inside + this function. +Warnings: No context switches are allowed while executing this function. +*/ +static void sFlushRxFIFO(CHANNEL_T * ChP) +{ + int i; + Byte_t Ch; /* channel number within AIOP */ + int RxFIFOEnabled; /* 1 if Rx FIFO enabled */ + + if (sGetRxCnt(ChP) == 0) /* Rx FIFO empty */ + return; /* don't need to flush */ + + RxFIFOEnabled = 0; + if (ChP->R[0x32] == 0x08) { /* Rx FIFO is enabled */ + RxFIFOEnabled = 1; + sDisRxFIFO(ChP); /* disable it */ + for (i = 0; i < 2000 / 200; i++) /* delay 2 uS to allow proc to disable FIFO */ + sInB(ChP->IntChan); /* depends on bus i/o timing */ + } + sGetChanStatus(ChP); /* clear any pending Rx errors in chan stat */ + Ch = (Byte_t) sGetChanNum(ChP); + sOutB(ChP->Cmd, Ch | RESRXFCNT); /* apply reset Rx FIFO count */ + sOutB(ChP->Cmd, Ch); /* remove reset Rx FIFO count */ + sOutW((WordIO_t) ChP->IndexAddr, ChP->RxFIFOPtrs); /* clear Rx out ptr */ + sOutW(ChP->IndexData, 0); + sOutW((WordIO_t) ChP->IndexAddr, ChP->RxFIFOPtrs + 2); /* clear Rx in ptr */ + sOutW(ChP->IndexData, 0); + if (RxFIFOEnabled) + sEnRxFIFO(ChP); /* enable Rx FIFO */ +} + +/*************************************************************************** +Function: sFlushTxFIFO +Purpose: Flush the Tx FIFO +Call: sFlushTxFIFO(ChP) + CHANNEL_T *ChP; Ptr to channel structure +Return: void +Comments: To prevent data from being enqueued or dequeued in the Tx FIFO + while it is being flushed the receive processor is stopped + and the transmitter is disabled. 
After these operations a + 4 uS delay is done before clearing the pointers to allow + the receive processor to stop. These items are handled inside + this function. +Warnings: No context switches are allowed while executing this function. +*/ +static void sFlushTxFIFO(CHANNEL_T * ChP) +{ + int i; + Byte_t Ch; /* channel number within AIOP */ + int TxEnabled; /* 1 if transmitter enabled */ + + if (sGetTxCnt(ChP) == 0) /* Tx FIFO empty */ + return; /* don't need to flush */ + + TxEnabled = 0; + if (ChP->TxControl[3] & TX_ENABLE) { + TxEnabled = 1; + sDisTransmit(ChP); /* disable transmitter */ + } + sStopRxProcessor(ChP); /* stop Rx processor */ + for (i = 0; i < 4000 / 200; i++) /* delay 4 uS to allow proc to stop */ + sInB(ChP->IntChan); /* depends on bus i/o timing */ + Ch = (Byte_t) sGetChanNum(ChP); + sOutB(ChP->Cmd, Ch | RESTXFCNT); /* apply reset Tx FIFO count */ + sOutB(ChP->Cmd, Ch); /* remove reset Tx FIFO count */ + sOutW((WordIO_t) ChP->IndexAddr, ChP->TxFIFOPtrs); /* clear Tx in/out ptrs */ + sOutW(ChP->IndexData, 0); + if (TxEnabled) + sEnTransmit(ChP); /* enable transmitter */ + sStartRxProcessor(ChP); /* restart Rx processor */ +} + +/*************************************************************************** +Function: sWriteTxPrioByte +Purpose: Write a byte of priority transmit data to a channel +Call: sWriteTxPrioByte(ChP,Data) + CHANNEL_T *ChP; Ptr to channel structure + Byte_t Data; The transmit data byte + +Return: int: 1 if the bytes is successfully written, otherwise 0. + +Comments: The priority byte is transmitted before any data in the Tx FIFO. + +Warnings: No context switches are allowed while executing this function. +*/ +static int sWriteTxPrioByte(CHANNEL_T * ChP, Byte_t Data) +{ + Byte_t DWBuf[4]; /* buffer for double word writes */ + Word_t *WordPtr; /* must be far because Win SS != DS */ + register DWordIO_t IndexAddr; + + if (sGetTxCnt(ChP) > 1) { /* write it to Tx priority buffer */ + IndexAddr = ChP->IndexAddr; + sOutW((WordIO_t) IndexAddr, ChP->TxPrioCnt); /* get priority buffer status */ + if (sInB((ByteIO_t) ChP->IndexData) & PRI_PEND) /* priority buffer busy */ + return (0); /* nothing sent */ + + WordPtr = (Word_t *) (&DWBuf[0]); + *WordPtr = ChP->TxPrioBuf; /* data byte address */ + + DWBuf[2] = Data; /* data byte value */ + out32(IndexAddr, DWBuf); /* write it out */ + + *WordPtr = ChP->TxPrioCnt; /* Tx priority count address */ + + DWBuf[2] = PRI_PEND + 1; /* indicate 1 byte pending */ + DWBuf[3] = 0; /* priority buffer pointer */ + out32(IndexAddr, DWBuf); /* write it out */ + } else { /* write it to Tx FIFO */ + + sWriteTxByte(sGetTxRxDataIO(ChP), Data); + } + return (1); /* 1 byte sent */ +} + +/*************************************************************************** +Function: sEnInterrupts +Purpose: Enable one or more interrupts for a channel +Call: sEnInterrupts(ChP,Flags) + CHANNEL_T *ChP; Ptr to channel structure + Word_t Flags: Interrupt enable flags, can be any combination + of the following flags: + TXINT_EN: Interrupt on Tx FIFO empty + RXINT_EN: Interrupt on Rx FIFO at trigger level (see + sSetRxTrigger()) + SRCINT_EN: Interrupt on SRC (Special Rx Condition) + MCINT_EN: Interrupt on modem input change + CHANINT_EN: Allow channel interrupt signal to the AIOP's + Interrupt Channel Register. +Return: void +Comments: If an interrupt enable flag is set in Flags, that interrupt will be + enabled. If an interrupt enable flag is not set in Flags, that + interrupt will not be changed. 
Interrupts can be disabled with + function sDisInterrupts(). + + This function sets the appropriate bit for the channel in the AIOP's + Interrupt Mask Register if the CHANINT_EN flag is set. This allows + this channel's bit to be set in the AIOP's Interrupt Channel Register. + + Interrupts must also be globally enabled before channel interrupts + will be passed on to the host. This is done with function + sEnGlobalInt(). + + In some cases it may be desirable to disable interrupts globally but + enable channel interrupts. This would allow the global interrupt + status register to be used to determine which AIOPs need service. +*/ +static void sEnInterrupts(CHANNEL_T * ChP, Word_t Flags) +{ + Byte_t Mask; /* Interrupt Mask Register */ + + ChP->RxControl[2] |= + ((Byte_t) Flags & (RXINT_EN | SRCINT_EN | MCINT_EN)); + + out32(ChP->IndexAddr, ChP->RxControl); + + ChP->TxControl[2] |= ((Byte_t) Flags & TXINT_EN); + + out32(ChP->IndexAddr, ChP->TxControl); + + if (Flags & CHANINT_EN) { + Mask = sInB(ChP->IntMask) | sBitMapSetTbl[ChP->ChanNum]; + sOutB(ChP->IntMask, Mask); + } +} + +/*************************************************************************** +Function: sDisInterrupts +Purpose: Disable one or more interrupts for a channel +Call: sDisInterrupts(ChP,Flags) + CHANNEL_T *ChP; Ptr to channel structure + Word_t Flags: Interrupt flags, can be any combination + of the following flags: + TXINT_EN: Interrupt on Tx FIFO empty + RXINT_EN: Interrupt on Rx FIFO at trigger level (see + sSetRxTrigger()) + SRCINT_EN: Interrupt on SRC (Special Rx Condition) + MCINT_EN: Interrupt on modem input change + CHANINT_EN: Disable channel interrupt signal to the + AIOP's Interrupt Channel Register. +Return: void +Comments: If an interrupt flag is set in Flags, that interrupt will be + disabled. If an interrupt flag is not set in Flags, that + interrupt will not be changed. Interrupts can be enabled with + function sEnInterrupts(). + + This function clears the appropriate bit for the channel in the AIOP's + Interrupt Mask Register if the CHANINT_EN flag is set. This blocks + this channel's bit from being set in the AIOP's Interrupt Channel + Register. +*/ +static void sDisInterrupts(CHANNEL_T * ChP, Word_t Flags) +{ + Byte_t Mask; /* Interrupt Mask Register */ + + ChP->RxControl[2] &= + ~((Byte_t) Flags & (RXINT_EN | SRCINT_EN | MCINT_EN)); + out32(ChP->IndexAddr, ChP->RxControl); + ChP->TxControl[2] &= ~((Byte_t) Flags & TXINT_EN); + out32(ChP->IndexAddr, ChP->TxControl); + + if (Flags & CHANINT_EN) { + Mask = sInB(ChP->IntMask) & sBitMapClrTbl[ChP->ChanNum]; + sOutB(ChP->IntMask, Mask); + } +} + +static void sSetInterfaceMode(CHANNEL_T * ChP, Byte_t mode) +{ + sOutB(ChP->CtlP->AiopIO[2], (mode & 0x18) | ChP->ChanNum); +} + +/* + * Not an official SSCI function, but how to reset RocketModems. + * ISA bus version + */ +static void sModemReset(CONTROLLER_T * CtlP, int chan, int on) +{ + ByteIO_t addr; + Byte_t val; + + addr = CtlP->AiopIO[0] + 0x400; + val = sInB(CtlP->MReg3IO); + /* if AIOP[1] is not enabled, enable it */ + if ((val & 2) == 0) { + val = sInB(CtlP->MReg2IO); + sOutB(CtlP->MReg2IO, (val & 0xfc) | (1 & 0x03)); + sOutB(CtlP->MBaseIO, (unsigned char) (addr >> 6)); + } + + sEnAiop(CtlP, 1); + if (!on) + addr += 8; + sOutB(addr + chan, 0); /* apply or remove reset */ + sDisAiop(CtlP, 1); +} + +/* + * Not an official SSCI function, but how to reset RocketModems. 
+ * PCI bus version + */ +static void sPCIModemReset(CONTROLLER_T * CtlP, int chan, int on) +{ + ByteIO_t addr; + + addr = CtlP->AiopIO[0] + 0x40; /* 2nd AIOP */ + if (!on) + addr += 8; + sOutB(addr + chan, 0); /* apply or remove reset */ +} + +/* Resets the speaker controller on RocketModem II and III devices */ +static void rmSpeakerReset(CONTROLLER_T * CtlP, unsigned long model) +{ + ByteIO_t addr; + + /* RocketModem II speaker control is at the 8th port location of offset 0x40 */ + if ((model == MODEL_RP4M) || (model == MODEL_RP6M)) { + addr = CtlP->AiopIO[0] + 0x4F; + sOutB(addr, 0); + } + + /* RocketModem III speaker control is at the 1st port location of offset 0x80 */ + if ((model == MODEL_UPCI_RM3_8PORT) + || (model == MODEL_UPCI_RM3_4PORT)) { + addr = CtlP->AiopIO[0] + 0x88; + sOutB(addr, 0); + } +} + +/* Returns the line number given the controller (board), aiop and channel number */ +static unsigned char GetLineNumber(int ctrl, int aiop, int ch) +{ + return lineNumbers[(ctrl << 5) | (aiop << 3) | ch]; +} + +/* + * Stores the line number associated with a given controller (board), aiop + * and channel number. + * Returns: The line number assigned + */ +static unsigned char SetLineNumber(int ctrl, int aiop, int ch) +{ + lineNumbers[(ctrl << 5) | (aiop << 3) | ch] = nextLineNumber++; + return (nextLineNumber - 1); +} diff --git a/drivers/tty/rocket.h b/drivers/tty/rocket.h new file mode 100644 index 00000000000..ec863f35f1a --- /dev/null +++ b/drivers/tty/rocket.h @@ -0,0 +1,111 @@ +/* + * rocket.h --- the exported interface of the rocket driver to its configuration program. + * + * Written by Theodore Ts'o, Copyright 1997. + * Copyright 1997 Comtrol Corporation. + * + */ + +/* Model Information Struct */ +typedef struct { + unsigned long model; + char modelString[80]; + unsigned long numPorts; + int loadrm2; + int startingPortNumber; +} rocketModel_t; + +struct rocket_config { + int line; + int flags; + int closing_wait; + int close_delay; + int port; + int reserved[32]; +}; + +struct rocket_ports { + int tty_major; + int callout_major; + rocketModel_t rocketModel[8]; +}; + +struct rocket_version { + char rocket_version[32]; + char rocket_date[32]; + char reserved[64]; +}; + +/* + * Rocketport flags + */ +/*#define ROCKET_CALLOUT_NOHUP 0x00000001 */ +#define ROCKET_FORCE_CD 0x00000002 +#define ROCKET_HUP_NOTIFY 0x00000004 +#define ROCKET_SPLIT_TERMIOS 0x00000008 +#define ROCKET_SPD_MASK 0x00000070 +#define ROCKET_SPD_HI 0x00000010 /* Use 56000 instead of 38400 bps */ +#define ROCKET_SPD_VHI 0x00000020 /* Use 115200 instead of 38400 bps */ +#define ROCKET_SPD_SHI 0x00000030 /* Use 230400 instead of 38400 bps */ +#define ROCKET_SPD_WARP 0x00000040 /* Use 460800 instead of 38400 bps */ +#define ROCKET_SAK 0x00000080 +#define ROCKET_SESSION_LOCKOUT 0x00000100 +#define ROCKET_PGRP_LOCKOUT 0x00000200 +#define ROCKET_RTS_TOGGLE 0x00000400 +#define ROCKET_MODE_MASK 0x00003000 +#define ROCKET_MODE_RS232 0x00000000 +#define ROCKET_MODE_RS485 0x00001000 +#define ROCKET_MODE_RS422 0x00002000 +#define ROCKET_FLAGS 0x00003FFF + +#define ROCKET_USR_MASK 0x0071 /* Legal flags that non-privileged + * users can set or reset */ + +/* + * For closing_wait and closing_wait2 + */ +#define ROCKET_CLOSING_WAIT_NONE ASYNC_CLOSING_WAIT_NONE +#define ROCKET_CLOSING_WAIT_INF ASYNC_CLOSING_WAIT_INF + +/* + * Rocketport ioctls -- "RP" + */ +#define RCKP_GET_STRUCT 0x00525001 +#define RCKP_GET_CONFIG 0x00525002 +#define RCKP_SET_CONFIG 0x00525003 +#define RCKP_GET_PORTS 0x00525004 +#define RCKP_RESET_RM2 
0x00525005 +#define RCKP_GET_VERSION 0x00525006 + +/* Rocketport Models */ +#define MODEL_RP32INTF 0x0001 /* RP 32 port w/external I/F */ +#define MODEL_RP8INTF 0x0002 /* RP 8 port w/external I/F */ +#define MODEL_RP16INTF 0x0003 /* RP 16 port w/external I/F */ +#define MODEL_RP8OCTA 0x0005 /* RP 8 port w/octa cable */ +#define MODEL_RP4QUAD 0x0004 /* RP 4 port w/quad cable */ +#define MODEL_RP8J 0x0006 /* RP 8 port w/RJ11 connectors */ +#define MODEL_RP4J 0x0007 /* RP 4 port w/RJ45 connectors */ +#define MODEL_RP8SNI 0x0008 /* RP 8 port w/ DB78 SNI connector */ +#define MODEL_RP16SNI 0x0009 /* RP 16 port w/ DB78 SNI connector */ +#define MODEL_RPP4 0x000A /* RP Plus 4 port */ +#define MODEL_RPP8 0x000B /* RP Plus 8 port */ +#define MODEL_RP2_232 0x000E /* RP Plus 2 port RS232 */ +#define MODEL_RP2_422 0x000F /* RP Plus 2 port RS232 */ + +/* Rocketmodem II Models */ +#define MODEL_RP6M 0x000C /* RM 6 port */ +#define MODEL_RP4M 0x000D /* RM 4 port */ + +/* Universal PCI boards */ +#define MODEL_UPCI_RP32INTF 0x0801 /* RP UPCI 32 port w/external I/F */ +#define MODEL_UPCI_RP8INTF 0x0802 /* RP UPCI 8 port w/external I/F */ +#define MODEL_UPCI_RP16INTF 0x0803 /* RP UPCI 16 port w/external I/F */ +#define MODEL_UPCI_RP8OCTA 0x0805 /* RP UPCI 8 port w/octa cable */ +#define MODEL_UPCI_RM3_8PORT 0x080C /* RP UPCI Rocketmodem III 8 port */ +#define MODEL_UPCI_RM3_4PORT 0x080C /* RP UPCI Rocketmodem III 4 port */ + +/* Compact PCI 16 port */ +#define MODEL_CPCI_RP16INTF 0x0903 /* RP Compact PCI 16 port w/external I/F */ + +/* All ISA boards */ +#define MODEL_ISA 0x1000 diff --git a/drivers/tty/rocket_int.h b/drivers/tty/rocket_int.h new file mode 100644 index 00000000000..67e0f1e778a --- /dev/null +++ b/drivers/tty/rocket_int.h @@ -0,0 +1,1214 @@ +/* + * rocket_int.h --- internal header file for rocket.c + * + * Written by Theodore Ts'o, Copyright 1997. + * Copyright 1997 Comtrol Corporation. + * + */ + +/* + * Definition of the types in rcktpt_type + */ +#define ROCKET_TYPE_NORMAL 0 +#define ROCKET_TYPE_MODEM 1 +#define ROCKET_TYPE_MODEMII 2 +#define ROCKET_TYPE_MODEMIII 3 +#define ROCKET_TYPE_PC104 4 + +#include <linux/mutex.h> + +#include <asm/io.h> +#include <asm/byteorder.h> + +typedef unsigned char Byte_t; +typedef unsigned int ByteIO_t; + +typedef unsigned int Word_t; +typedef unsigned int WordIO_t; + +typedef unsigned int DWordIO_t; + +/* + * Note! Normally the Linux I/O macros already take care of + * byte-swapping the I/O instructions. However, all accesses using + * sOutDW aren't really 32-bit accesses, but should be handled in byte + * order. Hence the use of the cpu_to_le32() macro to byte-swap + * things to no-op the byte swapping done by the big-endian outl() + * instruction. 
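+ *
+ * As used throughout the driver, out32() takes a 4-byte array laid
+ * out as { index low, index high, data low, data high } and writes
+ * it to a channel's _INDX_ADDR register in a single access; this is
+ * roughly the indexed-register update that sOutW() of the index at
+ * _INDX_ADDR followed by a write of the data at _INDX_DATA performs
+ * in two steps (sInitChan() in rocket.c uses both forms).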
+ */ + +static inline void sOutB(unsigned short port, unsigned char value) +{ +#ifdef ROCKET_DEBUG_IO + printk(KERN_DEBUG "sOutB(%x, %x)...\n", port, value); +#endif + outb_p(value, port); +} + +static inline void sOutW(unsigned short port, unsigned short value) +{ +#ifdef ROCKET_DEBUG_IO + printk(KERN_DEBUG "sOutW(%x, %x)...\n", port, value); +#endif + outw_p(value, port); +} + +static inline void out32(unsigned short port, Byte_t *p) +{ + u32 value = get_unaligned_le32(p); +#ifdef ROCKET_DEBUG_IO + printk(KERN_DEBUG "out32(%x, %lx)...\n", port, value); +#endif + outl_p(value, port); +} + +static inline unsigned char sInB(unsigned short port) +{ + return inb_p(port); +} + +static inline unsigned short sInW(unsigned short port) +{ + return inw_p(port); +} + +/* This is used to move arrays of bytes so byte swapping isn't appropriate. */ +#define sOutStrW(port, addr, count) if (count) outsw(port, addr, count) +#define sInStrW(port, addr, count) if (count) insw(port, addr, count) + +#define CTL_SIZE 8 +#define AIOP_CTL_SIZE 4 +#define CHAN_AIOP_SIZE 8 +#define MAX_PORTS_PER_AIOP 8 +#define MAX_AIOPS_PER_BOARD 4 +#define MAX_PORTS_PER_BOARD 32 + +/* Bus type ID */ +#define isISA 0 +#define isPCI 1 +#define isMC 2 + +/* Controller ID numbers */ +#define CTLID_NULL -1 /* no controller exists */ +#define CTLID_0001 0x0001 /* controller release 1 */ + +/* AIOP ID numbers, identifies AIOP type implementing channel */ +#define AIOPID_NULL -1 /* no AIOP or channel exists */ +#define AIOPID_0001 0x0001 /* AIOP release 1 */ + +/************************************************************************ + Global Register Offsets - Direct Access - Fixed values +************************************************************************/ + +#define _CMD_REG 0x38 /* Command Register 8 Write */ +#define _INT_CHAN 0x39 /* Interrupt Channel Register 8 Read */ +#define _INT_MASK 0x3A /* Interrupt Mask Register 8 Read / Write */ +#define _UNUSED 0x3B /* Unused 8 */ +#define _INDX_ADDR 0x3C /* Index Register Address 16 Write */ +#define _INDX_DATA 0x3E /* Index Register Data 8/16 Read / Write */ + +/************************************************************************ + Channel Register Offsets for 1st channel in AIOP - Direct Access +************************************************************************/ +#define _TD0 0x00 /* Transmit Data 16 Write */ +#define _RD0 0x00 /* Receive Data 16 Read */ +#define _CHN_STAT0 0x20 /* Channel Status 8/16 Read / Write */ +#define _FIFO_CNT0 0x10 /* Transmit/Receive FIFO Count 16 Read */ +#define _INT_ID0 0x30 /* Interrupt Identification 8 Read */ + +/************************************************************************ + Tx Control Register Offsets - Indexed - External - Fixed +************************************************************************/ +#define _TX_ENBLS 0x980 /* Tx Processor Enables Register 8 Read / Write */ +#define _TXCMP1 0x988 /* Transmit Compare Value #1 8 Read / Write */ +#define _TXCMP2 0x989 /* Transmit Compare Value #2 8 Read / Write */ +#define _TXREP1B1 0x98A /* Tx Replace Value #1 - Byte 1 8 Read / Write */ +#define _TXREP1B2 0x98B /* Tx Replace Value #1 - Byte 2 8 Read / Write */ +#define _TXREP2 0x98C /* Transmit Replace Value #2 8 Read / Write */ + +/************************************************************************ +Memory Controller Register Offsets - Indexed - External - Fixed +************************************************************************/ +#define _RX_FIFO 0x000 /* Rx FIFO */ +#define _TX_FIFO 0x800 /* Tx FIFO */ 
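+/*
+ * These memory controller offsets (_RX_FIFO, _TX_FIFO and the FIFO
+ * pointer registers below) are relative to a channel's 4K block of
+ * AIOP SRAM: the driver adds ChanNum * 0x1000 (see sInitChan() in
+ * rocket.c) and then reaches the result through the _INDX_ADDR /
+ * _INDX_DATA index registers.
+ */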
+#define _RXF_OUTP 0x990 /* Rx FIFO OUT pointer 16 Read / Write */ +#define _RXF_INP 0x992 /* Rx FIFO IN pointer 16 Read / Write */ +#define _TXF_OUTP 0x994 /* Tx FIFO OUT pointer 8 Read / Write */ +#define _TXF_INP 0x995 /* Tx FIFO IN pointer 8 Read / Write */ +#define _TXP_CNT 0x996 /* Tx Priority Count 8 Read / Write */ +#define _TXP_PNTR 0x997 /* Tx Priority Pointer 8 Read / Write */ + +#define PRI_PEND 0x80 /* Priority data pending (bit7, Tx pri cnt) */ +#define TXFIFO_SIZE 255 /* size of Tx FIFO */ +#define RXFIFO_SIZE 1023 /* size of Rx FIFO */ + +/************************************************************************ +Tx Priority Buffer - Indexed - External - Fixed +************************************************************************/ +#define _TXP_BUF 0x9C0 /* Tx Priority Buffer 32 Bytes Read / Write */ +#define TXP_SIZE 0x20 /* 32 bytes */ + +/************************************************************************ +Channel Register Offsets - Indexed - Internal - Fixed +************************************************************************/ + +#define _TX_CTRL 0xFF0 /* Transmit Control 16 Write */ +#define _RX_CTRL 0xFF2 /* Receive Control 8 Write */ +#define _BAUD 0xFF4 /* Baud Rate 16 Write */ +#define _CLK_PRE 0xFF6 /* Clock Prescaler 8 Write */ + +#define STMBREAK 0x08 /* BREAK */ +#define STMFRAME 0x04 /* framing error */ +#define STMRCVROVR 0x02 /* receiver over run error */ +#define STMPARITY 0x01 /* parity error */ +#define STMERROR (STMBREAK | STMFRAME | STMPARITY) +#define STMBREAKH 0x800 /* BREAK */ +#define STMFRAMEH 0x400 /* framing error */ +#define STMRCVROVRH 0x200 /* receiver over run error */ +#define STMPARITYH 0x100 /* parity error */ +#define STMERRORH (STMBREAKH | STMFRAMEH | STMPARITYH) + +#define CTS_ACT 0x20 /* CTS input asserted */ +#define DSR_ACT 0x10 /* DSR input asserted */ +#define CD_ACT 0x08 /* CD input asserted */ +#define TXFIFOMT 0x04 /* Tx FIFO is empty */ +#define TXSHRMT 0x02 /* Tx shift register is empty */ +#define RDA 0x01 /* Rx data available */ +#define DRAINED (TXFIFOMT | TXSHRMT) /* indicates Tx is drained */ + +#define STATMODE 0x8000 /* status mode enable bit */ +#define RXFOVERFL 0x2000 /* receive FIFO overflow */ +#define RX2MATCH 0x1000 /* receive compare byte 2 match */ +#define RX1MATCH 0x0800 /* receive compare byte 1 match */ +#define RXBREAK 0x0400 /* received BREAK */ +#define RXFRAME 0x0200 /* received framing error */ +#define RXPARITY 0x0100 /* received parity error */ +#define STATERROR (RXBREAK | RXFRAME | RXPARITY) + +#define CTSFC_EN 0x80 /* CTS flow control enable bit */ +#define RTSTOG_EN 0x40 /* RTS toggle enable bit */ +#define TXINT_EN 0x10 /* transmit interrupt enable */ +#define STOP2 0x08 /* enable 2 stop bits (0 = 1 stop) */ +#define PARITY_EN 0x04 /* enable parity (0 = no parity) */ +#define EVEN_PAR 0x02 /* even parity (0 = odd parity) */ +#define DATA8BIT 0x01 /* 8 bit data (0 = 7 bit data) */ + +#define SETBREAK 0x10 /* send break condition (must clear) */ +#define LOCALLOOP 0x08 /* local loopback set for test */ +#define SET_DTR 0x04 /* assert DTR */ +#define SET_RTS 0x02 /* assert RTS */ +#define TX_ENABLE 0x01 /* enable transmitter */ + +#define RTSFC_EN 0x40 /* RTS flow control enable */ +#define RXPROC_EN 0x20 /* receive processor enable */ +#define TRIG_NO 0x00 /* Rx FIFO trigger level 0 (no trigger) */ +#define TRIG_1 0x08 /* trigger level 1 char */ +#define TRIG_1_2 0x10 /* trigger level 1/2 */ +#define TRIG_7_8 0x18 /* trigger level 7/8 */ +#define TRIG_MASK 0x18 /* trigger level mask */ 
+#define SRCINT_EN 0x04 /* special Rx condition interrupt enable */ +#define RXINT_EN 0x02 /* Rx interrupt enable */ +#define MCINT_EN 0x01 /* modem change interrupt enable */ + +#define RXF_TRIG 0x20 /* Rx FIFO trigger level interrupt */ +#define TXFIFO_MT 0x10 /* Tx FIFO empty interrupt */ +#define SRC_INT 0x08 /* special receive condition interrupt */ +#define DELTA_CD 0x04 /* CD change interrupt */ +#define DELTA_CTS 0x02 /* CTS change interrupt */ +#define DELTA_DSR 0x01 /* DSR change interrupt */ + +#define REP1W2_EN 0x10 /* replace byte 1 with 2 bytes enable */ +#define IGN2_EN 0x08 /* ignore byte 2 enable */ +#define IGN1_EN 0x04 /* ignore byte 1 enable */ +#define COMP2_EN 0x02 /* compare byte 2 enable */ +#define COMP1_EN 0x01 /* compare byte 1 enable */ + +#define RESET_ALL 0x80 /* reset AIOP (all channels) */ +#define TXOVERIDE 0x40 /* Transmit software off override */ +#define RESETUART 0x20 /* reset channel's UART */ +#define RESTXFCNT 0x10 /* reset channel's Tx FIFO count register */ +#define RESRXFCNT 0x08 /* reset channel's Rx FIFO count register */ + +#define INTSTAT0 0x01 /* AIOP 0 interrupt status */ +#define INTSTAT1 0x02 /* AIOP 1 interrupt status */ +#define INTSTAT2 0x04 /* AIOP 2 interrupt status */ +#define INTSTAT3 0x08 /* AIOP 3 interrupt status */ + +#define INTR_EN 0x08 /* allow interrupts to host */ +#define INT_STROB 0x04 /* strobe and clear interrupt line (EOI) */ + +/************************************************************************** + MUDBAC remapped for PCI +**************************************************************************/ + +#define _CFG_INT_PCI 0x40 +#define _PCI_INT_FUNC 0x3A + +#define PCI_STROB 0x2000 /* bit 13 of int aiop register */ +#define INTR_EN_PCI 0x0010 /* allow interrupts to host */ + +/* + * Definitions for Universal PCI board registers + */ +#define _PCI_9030_INT_CTRL 0x4c /* Offsets from BAR1 */ +#define _PCI_9030_GPIO_CTRL 0x54 +#define PCI_INT_CTRL_AIOP 0x0001 +#define PCI_GPIO_CTRL_8PORT 0x4000 +#define _PCI_9030_RING_IND 0xc0 /* Offsets from BAR1 */ + +#define CHAN3_EN 0x08 /* enable AIOP 3 */ +#define CHAN2_EN 0x04 /* enable AIOP 2 */ +#define CHAN1_EN 0x02 /* enable AIOP 1 */ +#define CHAN0_EN 0x01 /* enable AIOP 0 */ +#define FREQ_DIS 0x00 +#define FREQ_274HZ 0x60 +#define FREQ_137HZ 0x50 +#define FREQ_69HZ 0x40 +#define FREQ_34HZ 0x30 +#define FREQ_17HZ 0x20 +#define FREQ_9HZ 0x10 +#define PERIODIC_ONLY 0x80 /* only PERIODIC interrupt */ + +#define CHANINT_EN 0x0100 /* flags to enable/disable channel ints */ + +#define RDATASIZE 72 +#define RREGDATASIZE 52 + +/* + * AIOP interrupt bits for ISA/PCI boards and UPCI boards. 
+ */ +#define AIOP_INTR_BIT_0 0x0001 +#define AIOP_INTR_BIT_1 0x0002 +#define AIOP_INTR_BIT_2 0x0004 +#define AIOP_INTR_BIT_3 0x0008 + +#define AIOP_INTR_BITS ( \ + AIOP_INTR_BIT_0 \ + | AIOP_INTR_BIT_1 \ + | AIOP_INTR_BIT_2 \ + | AIOP_INTR_BIT_3) + +#define UPCI_AIOP_INTR_BIT_0 0x0004 +#define UPCI_AIOP_INTR_BIT_1 0x0020 +#define UPCI_AIOP_INTR_BIT_2 0x0100 +#define UPCI_AIOP_INTR_BIT_3 0x0800 + +#define UPCI_AIOP_INTR_BITS ( \ + UPCI_AIOP_INTR_BIT_0 \ + | UPCI_AIOP_INTR_BIT_1 \ + | UPCI_AIOP_INTR_BIT_2 \ + | UPCI_AIOP_INTR_BIT_3) + +/* Controller level information structure */ +typedef struct { + int CtlID; + int CtlNum; + int BusType; + int boardType; + int isUPCI; + WordIO_t PCIIO; + WordIO_t PCIIO2; + ByteIO_t MBaseIO; + ByteIO_t MReg1IO; + ByteIO_t MReg2IO; + ByteIO_t MReg3IO; + Byte_t MReg2; + Byte_t MReg3; + int NumAiop; + int AltChanRingIndicator; + ByteIO_t UPCIRingInd; + WordIO_t AiopIO[AIOP_CTL_SIZE]; + ByteIO_t AiopIntChanIO[AIOP_CTL_SIZE]; + int AiopID[AIOP_CTL_SIZE]; + int AiopNumChan[AIOP_CTL_SIZE]; + Word_t *AiopIntrBits; +} CONTROLLER_T; + +typedef CONTROLLER_T CONTROLLER_t; + +/* Channel level information structure */ +typedef struct { + CONTROLLER_T *CtlP; + int AiopNum; + int ChanID; + int ChanNum; + int rtsToggle; + + ByteIO_t Cmd; + ByteIO_t IntChan; + ByteIO_t IntMask; + DWordIO_t IndexAddr; + WordIO_t IndexData; + + WordIO_t TxRxData; + WordIO_t ChanStat; + WordIO_t TxRxCount; + ByteIO_t IntID; + + Word_t TxFIFO; + Word_t TxFIFOPtrs; + Word_t RxFIFO; + Word_t RxFIFOPtrs; + Word_t TxPrioCnt; + Word_t TxPrioPtr; + Word_t TxPrioBuf; + + Byte_t R[RREGDATASIZE]; + + Byte_t BaudDiv[4]; + Byte_t TxControl[4]; + Byte_t RxControl[4]; + Byte_t TxEnables[4]; + Byte_t TxCompare[4]; + Byte_t TxReplace1[4]; + Byte_t TxReplace2[4]; +} CHANNEL_T; + +typedef CHANNEL_T CHANNEL_t; +typedef CHANNEL_T *CHANPTR_T; + +#define InterfaceModeRS232 0x00 +#define InterfaceModeRS422 0x08 +#define InterfaceModeRS485 0x10 +#define InterfaceModeRS232T 0x18 + +/*************************************************************************** +Function: sClrBreak +Purpose: Stop sending a transmit BREAK signal +Call: sClrBreak(ChP) + CHANNEL_T *ChP; Ptr to channel structure +*/ +#define sClrBreak(ChP) \ +do { \ + (ChP)->TxControl[3] &= ~SETBREAK; \ + out32((ChP)->IndexAddr,(ChP)->TxControl); \ +} while (0) + +/*************************************************************************** +Function: sClrDTR +Purpose: Clr the DTR output +Call: sClrDTR(ChP) + CHANNEL_T *ChP; Ptr to channel structure +*/ +#define sClrDTR(ChP) \ +do { \ + (ChP)->TxControl[3] &= ~SET_DTR; \ + out32((ChP)->IndexAddr,(ChP)->TxControl); \ +} while (0) + +/*************************************************************************** +Function: sClrRTS +Purpose: Clr the RTS output +Call: sClrRTS(ChP) + CHANNEL_T *ChP; Ptr to channel structure +*/ +#define sClrRTS(ChP) \ +do { \ + if ((ChP)->rtsToggle) break; \ + (ChP)->TxControl[3] &= ~SET_RTS; \ + out32((ChP)->IndexAddr,(ChP)->TxControl); \ +} while (0) + +/*************************************************************************** +Function: sClrTxXOFF +Purpose: Clear any existing transmit software flow control off condition +Call: sClrTxXOFF(ChP) + CHANNEL_T *ChP; Ptr to channel structure +*/ +#define sClrTxXOFF(ChP) \ +do { \ + sOutB((ChP)->Cmd,TXOVERIDE | (Byte_t)(ChP)->ChanNum); \ + sOutB((ChP)->Cmd,(Byte_t)(ChP)->ChanNum); \ +} while (0) + +/*************************************************************************** +Function: sCtlNumToCtlPtr +Purpose: Convert a controller number 
to controller structure pointer +Call: sCtlNumToCtlPtr(CtlNum) + int CtlNum; Controller number +Return: CONTROLLER_T *: Ptr to controller structure +*/ +#define sCtlNumToCtlPtr(CTLNUM) &sController[CTLNUM] + +/*************************************************************************** +Function: sControllerEOI +Purpose: Strobe the MUDBAC's End Of Interrupt bit. +Call: sControllerEOI(CtlP) + CONTROLLER_T *CtlP; Ptr to controller structure +*/ +#define sControllerEOI(CTLP) sOutB((CTLP)->MReg2IO,(CTLP)->MReg2 | INT_STROB) + +/*************************************************************************** +Function: sPCIControllerEOI +Purpose: Strobe the PCI End Of Interrupt bit. + For the UPCI boards, toggle the AIOP interrupt enable bit + (this was taken from the Windows driver). +Call: sPCIControllerEOI(CtlP) + CONTROLLER_T *CtlP; Ptr to controller structure +*/ +#define sPCIControllerEOI(CTLP) \ +do { \ + if ((CTLP)->isUPCI) { \ + Word_t w = sInW((CTLP)->PCIIO); \ + sOutW((CTLP)->PCIIO, (w ^ PCI_INT_CTRL_AIOP)); \ + sOutW((CTLP)->PCIIO, w); \ + } \ + else { \ + sOutW((CTLP)->PCIIO, PCI_STROB); \ + } \ +} while (0) + +/*************************************************************************** +Function: sDisAiop +Purpose: Disable I/O access to an AIOP +Call: sDisAiop(CltP) + CONTROLLER_T *CtlP; Ptr to controller structure + int AiopNum; Number of AIOP on controller +*/ +#define sDisAiop(CTLP,AIOPNUM) \ +do { \ + (CTLP)->MReg3 &= sBitMapClrTbl[AIOPNUM]; \ + sOutB((CTLP)->MReg3IO,(CTLP)->MReg3); \ +} while (0) + +/*************************************************************************** +Function: sDisCTSFlowCtl +Purpose: Disable output flow control using CTS +Call: sDisCTSFlowCtl(ChP) + CHANNEL_T *ChP; Ptr to channel structure +*/ +#define sDisCTSFlowCtl(ChP) \ +do { \ + (ChP)->TxControl[2] &= ~CTSFC_EN; \ + out32((ChP)->IndexAddr,(ChP)->TxControl); \ +} while (0) + +/*************************************************************************** +Function: sDisIXANY +Purpose: Disable IXANY Software Flow Control +Call: sDisIXANY(ChP) + CHANNEL_T *ChP; Ptr to channel structure +*/ +#define sDisIXANY(ChP) \ +do { \ + (ChP)->R[0x0e] = 0x86; \ + out32((ChP)->IndexAddr,&(ChP)->R[0x0c]); \ +} while (0) + +/*************************************************************************** +Function: DisParity +Purpose: Disable parity +Call: sDisParity(ChP) + CHANNEL_T *ChP; Ptr to channel structure +Comments: Function sSetParity() can be used in place of functions sEnParity(), + sDisParity(), sSetOddParity(), and sSetEvenParity(). 
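+
+Example: a hypothetical sketch of termios-style parity setup (the
+   driver's real handling is in rp_set_termios() in rocket.c); note
+   that odd or even parity is selected before parity is enabled, as
+   required by the warning on sEnParity() below:
+
+      if (cflag & PARENB) {
+              if (cflag & PARODD)
+                      sSetOddParity(ChP);
+              else
+                      sSetEvenParity(ChP);
+              sEnParity(ChP);
+      } else {
+              sDisParity(ChP);
+      }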
+*/ +#define sDisParity(ChP) \ +do { \ + (ChP)->TxControl[2] &= ~PARITY_EN; \ + out32((ChP)->IndexAddr,(ChP)->TxControl); \ +} while (0) + +/*************************************************************************** +Function: sDisRTSToggle +Purpose: Disable RTS toggle +Call: sDisRTSToggle(ChP) + CHANNEL_T *ChP; Ptr to channel structure +*/ +#define sDisRTSToggle(ChP) \ +do { \ + (ChP)->TxControl[2] &= ~RTSTOG_EN; \ + out32((ChP)->IndexAddr,(ChP)->TxControl); \ + (ChP)->rtsToggle = 0; \ +} while (0) + +/*************************************************************************** +Function: sDisRxFIFO +Purpose: Disable Rx FIFO +Call: sDisRxFIFO(ChP) + CHANNEL_T *ChP; Ptr to channel structure +*/ +#define sDisRxFIFO(ChP) \ +do { \ + (ChP)->R[0x32] = 0x0a; \ + out32((ChP)->IndexAddr,&(ChP)->R[0x30]); \ +} while (0) + +/*************************************************************************** +Function: sDisRxStatusMode +Purpose: Disable the Rx status mode +Call: sDisRxStatusMode(ChP) + CHANNEL_T *ChP; Ptr to channel structure +Comments: This takes the channel out of the receive status mode. All + subsequent reads of receive data using sReadRxWord() will return + two data bytes. +*/ +#define sDisRxStatusMode(ChP) sOutW((ChP)->ChanStat,0) + +/*************************************************************************** +Function: sDisTransmit +Purpose: Disable transmit +Call: sDisTransmit(ChP) + CHANNEL_T *ChP; Ptr to channel structure + This disables movement of Tx data from the Tx FIFO into the 1 byte + Tx buffer. Therefore there could be up to a 2 byte latency + between the time sDisTransmit() is called and the transmit buffer + and transmit shift register going completely empty. +*/ +#define sDisTransmit(ChP) \ +do { \ + (ChP)->TxControl[3] &= ~TX_ENABLE; \ + out32((ChP)->IndexAddr,(ChP)->TxControl); \ +} while (0) + +/*************************************************************************** +Function: sDisTxSoftFlowCtl +Purpose: Disable Tx Software Flow Control +Call: sDisTxSoftFlowCtl(ChP) + CHANNEL_T *ChP; Ptr to channel structure +*/ +#define sDisTxSoftFlowCtl(ChP) \ +do { \ + (ChP)->R[0x06] = 0x8a; \ + out32((ChP)->IndexAddr,&(ChP)->R[0x04]); \ +} while (0) + +/*************************************************************************** +Function: sEnAiop +Purpose: Enable I/O access to an AIOP +Call: sEnAiop(CltP) + CONTROLLER_T *CtlP; Ptr to controller structure + int AiopNum; Number of AIOP on controller +*/ +#define sEnAiop(CTLP,AIOPNUM) \ +do { \ + (CTLP)->MReg3 |= sBitMapSetTbl[AIOPNUM]; \ + sOutB((CTLP)->MReg3IO,(CTLP)->MReg3); \ +} while (0) + +/*************************************************************************** +Function: sEnCTSFlowCtl +Purpose: Enable output flow control using CTS +Call: sEnCTSFlowCtl(ChP) + CHANNEL_T *ChP; Ptr to channel structure +*/ +#define sEnCTSFlowCtl(ChP) \ +do { \ + (ChP)->TxControl[2] |= CTSFC_EN; \ + out32((ChP)->IndexAddr,(ChP)->TxControl); \ +} while (0) + +/*************************************************************************** +Function: sEnIXANY +Purpose: Enable IXANY Software Flow Control +Call: sEnIXANY(ChP) + CHANNEL_T *ChP; Ptr to channel structure +*/ +#define sEnIXANY(ChP) \ +do { \ + (ChP)->R[0x0e] = 0x21; \ + out32((ChP)->IndexAddr,&(ChP)->R[0x0c]); \ +} while (0) + +/*************************************************************************** +Function: EnParity +Purpose: Enable parity +Call: sEnParity(ChP) + CHANNEL_T *ChP; Ptr to channel structure +Comments: Function sSetParity() can be used in place of functions 
sEnParity(), + sDisParity(), sSetOddParity(), and sSetEvenParity(). + +Warnings: Before enabling parity odd or even parity should be chosen using + functions sSetOddParity() or sSetEvenParity(). +*/ +#define sEnParity(ChP) \ +do { \ + (ChP)->TxControl[2] |= PARITY_EN; \ + out32((ChP)->IndexAddr,(ChP)->TxControl); \ +} while (0) + +/*************************************************************************** +Function: sEnRTSToggle +Purpose: Enable RTS toggle +Call: sEnRTSToggle(ChP) + CHANNEL_T *ChP; Ptr to channel structure +Comments: This function will disable RTS flow control and clear the RTS + line to allow operation of RTS toggle. +*/ +#define sEnRTSToggle(ChP) \ +do { \ + (ChP)->RxControl[2] &= ~RTSFC_EN; \ + out32((ChP)->IndexAddr,(ChP)->RxControl); \ + (ChP)->TxControl[2] |= RTSTOG_EN; \ + (ChP)->TxControl[3] &= ~SET_RTS; \ + out32((ChP)->IndexAddr,(ChP)->TxControl); \ + (ChP)->rtsToggle = 1; \ +} while (0) + +/*************************************************************************** +Function: sEnRxFIFO +Purpose: Enable Rx FIFO +Call: sEnRxFIFO(ChP) + CHANNEL_T *ChP; Ptr to channel structure +*/ +#define sEnRxFIFO(ChP) \ +do { \ + (ChP)->R[0x32] = 0x08; \ + out32((ChP)->IndexAddr,&(ChP)->R[0x30]); \ +} while (0) + +/*************************************************************************** +Function: sEnRxProcessor +Purpose: Enable the receive processor +Call: sEnRxProcessor(ChP) + CHANNEL_T *ChP; Ptr to channel structure +Comments: This function is used to start the receive processor. When + the channel is in the reset state the receive processor is not + running. This is done to prevent the receive processor from + executing invalid microcode instructions prior to the + downloading of the microcode. + +Warnings: This function must be called after valid microcode has been + downloaded to the AIOP, and it must not be called before the + microcode has been downloaded. +*/ +#define sEnRxProcessor(ChP) \ +do { \ + (ChP)->RxControl[2] |= RXPROC_EN; \ + out32((ChP)->IndexAddr,(ChP)->RxControl); \ +} while (0) + +/*************************************************************************** +Function: sEnRxStatusMode +Purpose: Enable the Rx status mode +Call: sEnRxStatusMode(ChP) + CHANNEL_T *ChP; Ptr to channel structure +Comments: This places the channel in the receive status mode. All subsequent + reads of receive data using sReadRxWord() will return a data byte + in the low word and a status byte in the high word. + +*/ +#define sEnRxStatusMode(ChP) sOutW((ChP)->ChanStat,STATMODE) + +/*************************************************************************** +Function: sEnTransmit +Purpose: Enable transmit +Call: sEnTransmit(ChP) + CHANNEL_T *ChP; Ptr to channel structure +*/ +#define sEnTransmit(ChP) \ +do { \ + (ChP)->TxControl[3] |= TX_ENABLE; \ + out32((ChP)->IndexAddr,(ChP)->TxControl); \ +} while (0) + +/*************************************************************************** +Function: sEnTxSoftFlowCtl +Purpose: Enable Tx Software Flow Control +Call: sEnTxSoftFlowCtl(ChP) + CHANNEL_T *ChP; Ptr to channel structure +*/ +#define sEnTxSoftFlowCtl(ChP) \ +do { \ + (ChP)->R[0x06] = 0xc5; \ + out32((ChP)->IndexAddr,&(ChP)->R[0x04]); \ +} while (0) + +/*************************************************************************** +Function: sGetAiopIntStatus +Purpose: Get the AIOP interrupt status +Call: sGetAiopIntStatus(CtlP,AiopNum) + CONTROLLER_T *CtlP; Ptr to controller structure + int AiopNum; AIOP number +Return: Byte_t: The AIOP interrupt status. 
Bits 0 through 7 + represent channels 0 through 7 respectively. If a + bit is set that channel is interrupting. +*/ +#define sGetAiopIntStatus(CTLP,AIOPNUM) sInB((CTLP)->AiopIntChanIO[AIOPNUM]) + +/*************************************************************************** +Function: sGetAiopNumChan +Purpose: Get the number of channels supported by an AIOP +Call: sGetAiopNumChan(CtlP,AiopNum) + CONTROLLER_T *CtlP; Ptr to controller structure + int AiopNum; AIOP number +Return: int: The number of channels supported by the AIOP +*/ +#define sGetAiopNumChan(CTLP,AIOPNUM) (CTLP)->AiopNumChan[AIOPNUM] + +/*************************************************************************** +Function: sGetChanIntID +Purpose: Get a channel's interrupt identification byte +Call: sGetChanIntID(ChP) + CHANNEL_T *ChP; Ptr to channel structure +Return: Byte_t: The channel interrupt ID. Can be any + combination of the following flags: + RXF_TRIG: Rx FIFO trigger level interrupt + TXFIFO_MT: Tx FIFO empty interrupt + SRC_INT: Special receive condition interrupt + DELTA_CD: CD change interrupt + DELTA_CTS: CTS change interrupt + DELTA_DSR: DSR change interrupt +*/ +#define sGetChanIntID(ChP) (sInB((ChP)->IntID) & (RXF_TRIG | TXFIFO_MT | SRC_INT | DELTA_CD | DELTA_CTS | DELTA_DSR)) + +/*************************************************************************** +Function: sGetChanNum +Purpose: Get the number of a channel within an AIOP +Call: sGetChanNum(ChP) + CHANNEL_T *ChP; Ptr to channel structure +Return: int: Channel number within AIOP, or NULLCHAN if channel does + not exist. +*/ +#define sGetChanNum(ChP) (ChP)->ChanNum + +/*************************************************************************** +Function: sGetChanStatus +Purpose: Get the channel status +Call: sGetChanStatus(ChP) + CHANNEL_T *ChP; Ptr to channel structure +Return: Word_t: The channel status. Can be any combination of + the following flags: + LOW BYTE FLAGS + CTS_ACT: CTS input asserted + DSR_ACT: DSR input asserted + CD_ACT: CD input asserted + TXFIFOMT: Tx FIFO is empty + TXSHRMT: Tx shift register is empty + RDA: Rx data available + + HIGH BYTE FLAGS + STATMODE: status mode enable bit + RXFOVERFL: receive FIFO overflow + RX2MATCH: receive compare byte 2 match + RX1MATCH: receive compare byte 1 match + RXBREAK: received BREAK + RXFRAME: received framing error + RXPARITY: received parity error +Warnings: This function will clear the high byte flags in the Channel + Status Register. +*/ +#define sGetChanStatus(ChP) sInW((ChP)->ChanStat) + +/*************************************************************************** +Function: sGetChanStatusLo +Purpose: Get the low byte only of the channel status +Call: sGetChanStatusLo(ChP) + CHANNEL_T *ChP; Ptr to channel structure +Return: Byte_t: The channel status low byte. Can be any combination + of the following flags: + CTS_ACT: CTS input asserted + DSR_ACT: DSR input asserted + CD_ACT: CD input asserted + TXFIFOMT: Tx FIFO is empty + TXSHRMT: Tx shift register is empty + RDA: Rx data available +*/ +#define sGetChanStatusLo(ChP) sInB((ByteIO_t)(ChP)->ChanStat) + +/********************************************************************** + * Get RI status of channel + * Defined as a function in rocket.c -aes + */ +#if 0 +#define sGetChanRI(ChP) ((ChP)->CtlP->AltChanRingIndicator ? \ + (sInB((ByteIO_t)((ChP)->ChanStat+8)) & DSR_ACT) : \ + (((ChP)->CtlP->boardType == ROCKET_TYPE_PC104) ? 
\ + (!(sInB((ChP)->CtlP->AiopIO[3]) & sBitMapSetTbl[(ChP)->ChanNum])) : \ + 0)) +#endif + +/*************************************************************************** +Function: sGetControllerIntStatus +Purpose: Get the controller interrupt status +Call: sGetControllerIntStatus(CtlP) + CONTROLLER_T *CtlP; Ptr to controller structure +Return: Byte_t: The controller interrupt status in the lower 4 + bits. Bits 0 through 3 represent AIOP's 0 + through 3 respectively. If a bit is set that + AIOP is interrupting. Bits 4 through 7 will + always be cleared. +*/ +#define sGetControllerIntStatus(CTLP) (sInB((CTLP)->MReg1IO) & 0x0f) + +/*************************************************************************** +Function: sPCIGetControllerIntStatus +Purpose: Get the controller interrupt status +Call: sPCIGetControllerIntStatus(CtlP) + CONTROLLER_T *CtlP; Ptr to controller structure +Return: unsigned char: The controller interrupt status in the lower 4 + bits and bit 4. Bits 0 through 3 represent AIOP's 0 + through 3 respectively. Bit 4 is set if the int + was generated from periodic. If a bit is set the + AIOP is interrupting. +*/ +#define sPCIGetControllerIntStatus(CTLP) \ + ((CTLP)->isUPCI ? \ + (sInW((CTLP)->PCIIO2) & UPCI_AIOP_INTR_BITS) : \ + ((sInW((CTLP)->PCIIO) >> 8) & AIOP_INTR_BITS)) + +/*************************************************************************** + +Function: sGetRxCnt +Purpose: Get the number of data bytes in the Rx FIFO +Call: sGetRxCnt(ChP) + CHANNEL_T *ChP; Ptr to channel structure +Return: int: The number of data bytes in the Rx FIFO. +Comments: Byte read of count register is required to obtain Rx count. + +*/ +#define sGetRxCnt(ChP) sInW((ChP)->TxRxCount) + +/*************************************************************************** +Function: sGetTxCnt +Purpose: Get the number of data bytes in the Tx FIFO +Call: sGetTxCnt(ChP) + CHANNEL_T *ChP; Ptr to channel structure +Return: Byte_t: The number of data bytes in the Tx FIFO. +Comments: Byte read of count register is required to obtain Tx count. + +*/ +#define sGetTxCnt(ChP) sInB((ByteIO_t)(ChP)->TxRxCount) + +/***************************************************************************** +Function: sGetTxRxDataIO +Purpose: Get the I/O address of a channel's TxRx Data register +Call: sGetTxRxDataIO(ChP) + CHANNEL_T *ChP; Ptr to channel structure +Return: WordIO_t: I/O address of a channel's TxRx Data register +*/ +#define sGetTxRxDataIO(ChP) (ChP)->TxRxData + +/*************************************************************************** +Function: sInitChanDefaults +Purpose: Initialize a channel structure to it's default state. +Call: sInitChanDefaults(ChP) + CHANNEL_T *ChP; Ptr to the channel structure +Comments: This function must be called once for every channel structure + that exists before any other SSCI calls can be made. 
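          For illustration only (a hedged sketch, not part of this header):
          a hypothetical caller clears the structure and then binds it with
          the SSCI routine sInitChan() (in rocket.c). CtlP, AiopNum and
          ChanNum are assumed to come from the board-scan code, and a zero
          return from sInitChan() is taken here to mean the channel is
          absent.

              CHANNEL_T ch;

              sInitChanDefaults(&ch);
              if (!sInitChan(CtlP, &ch, AiopNum, ChanNum))
                      return;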
+ +*/ +#define sInitChanDefaults(ChP) \ +do { \ + (ChP)->CtlP = NULLCTLPTR; \ + (ChP)->AiopNum = NULLAIOP; \ + (ChP)->ChanID = AIOPID_NULL; \ + (ChP)->ChanNum = NULLCHAN; \ +} while (0) + +/*************************************************************************** +Function: sResetAiopByNum +Purpose: Reset the AIOP by number +Call: sResetAiopByNum(CTLP,AIOPNUM) + CONTROLLER_T CTLP; Ptr to controller structure + AIOPNUM; AIOP index +*/ +#define sResetAiopByNum(CTLP,AIOPNUM) \ +do { \ + sOutB((CTLP)->AiopIO[(AIOPNUM)]+_CMD_REG,RESET_ALL); \ + sOutB((CTLP)->AiopIO[(AIOPNUM)]+_CMD_REG,0x0); \ +} while (0) + +/*************************************************************************** +Function: sSendBreak +Purpose: Send a transmit BREAK signal +Call: sSendBreak(ChP) + CHANNEL_T *ChP; Ptr to channel structure +*/ +#define sSendBreak(ChP) \ +do { \ + (ChP)->TxControl[3] |= SETBREAK; \ + out32((ChP)->IndexAddr,(ChP)->TxControl); \ +} while (0) + +/*************************************************************************** +Function: sSetBaud +Purpose: Set baud rate +Call: sSetBaud(ChP,Divisor) + CHANNEL_T *ChP; Ptr to channel structure + Word_t Divisor; 16 bit baud rate divisor for channel +*/ +#define sSetBaud(ChP,DIVISOR) \ +do { \ + (ChP)->BaudDiv[2] = (Byte_t)(DIVISOR); \ + (ChP)->BaudDiv[3] = (Byte_t)((DIVISOR) >> 8); \ + out32((ChP)->IndexAddr,(ChP)->BaudDiv); \ +} while (0) + +/*************************************************************************** +Function: sSetData7 +Purpose: Set data bits to 7 +Call: sSetData7(ChP) + CHANNEL_T *ChP; Ptr to channel structure +*/ +#define sSetData7(ChP) \ +do { \ + (ChP)->TxControl[2] &= ~DATA8BIT; \ + out32((ChP)->IndexAddr,(ChP)->TxControl); \ +} while (0) + +/*************************************************************************** +Function: sSetData8 +Purpose: Set data bits to 8 +Call: sSetData8(ChP) + CHANNEL_T *ChP; Ptr to channel structure +*/ +#define sSetData8(ChP) \ +do { \ + (ChP)->TxControl[2] |= DATA8BIT; \ + out32((ChP)->IndexAddr,(ChP)->TxControl); \ +} while (0) + +/*************************************************************************** +Function: sSetDTR +Purpose: Set the DTR output +Call: sSetDTR(ChP) + CHANNEL_T *ChP; Ptr to channel structure +*/ +#define sSetDTR(ChP) \ +do { \ + (ChP)->TxControl[3] |= SET_DTR; \ + out32((ChP)->IndexAddr,(ChP)->TxControl); \ +} while (0) + +/*************************************************************************** +Function: sSetEvenParity +Purpose: Set even parity +Call: sSetEvenParity(ChP) + CHANNEL_T *ChP; Ptr to channel structure +Comments: Function sSetParity() can be used in place of functions sEnParity(), + sDisParity(), sSetOddParity(), and sSetEvenParity(). + +Warnings: This function has no effect unless parity is enabled with function + sEnParity(). +*/ +#define sSetEvenParity(ChP) \ +do { \ + (ChP)->TxControl[2] |= EVEN_PAR; \ + out32((ChP)->IndexAddr,(ChP)->TxControl); \ +} while (0) + +/*************************************************************************** +Function: sSetOddParity +Purpose: Set odd parity +Call: sSetOddParity(ChP) + CHANNEL_T *ChP; Ptr to channel structure +Comments: Function sSetParity() can be used in place of functions sEnParity(), + sDisParity(), sSetOddParity(), and sSetEvenParity(). + +Warnings: This function has no effect unless parity is enabled with function + sEnParity(). 
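          A minimal sketch, for illustration only (ChP is assumed to be a
          channel that has already been set up): 7O1 framing is programmed
          by picking the parity sense before enabling parity, matching the
          warnings above, e.g.

              sSetData7(ChP);
              sSetStop1(ChP);
              sSetOddParity(ChP);
              sEnParity(ChP);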
+*/ +#define sSetOddParity(ChP) \ +do { \ + (ChP)->TxControl[2] &= ~EVEN_PAR; \ + out32((ChP)->IndexAddr,(ChP)->TxControl); \ +} while (0) + +/*************************************************************************** +Function: sSetRTS +Purpose: Set the RTS output +Call: sSetRTS(ChP) + CHANNEL_T *ChP; Ptr to channel structure +*/ +#define sSetRTS(ChP) \ +do { \ + if ((ChP)->rtsToggle) break; \ + (ChP)->TxControl[3] |= SET_RTS; \ + out32((ChP)->IndexAddr,(ChP)->TxControl); \ +} while (0) + +/*************************************************************************** +Function: sSetRxTrigger +Purpose: Set the Rx FIFO trigger level +Call: sSetRxProcessor(ChP,Level) + CHANNEL_T *ChP; Ptr to channel structure + Byte_t Level; Number of characters in Rx FIFO at which the + interrupt will be generated. Can be any of the following flags: + + TRIG_NO: no trigger + TRIG_1: 1 character in FIFO + TRIG_1_2: FIFO 1/2 full + TRIG_7_8: FIFO 7/8 full +Comments: An interrupt will be generated when the trigger level is reached + only if function sEnInterrupt() has been called with flag + RXINT_EN set. The RXF_TRIG flag in the Interrupt Idenfification + register will be set whenever the trigger level is reached + regardless of the setting of RXINT_EN. + +*/ +#define sSetRxTrigger(ChP,LEVEL) \ +do { \ + (ChP)->RxControl[2] &= ~TRIG_MASK; \ + (ChP)->RxControl[2] |= LEVEL; \ + out32((ChP)->IndexAddr,(ChP)->RxControl); \ +} while (0) + +/*************************************************************************** +Function: sSetStop1 +Purpose: Set stop bits to 1 +Call: sSetStop1(ChP) + CHANNEL_T *ChP; Ptr to channel structure +*/ +#define sSetStop1(ChP) \ +do { \ + (ChP)->TxControl[2] &= ~STOP2; \ + out32((ChP)->IndexAddr,(ChP)->TxControl); \ +} while (0) + +/*************************************************************************** +Function: sSetStop2 +Purpose: Set stop bits to 2 +Call: sSetStop2(ChP) + CHANNEL_T *ChP; Ptr to channel structure +*/ +#define sSetStop2(ChP) \ +do { \ + (ChP)->TxControl[2] |= STOP2; \ + out32((ChP)->IndexAddr,(ChP)->TxControl); \ +} while (0) + +/*************************************************************************** +Function: sSetTxXOFFChar +Purpose: Set the Tx XOFF flow control character +Call: sSetTxXOFFChar(ChP,Ch) + CHANNEL_T *ChP; Ptr to channel structure + Byte_t Ch; The value to set the Tx XOFF character to +*/ +#define sSetTxXOFFChar(ChP,CH) \ +do { \ + (ChP)->R[0x07] = (CH); \ + out32((ChP)->IndexAddr,&(ChP)->R[0x04]); \ +} while (0) + +/*************************************************************************** +Function: sSetTxXONChar +Purpose: Set the Tx XON flow control character +Call: sSetTxXONChar(ChP,Ch) + CHANNEL_T *ChP; Ptr to channel structure + Byte_t Ch; The value to set the Tx XON character to +*/ +#define sSetTxXONChar(ChP,CH) \ +do { \ + (ChP)->R[0x0b] = (CH); \ + out32((ChP)->IndexAddr,&(ChP)->R[0x08]); \ +} while (0) + +/*************************************************************************** +Function: sStartRxProcessor +Purpose: Start a channel's receive processor +Call: sStartRxProcessor(ChP) + CHANNEL_T *ChP; Ptr to channel structure +Comments: This function is used to start a Rx processor after it was + stopped with sStopRxProcessor() or sStopSWInFlowCtl(). It + will restart both the Rx processor and software input flow control. 
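          For illustration only (hypothetical caller; the trigger level is
          an arbitrary choice): a pause/resume sequence might look like

              sSetRxTrigger(ChP, TRIG_1_2);
              sStopRxProcessor(ChP);
              ... drain or reconfigure the channel ...
              sStartRxProcessor(ChP);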
+ +*/ +#define sStartRxProcessor(ChP) out32((ChP)->IndexAddr,&(ChP)->R[0]) + +/*************************************************************************** +Function: sWriteTxByte +Purpose: Write a transmit data byte to a channel. + ByteIO_t io: Channel transmit register I/O address. This can + be obtained with sGetTxRxDataIO(). + Byte_t Data; The transmit data byte. +Warnings: This function writes the data byte without checking to see if + sMaxTxSize is exceeded in the Tx FIFO. +*/ +#define sWriteTxByte(IO,DATA) sOutB(IO,DATA) + +/* + * Begin Linux specific definitions for the Rocketport driver + * + * This code is Copyright Theodore Ts'o, 1995-1997 + */ + +struct r_port { + int magic; + struct tty_port port; + int line; + int flags; /* Don't yet match the ASY_ flags!! */ + unsigned int board:3; + unsigned int aiop:2; + unsigned int chan:3; + CONTROLLER_t *ctlp; + CHANNEL_t channel; + int intmask; + int xmit_fifo_room; /* room in xmit fifo */ + unsigned char *xmit_buf; + int xmit_head; + int xmit_tail; + int xmit_cnt; + int cd_status; + int ignore_status_mask; + int read_status_mask; + int cps; + + struct completion close_wait; /* Not yet matching the core */ + spinlock_t slock; + struct mutex write_mtx; +}; + +#define RPORT_MAGIC 0x525001 + +#define NUM_BOARDS 8 +#define MAX_RP_PORTS (32*NUM_BOARDS) + +/* + * The size of the xmit buffer is 1 page, or 4096 bytes + */ +#define XMIT_BUF_SIZE 4096 + +/* number of characters left in xmit buffer before we ask for more */ +#define WAKEUP_CHARS 256 + +/* + * Assigned major numbers for the Comtrol Rocketport + */ +#define TTY_ROCKET_MAJOR 46 +#define CUA_ROCKET_MAJOR 47 + +#ifdef PCI_VENDOR_ID_RP +#undef PCI_VENDOR_ID_RP +#undef PCI_DEVICE_ID_RP8OCTA +#undef PCI_DEVICE_ID_RP8INTF +#undef PCI_DEVICE_ID_RP16INTF +#undef PCI_DEVICE_ID_RP32INTF +#undef PCI_DEVICE_ID_URP8OCTA +#undef PCI_DEVICE_ID_URP8INTF +#undef PCI_DEVICE_ID_URP16INTF +#undef PCI_DEVICE_ID_CRP16INTF +#undef PCI_DEVICE_ID_URP32INTF +#endif + +/* Comtrol PCI Vendor ID */ +#define PCI_VENDOR_ID_RP 0x11fe + +/* Comtrol Device ID's */ +#define PCI_DEVICE_ID_RP32INTF 0x0001 /* Rocketport 32 port w/external I/F */ +#define PCI_DEVICE_ID_RP8INTF 0x0002 /* Rocketport 8 port w/external I/F */ +#define PCI_DEVICE_ID_RP16INTF 0x0003 /* Rocketport 16 port w/external I/F */ +#define PCI_DEVICE_ID_RP4QUAD 0x0004 /* Rocketport 4 port w/quad cable */ +#define PCI_DEVICE_ID_RP8OCTA 0x0005 /* Rocketport 8 port w/octa cable */ +#define PCI_DEVICE_ID_RP8J 0x0006 /* Rocketport 8 port w/RJ11 connectors */ +#define PCI_DEVICE_ID_RP4J 0x0007 /* Rocketport 4 port w/RJ11 connectors */ +#define PCI_DEVICE_ID_RP8SNI 0x0008 /* Rocketport 8 port w/ DB78 SNI (Siemens) connector */ +#define PCI_DEVICE_ID_RP16SNI 0x0009 /* Rocketport 16 port w/ DB78 SNI (Siemens) connector */ +#define PCI_DEVICE_ID_RPP4 0x000A /* Rocketport Plus 4 port */ +#define PCI_DEVICE_ID_RPP8 0x000B /* Rocketport Plus 8 port */ +#define PCI_DEVICE_ID_RP6M 0x000C /* RocketModem 6 port */ +#define PCI_DEVICE_ID_RP4M 0x000D /* RocketModem 4 port */ +#define PCI_DEVICE_ID_RP2_232 0x000E /* Rocketport Plus 2 port RS232 */ +#define PCI_DEVICE_ID_RP2_422 0x000F /* Rocketport Plus 2 port RS422 */ + +/* Universal PCI boards */ +#define PCI_DEVICE_ID_URP32INTF 0x0801 /* Rocketport UPCI 32 port w/external I/F */ +#define PCI_DEVICE_ID_URP8INTF 0x0802 /* Rocketport UPCI 8 port w/external I/F */ +#define PCI_DEVICE_ID_URP16INTF 0x0803 /* Rocketport UPCI 16 port w/external I/F */ +#define PCI_DEVICE_ID_URP8OCTA 0x0805 /* Rocketport UPCI 8 port w/octa cable 
*/ +#define PCI_DEVICE_ID_UPCI_RM3_8PORT 0x080C /* Rocketmodem III 8 port */ +#define PCI_DEVICE_ID_UPCI_RM3_4PORT 0x080D /* Rocketmodem III 4 port */ + +/* Compact PCI device */ +#define PCI_DEVICE_ID_CRP16INTF 0x0903 /* Rocketport Compact PCI 16 port w/external I/F */ + diff --git a/drivers/tty/serial/68328serial.c b/drivers/tty/serial/68328serial.c index be0ebce36e5..d5bfd41707e 100644 --- a/drivers/tty/serial/68328serial.c +++ b/drivers/tty/serial/68328serial.c @@ -262,7 +262,7 @@ static void status_handle(struct m68k_serial *info, unsigned short status) static void receive_chars(struct m68k_serial *info, unsigned short rx) { - struct tty_struct *tty = info->port.tty; + struct tty_struct *tty = info->tty; m68328_uart *uart = &uart_addr[info->line]; unsigned char ch, flag; @@ -329,7 +329,7 @@ static void transmit_chars(struct m68k_serial *info) goto clear_and_return; } - if((info->xmit_cnt <= 0) || info->port.tty->stopped) { + if((info->xmit_cnt <= 0) || info->tty->stopped) { /* That's peculiar... TX ints off */ uart->ustcnt &= ~USTCNT_TX_INTR_MASK; goto clear_and_return; @@ -383,7 +383,7 @@ static void do_softint(struct work_struct *work) struct m68k_serial *info = container_of(work, struct m68k_serial, tqueue); struct tty_struct *tty; - tty = info->port.tty; + tty = info->tty; if (!tty) return; #if 0 @@ -393,28 +393,6 @@ static void do_softint(struct work_struct *work) #endif } -/* - * This routine is called from the scheduler tqueue when the interrupt - * routine has signalled that a hangup has occurred. The path of - * hangup processing is: - * - * serial interrupt routine -> (scheduler tqueue) -> - * do_serial_hangup() -> tty->hangup() -> rs_hangup() - * - */ -static void do_serial_hangup(struct work_struct *work) -{ - struct m68k_serial *info = container_of(work, struct m68k_serial, tqueue_hangup); - struct tty_struct *tty; - - tty = info->port.tty; - if (!tty) - return; - - tty_hangup(tty); -} - - static int startup(struct m68k_serial * info) { m68328_uart *uart = &uart_addr[info->line]; @@ -451,8 +429,8 @@ static int startup(struct m68k_serial * info) uart->ustcnt = USTCNT_UEN | USTCNT_RXEN | USTCNT_RX_INTR_MASK; #endif - if (info->port.tty) - clear_bit(TTY_IO_ERROR, &info->port.tty->flags); + if (info->tty) + clear_bit(TTY_IO_ERROR, &info->tty->flags); info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; /* @@ -486,8 +464,8 @@ static void shutdown(struct m68k_serial * info) info->xmit_buf = 0; } - if (info->port.tty) - set_bit(TTY_IO_ERROR, &info->port.tty->flags); + if (info->tty) + set_bit(TTY_IO_ERROR, &info->tty->flags); info->flags &= ~S_INITIALIZED; local_irq_restore(flags); @@ -553,9 +531,9 @@ static void change_speed(struct m68k_serial *info) unsigned cflag; int i; - if (!info->port.tty || !info->port.tty->termios) + if (!info->tty || !info->tty->termios) return; - cflag = info->port.tty->termios->c_cflag; + cflag = info->tty->termios->c_cflag; if (!(port = info->port)) return; @@ -967,10 +945,9 @@ static void send_break(struct m68k_serial * info, unsigned int duration) local_irq_restore(flags); } -static int rs_ioctl(struct tty_struct *tty, struct file * file, +static int rs_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { - int error; struct m68k_serial * info = (struct m68k_serial *)tty->driver_data; int retval; @@ -1104,7 +1081,7 @@ static void rs_close(struct tty_struct *tty, struct file * filp) tty_ldisc_flush(tty); tty->closing = 0; info->event = 0; - info->port.tty = NULL; + info->tty = NULL; #warning "This is not and has never been valid 
so fix it" #if 0 if (tty->ldisc.num != ldiscs[N_TTY].num) { @@ -1142,7 +1119,7 @@ void rs_hangup(struct tty_struct *tty) info->event = 0; info->count = 0; info->flags &= ~S_NORMAL_ACTIVE; - info->port.tty = NULL; + info->tty = NULL; wake_up_interruptible(&info->open_wait); } @@ -1261,7 +1238,7 @@ int rs_open(struct tty_struct *tty, struct file * filp) info->count++; tty->driver_data = info; - info->port.tty = tty; + info->tty = tty; /* * Start up serial port @@ -1338,7 +1315,7 @@ rs68328_init(void) info = &m68k_soft[i]; info->magic = SERIAL_MAGIC; info->port = (int) &uart_addr[i]; - info->port.tty = NULL; + info->tty = NULL; info->irq = uart_irqs[i]; info->custom_divisor = 16; info->close_delay = 50; @@ -1348,7 +1325,6 @@ rs68328_init(void) info->count = 0; info->blocked_open = 0; INIT_WORK(&info->tqueue, do_softint); - INIT_WORK(&info->tqueue_hangup, do_serial_hangup); init_waitqueue_head(&info->open_wait); init_waitqueue_head(&info->close_wait); info->line = i; diff --git a/drivers/tty/serial/68328serial.h b/drivers/tty/serial/68328serial.h index 664ceb0a158..8c9c3c0745d 100644 --- a/drivers/tty/serial/68328serial.h +++ b/drivers/tty/serial/68328serial.h @@ -159,7 +159,6 @@ struct m68k_serial { int xmit_tail; int xmit_cnt; struct work_struct tqueue; - struct work_struct tqueue_hangup; wait_queue_head_t open_wait; wait_queue_head_t close_wait; }; diff --git a/drivers/tty/serial/68360serial.c b/drivers/tty/serial/68360serial.c index 88b13356ec1..0a3e8787ed5 100644 --- a/drivers/tty/serial/68360serial.c +++ b/drivers/tty/serial/68360serial.c @@ -1240,7 +1240,7 @@ static int get_lsr_info(struct async_struct * info, unsigned int *value) } #endif -static int rs_360_tiocmget(struct tty_struct *tty, struct file *file) +static int rs_360_tiocmget(struct tty_struct *tty) { ser_info_t *info = (ser_info_t *)tty->driver_data; unsigned int result = 0; @@ -1271,7 +1271,7 @@ static int rs_360_tiocmget(struct tty_struct *tty, struct file *file) return result; } -static int rs_360_tiocmset(struct tty_struct *tty, struct file *file, +static int rs_360_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { #ifdef modem_control @@ -1405,7 +1405,7 @@ static int rs_360_get_icount(struct tty_struct *tty, return 0; } -static int rs_360_ioctl(struct tty_struct *tty, struct file * file, +static int rs_360_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { int error; @@ -2428,6 +2428,7 @@ static const struct tty_operations rs_360_ops = { /* .read_proc = rs_360_read_proc, */ .tiocmget = rs_360_tiocmget, .tiocmset = rs_360_tiocmset, + .get_icount = rs_360_get_icount, }; static int __init rs_360_init(void) diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c index b25e6e49053..b3b881bc471 100644 --- a/drivers/tty/serial/8250.c +++ b/drivers/tty/serial/8250.c @@ -236,7 +236,8 @@ static const struct serial8250_config uart_config[] = { .fifo_size = 128, .tx_loadsz = 128, .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, - .flags = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP, + /* UART_CAP_EFR breaks billionon CF bluetooth card. 
*/ + .flags = UART_CAP_FIFO | UART_CAP_SLEEP, }, [PORT_16654] = { .name = "ST16654", @@ -953,6 +954,23 @@ static int broken_efr(struct uart_8250_port *up) return 0; } +static inline int ns16550a_goto_highspeed(struct uart_8250_port *up) +{ + unsigned char status; + + status = serial_in(up, 0x04); /* EXCR2 */ +#define PRESL(x) ((x) & 0x30) + if (PRESL(status) == 0x10) { + /* already in high speed mode */ + return 0; + } else { + status &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */ + status |= 0x10; /* 1.625 divisor for baud_base --> 921600 */ + serial_outp(up, 0x04, status); + } + return 1; +} + /* * We know that the chip has FIFOs. Does it have an EFR? The * EFR is located in the same register position as the IIR and @@ -1024,12 +1042,8 @@ static void autoconfig_16550a(struct uart_8250_port *up) quot = serial_dl_read(up); quot <<= 3; - status1 = serial_in(up, 0x04); /* EXCR2 */ - status1 &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */ - status1 |= 0x10; /* 1.625 divisor for baud_base --> 921600 */ - serial_outp(up, 0x04, status1); - - serial_dl_write(up, quot); + if (ns16550a_goto_highspeed(up)) + serial_dl_write(up, quot); serial_outp(up, UART_LCR, 0); @@ -3024,17 +3038,13 @@ void serial8250_resume_port(int line) struct uart_8250_port *up = &serial8250_ports[line]; if (up->capabilities & UART_NATSEMI) { - unsigned char tmp; - /* Ensure it's still in high speed mode */ serial_outp(up, UART_LCR, 0xE0); - tmp = serial_in(up, 0x04); /* EXCR2 */ - tmp &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */ - tmp |= 0x10; /* 1.625 divisor for baud_base --> 921600 */ - serial_outp(up, 0x04, tmp); + ns16550a_goto_highspeed(up); serial_outp(up, UART_LCR, 0); + up->port.uartclk = 921600*16; } uart_resume_port(&serial8250_reg, &up->port); } diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index b1682d7f1d8..e461be164f6 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -1110,29 +1110,6 @@ config SERIAL_PMACZILOG_CONSOLE on your (Power)Mac as the console, you can do so by answering Y to this option. -config SERIAL_LH7A40X - tristate "Sharp LH7A40X embedded UART support" - depends on ARM && ARCH_LH7A40X - select SERIAL_CORE - help - This enables support for the three on-board UARTs of the - Sharp LH7A40X series CPUs. Choose Y or M. - -config SERIAL_LH7A40X_CONSOLE - bool "Support for console on Sharp LH7A40X serial port" - depends on SERIAL_LH7A40X=y - select SERIAL_CORE_CONSOLE - help - Say Y here if you wish to use one of the serial ports as the - system console--the system console is the device which - receives all kernel messages and warnings and which allows - logins in single user mode. - - Even if you say Y here, the currently visible framebuffer console - (/dev/tty0) will still be used as the default system console, but - you can alter that using a kernel command line, for example - "console=ttyAM1". - config SERIAL_CPM tristate "CPM SCC/SMC serial port support" depends on CPM2 || 8xx @@ -1319,6 +1296,18 @@ config SERIAL_MSM_CONSOLE depends on SERIAL_MSM=y select SERIAL_CORE_CONSOLE +config SERIAL_MSM_HS + tristate "MSM UART High Speed: Serial Driver" + depends on ARCH_MSM + select SERIAL_CORE + help + If you have a machine based on MSM family of SoCs, you + can enable its onboard high speed serial port by enabling + this option. + + Choose M here to compile it as a module. The module will be + called msm_serial_hs. 
+ config SERIAL_VT8500 bool "VIA VT8500 on-chip serial port support" depends on ARM && ARCH_VT8500 @@ -1518,6 +1507,7 @@ config SERIAL_BCM63XX_CONSOLE config SERIAL_GRLIB_GAISLER_APBUART tristate "GRLIB APBUART serial support" depends on OF + select SERIAL_CORE ---help--- Add support for the GRLIB APBUART serial port. @@ -1587,12 +1577,25 @@ config SERIAL_IFX6X60 Support for the IFX6x60 modem devices on Intel MID platforms. config SERIAL_PCH_UART - tristate "Intel EG20T PCH UART" - depends on PCI && DMADEVICES + tristate "Intel EG20T PCH UART/OKI SEMICONDUCTOR ML7213 IOH" + depends on PCI select SERIAL_CORE - select PCH_DMA help This driver is for PCH(Platform controller Hub) UART of Intel EG20T which is an IOH(Input/Output Hub) for x86 embedded processor. Enabling PCH_DMA, this PCH UART works as DMA mode. + + This driver also can be used for OKI SEMICONDUCTOR ML7213 IOH(Input/ + Output Hub) which is for IVI(In-Vehicle Infotainment) use. + ML7213 is companion chip for Intel Atom E6xx series. + ML7213 is completely compatible for Intel EG20T PCH. + +config SERIAL_MSM_SMD + bool "Enable tty device interface for some SMD ports" + default n + depends on MSM_SMD + help + Enables userspace clients to read and write to some streaming SMD + ports via tty device interface for MSM chipset. + endmenu diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile index 8ea92e9c73b..31e868cb49b 100644 --- a/drivers/tty/serial/Makefile +++ b/drivers/tty/serial/Makefile @@ -54,7 +54,6 @@ obj-$(CONFIG_SERIAL_68328) += 68328serial.o obj-$(CONFIG_SERIAL_68360) += 68360serial.o obj-$(CONFIG_SERIAL_MCF) += mcf.o obj-$(CONFIG_SERIAL_PMACZILOG) += pmac_zilog.o -obj-$(CONFIG_SERIAL_LH7A40X) += serial_lh7a40x.o obj-$(CONFIG_SERIAL_DZ) += dz.o obj-$(CONFIG_SERIAL_ZS) += zs.o obj-$(CONFIG_SERIAL_SH_SCI) += sh-sci.o @@ -76,6 +75,7 @@ obj-$(CONFIG_SERIAL_SGI_IOC3) += ioc3_serial.o obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o obj-$(CONFIG_SERIAL_MSM) += msm_serial.o +obj-$(CONFIG_SERIAL_MSM_HS) += msm_serial_hs.o obj-$(CONFIG_SERIAL_NETX) += netx-serial.o obj-$(CONFIG_SERIAL_OF_PLATFORM) += of_serial.o obj-$(CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL) += nwpserial.o @@ -92,3 +92,4 @@ obj-$(CONFIG_SERIAL_MRST_MAX3110) += mrst_max3110.o obj-$(CONFIG_SERIAL_MFD_HSU) += mfd.o obj-$(CONFIG_SERIAL_IFX6X60) += ifx6x60.o obj-$(CONFIG_SERIAL_PCH_UART) += pch_uart.o +obj-$(CONFIG_SERIAL_MSM_SMD) += msm_smd_tty.o diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c index f9b49b5ff5e..60e049b041a 100644 --- a/drivers/tty/serial/altera_jtaguart.c +++ b/drivers/tty/serial/altera_jtaguart.c @@ -305,28 +305,6 @@ static struct altera_jtaguart altera_jtaguart_ports[ALTERA_JTAGUART_MAXPORTS]; #if defined(CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE) -int __init early_altera_jtaguart_setup(struct altera_jtaguart_platform_uart - *platp) -{ - struct uart_port *port; - int i; - - for (i = 0; i < ALTERA_JTAGUART_MAXPORTS && platp[i].mapbase; i++) { - port = &altera_jtaguart_ports[i].port; - - port->line = i; - port->type = PORT_ALTERA_JTAGUART; - port->mapbase = platp[i].mapbase; - port->membase = ioremap(port->mapbase, ALTERA_JTAGUART_SIZE); - port->iotype = SERIAL_IO_MEM; - port->irq = platp[i].irq; - port->flags = ASYNC_BOOT_AUTOCONF; - port->ops = &altera_jtaguart_ops; - } - - return 0; -} - #if defined(CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE_BYPASS) static void altera_jtaguart_console_putc(struct console *co, const char c) { @@ -384,7 +362,7 @@ static int __init 
altera_jtaguart_console_setup(struct console *co, if (co->index < 0 || co->index >= ALTERA_JTAGUART_MAXPORTS) return -EINVAL; port = &altera_jtaguart_ports[co->index].port; - if (port->membase == 0) + if (port->membase == NULL) return -ENODEV; return 0; } @@ -431,22 +409,45 @@ static int __devinit altera_jtaguart_probe(struct platform_device *pdev) { struct altera_jtaguart_platform_uart *platp = pdev->dev.platform_data; struct uart_port *port; - int i; + struct resource *res_irq, *res_mem; + int i = pdev->id; - for (i = 0; i < ALTERA_JTAGUART_MAXPORTS && platp[i].mapbase; i++) { - port = &altera_jtaguart_ports[i].port; + /* -1 emphasizes that the platform must have one port, no .N suffix */ + if (i == -1) + i = 0; - port->line = i; - port->type = PORT_ALTERA_JTAGUART; - port->mapbase = platp[i].mapbase; - port->membase = ioremap(port->mapbase, ALTERA_JTAGUART_SIZE); - port->iotype = SERIAL_IO_MEM; - port->irq = platp[i].irq; - port->ops = &altera_jtaguart_ops; - port->flags = ASYNC_BOOT_AUTOCONF; + if (i >= ALTERA_JTAGUART_MAXPORTS) + return -EINVAL; - uart_add_one_port(&altera_jtaguart_driver, port); - } + port = &altera_jtaguart_ports[i].port; + + res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res_mem) + port->mapbase = res_mem->start; + else if (platp) + port->mapbase = platp->mapbase; + else + return -ENODEV; + + res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (res_irq) + port->irq = res_irq->start; + else if (platp) + port->irq = platp->irq; + else + return -ENODEV; + + port->membase = ioremap(port->mapbase, ALTERA_JTAGUART_SIZE); + if (!port->membase) + return -ENOMEM; + + port->line = i; + port->type = PORT_ALTERA_JTAGUART; + port->iotype = SERIAL_IO_MEM; + port->ops = &altera_jtaguart_ops; + port->flags = UPF_BOOT_AUTOCONF; + + uart_add_one_port(&altera_jtaguart_driver, port); return 0; } @@ -454,23 +455,34 @@ static int __devinit altera_jtaguart_probe(struct platform_device *pdev) static int __devexit altera_jtaguart_remove(struct platform_device *pdev) { struct uart_port *port; - int i; + int i = pdev->id; - for (i = 0; i < ALTERA_JTAGUART_MAXPORTS; i++) { - port = &altera_jtaguart_ports[i].port; - if (port) - uart_remove_one_port(&altera_jtaguart_driver, port); - } + if (i == -1) + i = 0; + + port = &altera_jtaguart_ports[i].port; + uart_remove_one_port(&altera_jtaguart_driver, port); return 0; } +#ifdef CONFIG_OF +static struct of_device_id altera_jtaguart_match[] = { + { .compatible = "ALTR,juart-1.0", }, + {}, +}; +MODULE_DEVICE_TABLE(of, altera_jtaguart_match); +#else +#define altera_jtaguart_match NULL +#endif /* CONFIG_OF */ + static struct platform_driver altera_jtaguart_platform_driver = { .probe = altera_jtaguart_probe, .remove = __devexit_p(altera_jtaguart_remove), .driver = { - .name = DRV_NAME, - .owner = THIS_MODULE, + .name = DRV_NAME, + .owner = THIS_MODULE, + .of_match_table = altera_jtaguart_match, }, }; diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c index 721216292a5..6d5b036ac78 100644 --- a/drivers/tty/serial/altera_uart.c +++ b/drivers/tty/serial/altera_uart.c @@ -24,6 +24,7 @@ #include <linux/serial.h> #include <linux/serial_core.h> #include <linux/platform_device.h> +#include <linux/of.h> #include <linux/io.h> #include <linux/altera_uart.h> @@ -86,16 +87,12 @@ struct altera_uart { static u32 altera_uart_readl(struct uart_port *port, int reg) { - struct altera_uart_platform_uart *platp = port->private_data; - - return readl(port->membase + (reg << platp->bus_shift)); + return 
readl(port->membase + (reg << port->regshift)); } static void altera_uart_writel(struct uart_port *port, u32 dat, int reg) { - struct altera_uart_platform_uart *platp = port->private_data; - - writel(dat, port->membase + (reg << platp->bus_shift)); + writel(dat, port->membase + (reg << port->regshift)); } static unsigned int altera_uart_tx_empty(struct uart_port *port) @@ -511,6 +508,29 @@ static struct uart_driver altera_uart_driver = { .cons = ALTERA_UART_CONSOLE, }; +#ifdef CONFIG_OF +static int altera_uart_get_of_uartclk(struct platform_device *pdev, + struct uart_port *port) +{ + int len; + const __be32 *clk; + + clk = of_get_property(pdev->dev.of_node, "clock-frequency", &len); + if (!clk || len < sizeof(__be32)) + return -ENODEV; + + port->uartclk = be32_to_cpup(clk); + + return 0; +} +#else +static int altera_uart_get_of_uartclk(struct platform_device *pdev, + struct uart_port *port) +{ + return -ENODEV; +} +#endif /* CONFIG_OF */ + static int __devinit altera_uart_probe(struct platform_device *pdev) { struct altera_uart_platform_uart *platp = pdev->dev.platform_data; @@ -518,6 +538,7 @@ static int __devinit altera_uart_probe(struct platform_device *pdev) struct resource *res_mem; struct resource *res_irq; int i = pdev->id; + int ret; /* -1 emphasizes that the platform must have one port, no .N suffix */ if (i == -1) @@ -542,17 +563,29 @@ static int __devinit altera_uart_probe(struct platform_device *pdev) else if (platp->irq) port->irq = platp->irq; + /* Check platform data first so we can override device node data */ + if (platp) + port->uartclk = platp->uartclk; + else { + ret = altera_uart_get_of_uartclk(pdev, port); + if (ret) + return ret; + } + port->membase = ioremap(port->mapbase, ALTERA_UART_SIZE); if (!port->membase) return -ENOMEM; + if (platp) + port->regshift = platp->bus_shift; + else + port->regshift = 0; + port->line = i; port->type = PORT_ALTERA_UART; port->iotype = SERIAL_IO_MEM; - port->uartclk = platp->uartclk; port->ops = &altera_uart_ops; port->flags = UPF_BOOT_AUTOCONF; - port->private_data = platp; uart_add_one_port(&altera_uart_driver, port); @@ -561,19 +594,35 @@ static int __devinit altera_uart_probe(struct platform_device *pdev) static int __devexit altera_uart_remove(struct platform_device *pdev) { - struct uart_port *port = &altera_uart_ports[pdev->id].port; + struct uart_port *port; + int i = pdev->id; + + if (i == -1) + i = 0; + port = &altera_uart_ports[i].port; uart_remove_one_port(&altera_uart_driver, port); + return 0; } +#ifdef CONFIG_OF +static struct of_device_id altera_uart_match[] = { + { .compatible = "ALTR,uart-1.0", }, + {}, +}; +MODULE_DEVICE_TABLE(of, altera_uart_match); +#else +#define altera_uart_match NULL +#endif /* CONFIG_OF */ + static struct platform_driver altera_uart_platform_driver = { .probe = altera_uart_probe, .remove = __devexit_p(altera_uart_remove), .driver = { - .name = DRV_NAME, - .owner = THIS_MODULE, - .pm = NULL, + .name = DRV_NAME, + .owner = THIS_MODULE, + .of_match_table = altera_uart_match, }, }; diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c index 2904aa04412..d742dd2c525 100644 --- a/drivers/tty/serial/amba-pl010.c +++ b/drivers/tty/serial/amba-pl010.c @@ -676,7 +676,7 @@ static struct uart_driver amba_reg = { .cons = AMBA_CONSOLE, }; -static int pl010_probe(struct amba_device *dev, struct amba_id *id) +static int pl010_probe(struct amba_device *dev, const struct amba_id *id) { struct uart_amba_port *uap; void __iomem *base; diff --git a/drivers/tty/serial/amba-pl011.c 
b/drivers/tty/serial/amba-pl011.c index e76d7d00012..57731e87008 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -96,6 +96,22 @@ static struct vendor_data vendor_st = { }; /* Deals with DMA transactions */ + +struct pl011_sgbuf { + struct scatterlist sg; + char *buf; +}; + +struct pl011_dmarx_data { + struct dma_chan *chan; + struct completion complete; + bool use_buf_b; + struct pl011_sgbuf sgbuf_a; + struct pl011_sgbuf sgbuf_b; + dma_cookie_t cookie; + bool running; +}; + struct pl011_dmatx_data { struct dma_chan *chan; struct scatterlist sg; @@ -120,12 +136,70 @@ struct uart_amba_port { char type[12]; #ifdef CONFIG_DMA_ENGINE /* DMA stuff */ - bool using_dma; + bool using_tx_dma; + bool using_rx_dma; + struct pl011_dmarx_data dmarx; struct pl011_dmatx_data dmatx; #endif }; /* + * Reads up to 256 characters from the FIFO or until it's empty and + * inserts them into the TTY layer. Returns the number of characters + * read from the FIFO. + */ +static int pl011_fifo_to_tty(struct uart_amba_port *uap) +{ + u16 status, ch; + unsigned int flag, max_count = 256; + int fifotaken = 0; + + while (max_count--) { + status = readw(uap->port.membase + UART01x_FR); + if (status & UART01x_FR_RXFE) + break; + + /* Take chars from the FIFO and update status */ + ch = readw(uap->port.membase + UART01x_DR) | + UART_DUMMY_DR_RX; + flag = TTY_NORMAL; + uap->port.icount.rx++; + fifotaken++; + + if (unlikely(ch & UART_DR_ERROR)) { + if (ch & UART011_DR_BE) { + ch &= ~(UART011_DR_FE | UART011_DR_PE); + uap->port.icount.brk++; + if (uart_handle_break(&uap->port)) + continue; + } else if (ch & UART011_DR_PE) + uap->port.icount.parity++; + else if (ch & UART011_DR_FE) + uap->port.icount.frame++; + if (ch & UART011_DR_OE) + uap->port.icount.overrun++; + + ch &= uap->port.read_status_mask; + + if (ch & UART011_DR_BE) + flag = TTY_BREAK; + else if (ch & UART011_DR_PE) + flag = TTY_PARITY; + else if (ch & UART011_DR_FE) + flag = TTY_FRAME; + } + + if (uart_handle_sysrq_char(&uap->port, ch & 255)) + continue; + + uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag); + } + + return fifotaken; +} + + +/* * All the DMA operation mode stuff goes inside this ifdef. * This assumes that you have a generic DMA device interface, * no custom DMA interfaces are supported. 
@@ -134,6 +208,31 @@ struct uart_amba_port { #define PL011_DMA_BUFFER_SIZE PAGE_SIZE +static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg, + enum dma_data_direction dir) +{ + sg->buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL); + if (!sg->buf) + return -ENOMEM; + + sg_init_one(&sg->sg, sg->buf, PL011_DMA_BUFFER_SIZE); + + if (dma_map_sg(chan->device->dev, &sg->sg, 1, dir) != 1) { + kfree(sg->buf); + return -EINVAL; + } + return 0; +} + +static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg, + enum dma_data_direction dir) +{ + if (sg->buf) { + dma_unmap_sg(chan->device->dev, &sg->sg, 1, dir); + kfree(sg->buf); + } +} + static void pl011_dma_probe_initcall(struct uart_amba_port *uap) { /* DMA is the sole user of the platform data right now */ @@ -153,7 +252,7 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap) return; } - /* Try to acquire a generic DMA engine slave channel */ + /* Try to acquire a generic DMA engine slave TX channel */ dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); @@ -168,6 +267,28 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap) dev_info(uap->port.dev, "DMA channel TX %s\n", dma_chan_name(uap->dmatx.chan)); + + /* Optionally make use of an RX channel as well */ + if (plat->dma_rx_param) { + struct dma_slave_config rx_conf = { + .src_addr = uap->port.mapbase + UART01x_DR, + .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, + .direction = DMA_FROM_DEVICE, + .src_maxburst = uap->fifosize >> 1, + }; + + chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param); + if (!chan) { + dev_err(uap->port.dev, "no RX DMA channel!\n"); + return; + } + + dmaengine_slave_config(chan, &rx_conf); + uap->dmarx.chan = chan; + + dev_info(uap->port.dev, "DMA channel RX %s\n", + dma_chan_name(uap->dmarx.chan)); + } } #ifndef MODULE @@ -219,9 +340,10 @@ static void pl011_dma_remove(struct uart_amba_port *uap) /* TODO: remove the initcall if it has not yet executed */ if (uap->dmatx.chan) dma_release_channel(uap->dmatx.chan); + if (uap->dmarx.chan) + dma_release_channel(uap->dmarx.chan); } - /* Forward declare this for the refill routine */ static int pl011_dma_tx_refill(struct uart_amba_port *uap); @@ -380,7 +502,7 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap) */ static bool pl011_dma_tx_irq(struct uart_amba_port *uap) { - if (!uap->using_dma) + if (!uap->using_tx_dma) return false; /* @@ -432,7 +554,7 @@ static inline bool pl011_dma_tx_start(struct uart_amba_port *uap) { u16 dmacr; - if (!uap->using_dma) + if (!uap->using_tx_dma) return false; if (!uap->port.x_char) { @@ -492,7 +614,7 @@ static void pl011_dma_flush_buffer(struct uart_port *port) { struct uart_amba_port *uap = (struct uart_amba_port *)port; - if (!uap->using_dma) + if (!uap->using_tx_dma) return; /* Avoid deadlock with the DMA engine callback */ @@ -508,9 +630,219 @@ static void pl011_dma_flush_buffer(struct uart_port *port) } } +static void pl011_dma_rx_callback(void *data); + +static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap) +{ + struct dma_chan *rxchan = uap->dmarx.chan; + struct dma_device *dma_dev; + struct pl011_dmarx_data *dmarx = &uap->dmarx; + struct dma_async_tx_descriptor *desc; + struct pl011_sgbuf *sgbuf; + + if (!rxchan) + return -EIO; + + /* Start the RX DMA job */ + sgbuf = uap->dmarx.use_buf_b ? 
+ &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a; + dma_dev = rxchan->device; + desc = rxchan->device->device_prep_slave_sg(rxchan, &sgbuf->sg, 1, + DMA_FROM_DEVICE, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + /* + * If the DMA engine is busy and cannot prepare a + * channel, no big deal, the driver will fall back + * to interrupt mode as a result of this error code. + */ + if (!desc) { + uap->dmarx.running = false; + dmaengine_terminate_all(rxchan); + return -EBUSY; + } + + /* Some data to go along to the callback */ + desc->callback = pl011_dma_rx_callback; + desc->callback_param = uap; + dmarx->cookie = dmaengine_submit(desc); + dma_async_issue_pending(rxchan); + + uap->dmacr |= UART011_RXDMAE; + writew(uap->dmacr, uap->port.membase + UART011_DMACR); + uap->dmarx.running = true; + + uap->im &= ~UART011_RXIM; + writew(uap->im, uap->port.membase + UART011_IMSC); + + return 0; +} + +/* + * This is called when either the DMA job is complete, or + * the FIFO timeout interrupt occurred. This must be called + * with the port spinlock uap->port.lock held. + */ +static void pl011_dma_rx_chars(struct uart_amba_port *uap, + u32 pending, bool use_buf_b, + bool readfifo) +{ + struct tty_struct *tty = uap->port.state->port.tty; + struct pl011_sgbuf *sgbuf = use_buf_b ? + &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a; + struct device *dev = uap->dmarx.chan->device->dev; + int dma_count = 0; + u32 fifotaken = 0; /* only used for vdbg() */ + + /* Pick everything from the DMA first */ + if (pending) { + /* Sync in buffer */ + dma_sync_sg_for_cpu(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE); + + /* + * First take all chars in the DMA pipe, then look in the FIFO. + * Note that tty_insert_flip_buf() tries to take as many chars + * as it can. + */ + dma_count = tty_insert_flip_string(uap->port.state->port.tty, + sgbuf->buf, pending); + + /* Return buffer to device */ + dma_sync_sg_for_device(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE); + + uap->port.icount.rx += dma_count; + if (dma_count < pending) + dev_warn(uap->port.dev, + "couldn't insert all characters (TTY is full?)\n"); + } + + /* + * Only continue with trying to read the FIFO if all DMA chars have + * been taken first. + */ + if (dma_count == pending && readfifo) { + /* Clear any error flags */ + writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS, + uap->port.membase + UART011_ICR); + + /* + * If we read all the DMA'd characters, and we had an + * incomplete buffer, that could be due to an rx error, or + * maybe we just timed out. Read any pending chars and check + * the error status. + * + * Error conditions will only occur in the FIFO, these will + * trigger an immediate interrupt and stop the DMA job, so we + * will always find the error in the FIFO, never in the DMA + * buffer. + */ + fifotaken = pl011_fifo_to_tty(uap); + } + + spin_unlock(&uap->port.lock); + dev_vdbg(uap->port.dev, + "Took %d chars from DMA buffer and %d chars from the FIFO\n", + dma_count, fifotaken); + tty_flip_buffer_push(tty); + spin_lock(&uap->port.lock); +} + +static void pl011_dma_rx_irq(struct uart_amba_port *uap) +{ + struct pl011_dmarx_data *dmarx = &uap->dmarx; + struct dma_chan *rxchan = dmarx->chan; + struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ? + &dmarx->sgbuf_b : &dmarx->sgbuf_a; + size_t pending; + struct dma_tx_state state; + enum dma_status dmastat; + + /* + * Pause the transfer so we can trust the current counter, + * do this before we pause the PL011 block, else we may + * overflow the FIFO. 
+ */ + if (dmaengine_pause(rxchan)) + dev_err(uap->port.dev, "unable to pause DMA transfer\n"); + dmastat = rxchan->device->device_tx_status(rxchan, + dmarx->cookie, &state); + if (dmastat != DMA_PAUSED) + dev_err(uap->port.dev, "unable to pause DMA transfer\n"); + + /* Disable RX DMA - incoming data will wait in the FIFO */ + uap->dmacr &= ~UART011_RXDMAE; + writew(uap->dmacr, uap->port.membase + UART011_DMACR); + uap->dmarx.running = false; + + pending = sgbuf->sg.length - state.residue; + BUG_ON(pending > PL011_DMA_BUFFER_SIZE); + /* Then we terminate the transfer - we now know our residue */ + dmaengine_terminate_all(rxchan); + + /* + * This will take the chars we have so far and insert + * into the framework. + */ + pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true); + + /* Switch buffer & re-trigger DMA job */ + dmarx->use_buf_b = !dmarx->use_buf_b; + if (pl011_dma_rx_trigger_dma(uap)) { + dev_dbg(uap->port.dev, "could not retrigger RX DMA job " + "fall back to interrupt mode\n"); + uap->im |= UART011_RXIM; + writew(uap->im, uap->port.membase + UART011_IMSC); + } +} + +static void pl011_dma_rx_callback(void *data) +{ + struct uart_amba_port *uap = data; + struct pl011_dmarx_data *dmarx = &uap->dmarx; + bool lastbuf = dmarx->use_buf_b; + int ret; + + /* + * This completion interrupt occurs typically when the + * RX buffer is totally stuffed but no timeout has yet + * occurred. When that happens, we just want the RX + * routine to flush out the secondary DMA buffer while + * we immediately trigger the next DMA job. + */ + spin_lock_irq(&uap->port.lock); + uap->dmarx.running = false; + dmarx->use_buf_b = !lastbuf; + ret = pl011_dma_rx_trigger_dma(uap); + + pl011_dma_rx_chars(uap, PL011_DMA_BUFFER_SIZE, lastbuf, false); + spin_unlock_irq(&uap->port.lock); + /* + * Do this check after we picked the DMA chars so we don't + * get some IRQ immediately from RX. + */ + if (ret) { + dev_dbg(uap->port.dev, "could not retrigger RX DMA job " + "fall back to interrupt mode\n"); + uap->im |= UART011_RXIM; + writew(uap->im, uap->port.membase + UART011_IMSC); + } +} + +/* + * Stop accepting received characters, when we're shutting down or + * suspending this port. + * Locking: called with port lock held and IRQs disabled. + */ +static inline void pl011_dma_rx_stop(struct uart_amba_port *uap) +{ + /* FIXME. 
Just disable the DMA enable */ + uap->dmacr &= ~UART011_RXDMAE; + writew(uap->dmacr, uap->port.membase + UART011_DMACR); +} static void pl011_dma_startup(struct uart_amba_port *uap) { + int ret; + if (!uap->dmatx.chan) return; @@ -525,8 +857,33 @@ static void pl011_dma_startup(struct uart_amba_port *uap) /* The DMA buffer is now the FIFO the TTY subsystem can use */ uap->port.fifosize = PL011_DMA_BUFFER_SIZE; - uap->using_dma = true; + uap->using_tx_dma = true; + + if (!uap->dmarx.chan) + goto skip_rx; + + /* Allocate and map DMA RX buffers */ + ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a, + DMA_FROM_DEVICE); + if (ret) { + dev_err(uap->port.dev, "failed to init DMA %s: %d\n", + "RX buffer A", ret); + goto skip_rx; + } + + ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b, + DMA_FROM_DEVICE); + if (ret) { + dev_err(uap->port.dev, "failed to init DMA %s: %d\n", + "RX buffer B", ret); + pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, + DMA_FROM_DEVICE); + goto skip_rx; + } + uap->using_rx_dma = true; + +skip_rx: /* Turn on DMA error (RX/TX will be enabled on demand) */ uap->dmacr |= UART011_DMAONERR; writew(uap->dmacr, uap->port.membase + UART011_DMACR); @@ -539,11 +896,17 @@ static void pl011_dma_startup(struct uart_amba_port *uap) if (uap->vendor->dma_threshold) writew(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16, uap->port.membase + ST_UART011_DMAWM); + + if (uap->using_rx_dma) { + if (pl011_dma_rx_trigger_dma(uap)) + dev_dbg(uap->port.dev, "could not trigger initial " + "RX DMA job, fall back to interrupt mode\n"); + } } static void pl011_dma_shutdown(struct uart_amba_port *uap) { - if (!uap->using_dma) + if (!(uap->using_tx_dma || uap->using_rx_dma)) return; /* Disable RX and TX DMA */ @@ -555,19 +918,39 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap) writew(uap->dmacr, uap->port.membase + UART011_DMACR); spin_unlock_irq(&uap->port.lock); - /* In theory, this should already be done by pl011_dma_flush_buffer */ - dmaengine_terminate_all(uap->dmatx.chan); - if (uap->dmatx.queued) { - dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1, - DMA_TO_DEVICE); - uap->dmatx.queued = false; + if (uap->using_tx_dma) { + /* In theory, this should already be done by pl011_dma_flush_buffer */ + dmaengine_terminate_all(uap->dmatx.chan); + if (uap->dmatx.queued) { + dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1, + DMA_TO_DEVICE); + uap->dmatx.queued = false; + } + + kfree(uap->dmatx.buf); + uap->using_tx_dma = false; } - kfree(uap->dmatx.buf); + if (uap->using_rx_dma) { + dmaengine_terminate_all(uap->dmarx.chan); + /* Clean up the RX DMA */ + pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE); + pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE); + uap->using_rx_dma = false; + } +} - uap->using_dma = false; +static inline bool pl011_dma_rx_available(struct uart_amba_port *uap) +{ + return uap->using_rx_dma; } +static inline bool pl011_dma_rx_running(struct uart_amba_port *uap) +{ + return uap->using_rx_dma && uap->dmarx.running; +} + + #else /* Blank functions if the DMA engine is not available */ static inline void pl011_dma_probe(struct uart_amba_port *uap) @@ -600,6 +983,29 @@ static inline bool pl011_dma_tx_start(struct uart_amba_port *uap) return false; } +static inline void pl011_dma_rx_irq(struct uart_amba_port *uap) +{ +} + +static inline void pl011_dma_rx_stop(struct uart_amba_port *uap) +{ +} + +static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap) +{ + return 
-EIO; +} + +static inline bool pl011_dma_rx_available(struct uart_amba_port *uap) +{ + return false; +} + +static inline bool pl011_dma_rx_running(struct uart_amba_port *uap) +{ + return false; +} + #define pl011_dma_flush_buffer NULL #endif @@ -630,6 +1036,8 @@ static void pl011_stop_rx(struct uart_port *port) uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM| UART011_PEIM|UART011_BEIM|UART011_OEIM); writew(uap->im, uap->port.membase + UART011_IMSC); + + pl011_dma_rx_stop(uap); } static void pl011_enable_ms(struct uart_port *port) @@ -643,51 +1051,24 @@ static void pl011_enable_ms(struct uart_port *port) static void pl011_rx_chars(struct uart_amba_port *uap) { struct tty_struct *tty = uap->port.state->port.tty; - unsigned int status, ch, flag, max_count = 256; - - status = readw(uap->port.membase + UART01x_FR); - while ((status & UART01x_FR_RXFE) == 0 && max_count--) { - ch = readw(uap->port.membase + UART01x_DR) | UART_DUMMY_DR_RX; - flag = TTY_NORMAL; - uap->port.icount.rx++; - - /* - * Note that the error handling code is - * out of the main execution path - */ - if (unlikely(ch & UART_DR_ERROR)) { - if (ch & UART011_DR_BE) { - ch &= ~(UART011_DR_FE | UART011_DR_PE); - uap->port.icount.brk++; - if (uart_handle_break(&uap->port)) - goto ignore_char; - } else if (ch & UART011_DR_PE) - uap->port.icount.parity++; - else if (ch & UART011_DR_FE) - uap->port.icount.frame++; - if (ch & UART011_DR_OE) - uap->port.icount.overrun++; - - ch &= uap->port.read_status_mask; - - if (ch & UART011_DR_BE) - flag = TTY_BREAK; - else if (ch & UART011_DR_PE) - flag = TTY_PARITY; - else if (ch & UART011_DR_FE) - flag = TTY_FRAME; - } - if (uart_handle_sysrq_char(&uap->port, ch & 255)) - goto ignore_char; - - uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag); + pl011_fifo_to_tty(uap); - ignore_char: - status = readw(uap->port.membase + UART01x_FR); - } spin_unlock(&uap->port.lock); tty_flip_buffer_push(tty); + /* + * If we were temporarily out of DMA mode for a while, + * attempt to switch back to DMA mode again. + */ + if (pl011_dma_rx_available(uap)) { + if (pl011_dma_rx_trigger_dma(uap)) { + dev_dbg(uap->port.dev, "could not trigger RX DMA job " + "fall back to interrupt mode again\n"); + uap->im |= UART011_RXIM; + } else + uap->im &= ~UART011_RXIM; + writew(uap->im, uap->port.membase + UART011_IMSC); + } spin_lock(&uap->port.lock); } @@ -767,8 +1148,12 @@ static irqreturn_t pl011_int(int irq, void *dev_id) UART011_RXIS), uap->port.membase + UART011_ICR); - if (status & (UART011_RTIS|UART011_RXIS)) - pl011_rx_chars(uap); + if (status & (UART011_RTIS|UART011_RXIS)) { + if (pl011_dma_rx_running(uap)) + pl011_dma_rx_irq(uap); + else + pl011_rx_chars(uap); + } if (status & (UART011_DSRMIS|UART011_DCDMIS| UART011_CTSMIS|UART011_RIMIS)) pl011_modem_status(uap); @@ -945,10 +1330,14 @@ static int pl011_startup(struct uart_port *port) pl011_dma_startup(uap); /* - * Finally, enable interrupts + * Finally, enable interrupts, only timeouts when using DMA + * if initial RX DMA job failed, start in interrupt mode + * as well. 
*/ spin_lock_irq(&uap->port.lock); - uap->im = UART011_RXIM | UART011_RTIM; + uap->im = UART011_RTIM; + if (!pl011_dma_rx_running(uap)) + uap->im |= UART011_RXIM; writew(uap->im, uap->port.membase + UART011_IMSC); spin_unlock_irq(&uap->port.lock); @@ -1349,7 +1738,7 @@ static struct uart_driver amba_reg = { .cons = AMBA_CONSOLE, }; -static int pl011_probe(struct amba_device *dev, struct amba_id *id) +static int pl011_probe(struct amba_device *dev, const struct amba_id *id) { struct uart_amba_port *uap; struct vendor_data *vendor = id->data; diff --git a/drivers/tty/serial/apbuart.c b/drivers/tty/serial/apbuart.c index 095a5d56261..1ab999b04ef 100644 --- a/drivers/tty/serial/apbuart.c +++ b/drivers/tty/serial/apbuart.c @@ -553,8 +553,7 @@ static struct uart_driver grlib_apbuart_driver = { /* OF Platform Driver */ /* ======================================================================== */ -static int __devinit apbuart_probe(struct platform_device *op, - const struct of_device_id *match) +static int __devinit apbuart_probe(struct platform_device *op) { int i = -1; struct uart_port *port = NULL; @@ -587,7 +586,7 @@ static struct of_device_id __initdata apbuart_match[] = { {}, }; -static struct of_platform_driver grlib_apbuart_of_driver = { +static struct platform_driver grlib_apbuart_of_driver = { .probe = apbuart_probe, .driver = { .owner = THIS_MODULE, @@ -676,10 +675,10 @@ static int __init grlib_apbuart_init(void) return ret; } - ret = of_register_platform_driver(&grlib_apbuart_of_driver); + ret = platform_driver_register(&grlib_apbuart_of_driver); if (ret) { printk(KERN_ERR - "%s: of_register_platform_driver failed (%i)\n", + "%s: platform_driver_register failed (%i)\n", __FILE__, ret); uart_unregister_driver(&grlib_apbuart_driver); return ret; @@ -697,7 +696,7 @@ static void __exit grlib_apbuart_exit(void) &grlib_apbuart_ports[i]); uart_unregister_driver(&grlib_apbuart_driver); - of_unregister_platform_driver(&grlib_apbuart_of_driver); + platform_driver_unregister(&grlib_apbuart_of_driver); } module_init(grlib_apbuart_init); diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 2a1d52fb493..f119d176110 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -1240,6 +1240,21 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios, spin_unlock_irqrestore(&port->lock, flags); } +static void atmel_set_ldisc(struct uart_port *port, int new) +{ + int line = port->line; + + if (line >= port->state->port.tty->driver->num) + return; + + if (port->state->port.tty->ldisc->ops->num == N_PPS) { + port->flags |= UPF_HARDPPS_CD; + atmel_enable_ms(port); + } else { + port->flags &= ~UPF_HARDPPS_CD; + } +} + /* * Return string describing the specified port */ @@ -1380,6 +1395,7 @@ static struct uart_ops atmel_pops = { .shutdown = atmel_shutdown, .flush_buffer = atmel_flush_buffer, .set_termios = atmel_set_termios, + .set_ldisc = atmel_set_ldisc, .type = atmel_type, .release_port = atmel_release_port, .request_port = atmel_request_port, diff --git a/drivers/tty/serial/bfin_5xx.c b/drivers/tty/serial/bfin_5xx.c index e381b895b04..9b1ff2b6bb3 100644 --- a/drivers/tty/serial/bfin_5xx.c +++ b/drivers/tty/serial/bfin_5xx.c @@ -370,10 +370,8 @@ static irqreturn_t bfin_serial_rx_int(int irq, void *dev_id) { struct bfin_serial_port *uart = dev_id; - spin_lock(&uart->port.lock); while (UART_GET_LSR(uart) & DR) bfin_serial_rx_chars(uart); - spin_unlock(&uart->port.lock); return IRQ_HANDLED; } @@ -490,9 +488,8 @@ void 
bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart) { int x_pos, pos; - dma_disable_irq(uart->tx_dma_channel); - dma_disable_irq(uart->rx_dma_channel); - spin_lock_bh(&uart->port.lock); + dma_disable_irq_nosync(uart->rx_dma_channel); + spin_lock_bh(&uart->rx_lock); /* 2D DMA RX buffer ring is used. Because curr_y_count and * curr_x_count can't be read as an atomic operation, @@ -523,8 +520,7 @@ void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart) uart->rx_dma_buf.tail = uart->rx_dma_buf.head; } - spin_unlock_bh(&uart->port.lock); - dma_enable_irq(uart->tx_dma_channel); + spin_unlock_bh(&uart->rx_lock); dma_enable_irq(uart->rx_dma_channel); mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES); @@ -571,7 +567,7 @@ static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id) unsigned short irqstat; int x_pos, pos; - spin_lock(&uart->port.lock); + spin_lock(&uart->rx_lock); irqstat = get_dma_curr_irqstat(uart->rx_dma_channel); clear_dma_irqstat(uart->rx_dma_channel); @@ -589,7 +585,7 @@ static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id) uart->rx_dma_buf.tail = uart->rx_dma_buf.head; } - spin_unlock(&uart->port.lock); + spin_unlock(&uart->rx_lock); return IRQ_HANDLED; } @@ -1332,6 +1328,7 @@ static int bfin_serial_probe(struct platform_device *pdev) } #ifdef CONFIG_SERIAL_BFIN_DMA + spin_lock_init(&uart->rx_lock); uart->tx_done = 1; uart->tx_count = 0; diff --git a/drivers/tty/serial/bfin_sport_uart.c b/drivers/tty/serial/bfin_sport_uart.c index e95c524d9d1..c3ec0a61d85 100644 --- a/drivers/tty/serial/bfin_sport_uart.c +++ b/drivers/tty/serial/bfin_sport_uart.c @@ -788,7 +788,7 @@ static int __devinit sport_uart_probe(struct platform_device *pdev) sport->port.mapbase = res->start; sport->port.irq = platform_get_irq(pdev, 0); - if (sport->port.irq < 0) { + if ((int)sport->port.irq < 0) { dev_err(&pdev->dev, "No sport RX/TX IRQ specified\n"); ret = -ENOENT; goto out_error_unmap; diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c index 8692ff98fc0..a9a6a5fd169 100644 --- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c +++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c @@ -1359,8 +1359,7 @@ static struct uart_driver cpm_reg = { static int probe_index; -static int __devinit cpm_uart_probe(struct platform_device *ofdev, - const struct of_device_id *match) +static int __devinit cpm_uart_probe(struct platform_device *ofdev) { int index = probe_index++; struct uart_cpm_port *pinfo = &cpm_uart_ports[index]; @@ -1405,7 +1404,7 @@ static struct of_device_id cpm_uart_match[] = { {} }; -static struct of_platform_driver cpm_uart_driver = { +static struct platform_driver cpm_uart_driver = { .driver = { .name = "cpm_uart", .owner = THIS_MODULE, @@ -1421,7 +1420,7 @@ static int __init cpm_uart_init(void) if (ret) return ret; - ret = of_register_platform_driver(&cpm_uart_driver); + ret = platform_driver_register(&cpm_uart_driver); if (ret) uart_unregister_driver(&cpm_reg); @@ -1430,7 +1429,7 @@ static int __init cpm_uart_init(void) static void __exit cpm_uart_exit(void) { - of_unregister_platform_driver(&cpm_uart_driver); + platform_driver_unregister(&cpm_uart_driver); uart_unregister_driver(&cpm_reg); } diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c index bcc31f2140a..225123b37f1 100644 --- a/drivers/tty/serial/crisv10.c +++ b/drivers/tty/serial/crisv10.c @@ -3581,8 +3581,7 @@ rs_break(struct tty_struct *tty, int break_state) } static int -rs_tiocmset(struct tty_struct *tty, struct file *file, 
- unsigned int set, unsigned int clear) +rs_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct e100_serial *info = (struct e100_serial *)tty->driver_data; unsigned long flags; @@ -3614,7 +3613,7 @@ rs_tiocmset(struct tty_struct *tty, struct file *file, } static int -rs_tiocmget(struct tty_struct *tty, struct file *file) +rs_tiocmget(struct tty_struct *tty) { struct e100_serial *info = (struct e100_serial *)tty->driver_data; unsigned int result; @@ -3648,7 +3647,7 @@ rs_tiocmget(struct tty_struct *tty, struct file *file) static int -rs_ioctl(struct tty_struct *tty, struct file * file, +rs_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct e100_serial * info = (struct e100_serial *)tty->driver_data; diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c index ab93763862d..8ee5a41d340 100644 --- a/drivers/tty/serial/ifx6x60.c +++ b/drivers/tty/serial/ifx6x60.c @@ -8,7 +8,7 @@ * Jan Dumon <j.dumon@option.com> * * Copyright (C) 2009, 2010 Intel Corp - * Russ Gorby <richardx.r.gorby@intel.com> + * Russ Gorby <russ.gorby@intel.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -67,6 +67,7 @@ #define IFX_SPI_MORE_MASK 0x10 #define IFX_SPI_MORE_BIT 12 /* bit position in u16 */ #define IFX_SPI_CTS_BIT 13 /* bit position in u16 */ +#define IFX_SPI_MODE SPI_MODE_1 #define IFX_SPI_TTY_ID 0 #define IFX_SPI_TIMEOUT_SEC 2 #define IFX_SPI_HEADER_0 (-1) @@ -76,7 +77,7 @@ static void ifx_spi_handle_srdy(struct ifx_spi_device *ifx_dev); /* local variables */ -static int spi_b16 = 1; /* 8 or 16 bit word length */ +static int spi_bpw = 16; /* 8, 16 or 32 bit word length */ static struct tty_driver *tty_drv; static struct ifx_spi_device *saved_ifx_dev; static struct lock_class_key ifx_spi_key; @@ -244,7 +245,7 @@ static void ifx_spi_timeout(unsigned long arg) * Map the signal state into Linux modem flags and report the value * in Linux terms */ -static int ifx_spi_tiocmget(struct tty_struct *tty, struct file *filp) +static int ifx_spi_tiocmget(struct tty_struct *tty) { unsigned int value; struct ifx_spi_device *ifx_dev = tty->driver_data; @@ -262,7 +263,6 @@ static int ifx_spi_tiocmget(struct tty_struct *tty, struct file *filp) /** * ifx_spi_tiocmset - set modem bits * @tty: the tty structure - * @filp: file handle issuing the request * @set: bits to set * @clear: bits to clear * @@ -271,7 +271,7 @@ static int ifx_spi_tiocmget(struct tty_struct *tty, struct file *filp) * * FIXME: do we need to kick the tranfers when we do this ? */ -static int ifx_spi_tiocmset(struct tty_struct *tty, struct file *filp, +static int ifx_spi_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct ifx_spi_device *ifx_dev = tty->driver_data; @@ -722,9 +722,9 @@ static void ifx_spi_io(unsigned long data) /* note len is BYTES, not transfers */ ifx_dev->spi_xfer.len = IFX_SPI_TRANSFER_SIZE; ifx_dev->spi_xfer.cs_change = 0; - ifx_dev->spi_xfer.speed_hz = 12500000; + ifx_dev->spi_xfer.speed_hz = ifx_dev->spi_dev->max_speed_hz; /* ifx_dev->spi_xfer.speed_hz = 390625; */ - ifx_dev->spi_xfer.bits_per_word = spi_b16 ? 
16 : 8; + ifx_dev->spi_xfer.bits_per_word = spi_bpw; ifx_dev->spi_xfer.tx_buf = ifx_dev->tx_buffer; ifx_dev->spi_xfer.rx_buf = ifx_dev->rx_buffer; @@ -732,7 +732,7 @@ static void ifx_spi_io(unsigned long data) /* * setup dma pointers */ - if (ifx_dev->is_6160) { + if (ifx_dev->use_dma) { ifx_dev->spi_msg.is_dma_mapped = 1; ifx_dev->tx_dma = ifx_dev->tx_bus; ifx_dev->rx_dma = ifx_dev->rx_bus; @@ -798,8 +798,8 @@ static int ifx_spi_create_port(struct ifx_spi_device *ifx_dev) goto error_ret; } - pport->ops = &ifx_tty_port_ops; tty_port_init(pport); + pport->ops = &ifx_tty_port_ops; ifx_dev->minor = IFX_SPI_TTY_ID; ifx_dev->tty_dev = tty_register_device(tty_drv, ifx_dev->minor, &ifx_dev->spi_dev->dev); @@ -960,7 +960,7 @@ static int ifx_spi_spi_probe(struct spi_device *spi) { int ret; int srdy; - struct ifx_modem_platform_data *pl_data = NULL; + struct ifx_modem_platform_data *pl_data; struct ifx_spi_device *ifx_dev; if (saved_ifx_dev) { @@ -968,6 +968,12 @@ static int ifx_spi_spi_probe(struct spi_device *spi) return -ENODEV; } + pl_data = (struct ifx_modem_platform_data *)spi->dev.platform_data; + if (!pl_data) { + dev_err(&spi->dev, "missing platform data!"); + return -ENODEV; + } + /* initialize structure to hold our device variables */ ifx_dev = kzalloc(sizeof(struct ifx_spi_device), GFP_KERNEL); if (!ifx_dev) { @@ -983,14 +989,25 @@ static int ifx_spi_spi_probe(struct spi_device *spi) init_timer(&ifx_dev->spi_timer); ifx_dev->spi_timer.function = ifx_spi_timeout; ifx_dev->spi_timer.data = (unsigned long)ifx_dev; - ifx_dev->is_6160 = pl_data->is_6160; + ifx_dev->modem = pl_data->modem_type; + ifx_dev->use_dma = pl_data->use_dma; + ifx_dev->max_hz = pl_data->max_hz; + /* initialize spi mode, etc */ + spi->max_speed_hz = ifx_dev->max_hz; + spi->mode = IFX_SPI_MODE | (SPI_LOOP & spi->mode); + spi->bits_per_word = spi_bpw; + ret = spi_setup(spi); + if (ret) { + dev_err(&spi->dev, "SPI setup wasn't successful %d", ret); + return -ENODEV; + } /* ensure SPI protocol flags are initialized to enable transfer */ ifx_dev->spi_more = 0; ifx_dev->spi_slave_cts = 0; /*initialize transfer and dma buffers */ - ifx_dev->tx_buffer = dma_alloc_coherent(&ifx_dev->spi_dev->dev, + ifx_dev->tx_buffer = dma_alloc_coherent(ifx_dev->spi_dev->dev.parent, IFX_SPI_TRANSFER_SIZE, &ifx_dev->tx_bus, GFP_KERNEL); @@ -999,7 +1016,7 @@ static int ifx_spi_spi_probe(struct spi_device *spi) ret = -ENOMEM; goto error_ret; } - ifx_dev->rx_buffer = dma_alloc_coherent(&ifx_dev->spi_dev->dev, + ifx_dev->rx_buffer = dma_alloc_coherent(ifx_dev->spi_dev->dev.parent, IFX_SPI_TRANSFER_SIZE, &ifx_dev->rx_bus, GFP_KERNEL); @@ -1025,18 +1042,11 @@ static int ifx_spi_spi_probe(struct spi_device *spi) goto error_ret; } - pl_data = (struct ifx_modem_platform_data *)spi->dev.platform_data; - if (pl_data) { - ifx_dev->gpio.reset = pl_data->rst_pmu; - ifx_dev->gpio.po = pl_data->pwr_on; - ifx_dev->gpio.mrdy = pl_data->mrdy; - ifx_dev->gpio.srdy = pl_data->srdy; - ifx_dev->gpio.reset_out = pl_data->rst_out; - } else { - dev_err(&spi->dev, "missing platform data!"); - ret = -ENODEV; - goto error_ret; - } + ifx_dev->gpio.reset = pl_data->rst_pmu; + ifx_dev->gpio.po = pl_data->pwr_on; + ifx_dev->gpio.mrdy = pl_data->mrdy; + ifx_dev->gpio.srdy = pl_data->srdy; + ifx_dev->gpio.reset_out = pl_data->rst_out; dev_info(&spi->dev, "gpios %d, %d, %d, %d, %d", ifx_dev->gpio.reset, ifx_dev->gpio.po, ifx_dev->gpio.mrdy, @@ -1322,9 +1332,9 @@ static const struct spi_device_id ifx_id_table[] = { MODULE_DEVICE_TABLE(spi, ifx_id_table); /* spi operations */ 
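/*
 * Illustrative sketch, not part of this patch: with the probe changes
 * above, a board file now describes the modem through
 * ifx_modem_platform_data rather than the old is_6160 flag.  The field
 * names below come from the driver; every value, the GPIO numbers and
 * the bus/chip-select choice are made-up examples, and the header path
 * is assumed to be the one the driver itself includes.
 */
#include <linux/spi/spi.h>
#include <linux/spi/ifx_modem.h>

static struct ifx_modem_platform_data ifx_modem_pdata = {
	.modem_type	= 6160,		/* placeholder modem identifier */
	.use_dma	= 1,		/* provide dma-able addrs in the SPI msg */
	.max_hz		= 12500000,	/* becomes spi->max_speed_hz in probe */
	.rst_pmu	= 10,		/* GPIO numbers are board specific */
	.pwr_on		= 11,
	.mrdy		= 12,
	.srdy		= 13,
	.rst_out	= 14,
};

static struct spi_board_info ifx_spi_board_info __initdata = {
	.modalias	= "ifx6x60",	/* assumes a matching entry in ifx_id_table */
	.bus_num	= 0,
	.chip_select	= 0,
	.max_speed_hz	= 12500000,
	.platform_data	= &ifx_modem_pdata,
};

/* registered from the board init code with
 * spi_register_board_info(&ifx_spi_board_info, 1); */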
-static const struct spi_driver ifx_spi_driver_6160 = { +static const struct spi_driver ifx_spi_driver = { .driver = { - .name = "ifx6160", + .name = DRVNAME, .bus = &spi_bus_type, .pm = &ifx_spi_pm, .owner = THIS_MODULE}, @@ -1346,7 +1356,7 @@ static void __exit ifx_spi_exit(void) { /* unregister */ tty_unregister_driver(tty_drv); - spi_unregister_driver((void *)&ifx_spi_driver_6160); + spi_unregister_driver((void *)&ifx_spi_driver); } /** @@ -1388,7 +1398,7 @@ static int __init ifx_spi_init(void) return result; } - result = spi_register_driver((void *)&ifx_spi_driver_6160); + result = spi_register_driver((void *)&ifx_spi_driver); if (result) { pr_err("%s: spi_register_driver failed(%d)", DRVNAME, result); diff --git a/drivers/tty/serial/ifx6x60.h b/drivers/tty/serial/ifx6x60.h index deb7b8d977d..e8464baf9e7 100644 --- a/drivers/tty/serial/ifx6x60.h +++ b/drivers/tty/serial/ifx6x60.h @@ -29,8 +29,6 @@ #define DRVNAME "ifx6x60" #define TTYNAME "ttyIFX" -/* #define IFX_THROTTLE_CODE */ - #define IFX_SPI_MAX_MINORS 1 #define IFX_SPI_TRANSFER_SIZE 2048 #define IFX_SPI_FIFO_SIZE 4096 @@ -88,7 +86,9 @@ struct ifx_spi_device { dma_addr_t rx_dma; dma_addr_t tx_dma; - int is_6160; /* Modem type */ + int modem; /* Modem type */ + int use_dma; /* provide dma-able addrs in SPI msg */ + long max_hz; /* max SPI frequency */ spinlock_t write_lock; int write_pending; diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c index beb1afa27d8..7b951adac54 100644 --- a/drivers/tty/serial/max3100.c +++ b/drivers/tty/serial/max3100.c @@ -601,7 +601,7 @@ static int max3100_startup(struct uart_port *port) s->rts = 0; sprintf(b, "max3100-%d", s->minor); - s->workqueue = create_freezeable_workqueue(b); + s->workqueue = create_freezable_workqueue(b); if (!s->workqueue) { dev_warn(&s->spi->dev, "cannot create workqueue\n"); return -EBUSY; diff --git a/drivers/tty/serial/max3107.c b/drivers/tty/serial/max3107.c index 910870edf70..750b4f62731 100644 --- a/drivers/tty/serial/max3107.c +++ b/drivers/tty/serial/max3107.c @@ -833,7 +833,7 @@ static int max3107_startup(struct uart_port *port) struct max3107_port *s = container_of(port, struct max3107_port, port); /* Initialize work queue */ - s->workqueue = create_freezeable_workqueue("max3107"); + s->workqueue = create_freezable_workqueue("max3107"); if (!s->workqueue) { dev_err(&s->spi->dev, "Workqueue creation failed\n"); return -EBUSY; diff --git a/drivers/tty/serial/mfd.c b/drivers/tty/serial/mfd.c index d40010a22ec..c111f36f5d2 100644 --- a/drivers/tty/serial/mfd.c +++ b/drivers/tty/serial/mfd.c @@ -16,9 +16,7 @@ * 2/3 chan to port 1, 4/5 chan to port 3. Even number chans * are used for RX, odd chans for TX * - * 2. In A0 stepping, UART will not support TX half empty flag - * - * 3. The RI/DSR/DCD/DTR are not pinned out, DCD & DSR are always + * 2. 
The RI/DSR/DCD/DTR are not pinned out, DCD & DSR are always * asserted, only when the HW is reset the DDCD and DDSR will * be triggered */ @@ -41,8 +39,6 @@ #include <linux/io.h> #include <linux/debugfs.h> -#define MFD_HSU_A0_STEPPING 1 - #define HSU_DMA_BUF_SIZE 2048 #define chan_readl(chan, offset) readl(chan->reg + offset) @@ -51,7 +47,10 @@ #define mfd_readl(obj, offset) readl(obj->reg + offset) #define mfd_writel(obj, offset, val) writel(val, obj->reg + offset) -#define HSU_DMA_TIMEOUT_CHECK_FREQ (HZ/10) +static int hsu_dma_enable; +module_param(hsu_dma_enable, int, 0); +MODULE_PARM_DESC(hsu_dma_enable, "It is a bitmap to set working mode, if \ +bit[x] is 1, then port[x] will work in DMA mode, otherwise in PIO mode."); struct hsu_dma_buffer { u8 *buf; @@ -65,7 +64,6 @@ struct hsu_dma_chan { enum dma_data_direction dirt; struct uart_hsu_port *uport; void __iomem *reg; - struct timer_list rx_timer; /* only needed by RX channel */ }; struct uart_hsu_port { @@ -355,8 +353,6 @@ void hsu_dma_start_rx_chan(struct hsu_dma_chan *rxc, struct hsu_dma_buffer *dbuf | (0x1 << 24) /* timeout bit, see HSU Errata 1 */ ); chan_writel(rxc, HSU_CH_CR, 0x3); - - mod_timer(&rxc->rx_timer, jiffies + HSU_DMA_TIMEOUT_CHECK_FREQ); } /* Protected by spin_lock_irqsave(port->lock) */ @@ -420,7 +416,6 @@ void hsu_dma_rx(struct uart_hsu_port *up, u32 int_sts) chan_writel(chan, HSU_CH_CR, 0x3); return; } - del_timer(&chan->rx_timer); dma_sync_single_for_cpu(port->dev, dbuf->dma_addr, dbuf->dma_size, DMA_FROM_DEVICE); @@ -448,8 +443,6 @@ void hsu_dma_rx(struct uart_hsu_port *up, u32 int_sts) tty_flip_buffer_push(tty); chan_writel(chan, HSU_CH_CR, 0x3); - chan->rx_timer.expires = jiffies + HSU_DMA_TIMEOUT_CHECK_FREQ; - add_timer(&chan->rx_timer); } @@ -551,16 +544,9 @@ static void transmit_chars(struct uart_hsu_port *up) return; } -#ifndef MFD_HSU_A0_STEPPING + /* The IRQ is for TX FIFO half-empty */ count = up->port.fifosize / 2; -#else - /* - * A0 only supports fully empty IRQ, and the first char written - * into it won't clear the EMPT bit, so we may need be cautious - * by useing a shorter buffer - */ - count = up->port.fifosize - 4; -#endif + do { serial_out(up, UART_TX, xmit->buf[xmit->tail]); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); @@ -769,9 +755,8 @@ static void serial_hsu_break_ctl(struct uart_port *port, int break_state) /* * What special to do: * 1. chose the 64B fifo mode - * 2. make sure not to select half empty mode for A0 stepping - * 3. start dma or pio depends on configuration - * 4. we only allocate dma memory when needed + * 2. start dma or pio depends on configuration + * 3. we only allocate dma memory when needed */ static int serial_hsu_startup(struct uart_port *port) { @@ -870,8 +855,6 @@ static void serial_hsu_shutdown(struct uart_port *port) container_of(port, struct uart_hsu_port, port); unsigned long flags; - del_timer_sync(&up->rxc->rx_timer); - /* Disable interrupts from this port */ up->ier = 0; serial_out(up, UART_IER, 0); @@ -977,10 +960,6 @@ serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios, fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_32B; fcr |= UART_FCR_HSU_64B_FIFO; -#ifdef MFD_HSU_A0_STEPPING - /* A0 doesn't support half empty IRQ */ - fcr |= UART_FCR_FULL_EMPT_TXI; -#endif /* * Ok, we're now changing the port state. 
Do it with @@ -1343,28 +1322,6 @@ err_disable: return ret; } -static void hsu_dma_rx_timeout(unsigned long data) -{ - struct hsu_dma_chan *chan = (void *)data; - struct uart_hsu_port *up = chan->uport; - struct hsu_dma_buffer *dbuf = &up->rxbuf; - int count = 0; - unsigned long flags; - - spin_lock_irqsave(&up->port.lock, flags); - - count = chan_readl(chan, HSU_CH_D0SAR) - dbuf->dma_addr; - - if (!count) { - mod_timer(&chan->rx_timer, jiffies + HSU_DMA_TIMEOUT_CHECK_FREQ); - goto exit; - } - - hsu_dma_rx(up, 0); -exit: - spin_unlock_irqrestore(&up->port.lock, flags); -} - static void hsu_global_init(void) { struct hsu_port *hsu; @@ -1415,6 +1372,12 @@ static void hsu_global_init(void) serial_hsu_ports[i] = uport; uport->index = i; + + if (hsu_dma_enable & (1<<i)) + uport->use_dma = 1; + else + uport->use_dma = 0; + uport++; } @@ -1427,12 +1390,6 @@ static void hsu_global_init(void) dchan->reg = hsu->reg + HSU_DMA_CHANS_REG_OFFSET + i * HSU_DMA_CHANS_REG_LENGTH; - /* Work around for RX */ - if (dchan->dirt == DMA_FROM_DEVICE) { - init_timer(&dchan->rx_timer); - dchan->rx_timer.function = hsu_dma_rx_timeout; - dchan->rx_timer.data = (unsigned long)dchan; - } dchan++; } diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c index 126ec7f568e..a0bcd8a3758 100644 --- a/drivers/tty/serial/mpc52xx_uart.c +++ b/drivers/tty/serial/mpc52xx_uart.c @@ -1302,8 +1302,7 @@ static struct of_device_id mpc52xx_uart_of_match[] = { {}, }; -static int __devinit -mpc52xx_uart_of_probe(struct platform_device *op, const struct of_device_id *match) +static int __devinit mpc52xx_uart_of_probe(struct platform_device *op) { int idx = -1; unsigned int uartclk; @@ -1311,8 +1310,6 @@ mpc52xx_uart_of_probe(struct platform_device *op, const struct of_device_id *mat struct resource res; int ret; - dev_dbg(&op->dev, "mpc52xx_uart_probe(op=%p, match=%p)\n", op, match); - /* Check validity & presence */ for (idx = 0; idx < MPC52xx_PSC_MAXNUM; idx++) if (mpc52xx_uart_nodes[idx] == op->dev.of_node) @@ -1453,7 +1450,7 @@ mpc52xx_uart_of_enumerate(void) MODULE_DEVICE_TABLE(of, mpc52xx_uart_of_match); -static struct of_platform_driver mpc52xx_uart_of_driver = { +static struct platform_driver mpc52xx_uart_of_driver = { .probe = mpc52xx_uart_of_probe, .remove = mpc52xx_uart_of_remove, #ifdef CONFIG_PM @@ -1497,9 +1494,9 @@ mpc52xx_uart_init(void) return ret; } - ret = of_register_platform_driver(&mpc52xx_uart_of_driver); + ret = platform_driver_register(&mpc52xx_uart_of_driver); if (ret) { - printk(KERN_ERR "%s: of_register_platform_driver failed (%i)\n", + printk(KERN_ERR "%s: platform_driver_register failed (%i)\n", __FILE__, ret); uart_unregister_driver(&mpc52xx_uart_driver); return ret; @@ -1514,7 +1511,7 @@ mpc52xx_uart_exit(void) if (psc_ops->fifoc_uninit) psc_ops->fifoc_uninit(); - of_unregister_platform_driver(&mpc52xx_uart_of_driver); + platform_driver_unregister(&mpc52xx_uart_of_driver); uart_unregister_driver(&mpc52xx_uart_driver); } diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c index b62857bf2fd..37e13c3d91d 100644 --- a/drivers/tty/serial/mrst_max3110.c +++ b/drivers/tty/serial/mrst_max3110.c @@ -51,7 +51,7 @@ struct uart_max3110 { struct uart_port port; struct spi_device *spi; - char name[24]; + char name[SPI_NAME_SIZE]; wait_queue_head_t wq; struct task_struct *main_thread; diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c new file mode 100644 index 00000000000..2e7fc9cee9c --- /dev/null +++ 
b/drivers/tty/serial/msm_serial_hs.c @@ -0,0 +1,1880 @@ +/* + * MSM 7k/8k High speed uart driver + * + * Copyright (c) 2007-2011, Code Aurora Forum. All rights reserved. + * Copyright (c) 2008 Google Inc. + * Modified: Nick Pelly <npelly@google.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * Has optional support for uart power management independent of linux + * suspend/resume: + * + * RX wakeup. + * UART wakeup can be triggered by RX activity (using a wakeup GPIO on the + * UART RX pin). This should only be used if there is not a wakeup + * GPIO on the UART CTS, and the first RX byte is known (for example, with the + * Bluetooth Texas Instruments HCILL protocol), since the first RX byte will + * always be lost. RTS will be asserted even while the UART is off in this mode + * of operation. See msm_serial_hs_platform_data.rx_wakeup_irq. + */ + +#include <linux/module.h> + +#include <linux/serial.h> +#include <linux/serial_core.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/io.h> +#include <linux/ioport.h> +#include <linux/kernel.h> +#include <linux/timer.h> +#include <linux/clk.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/dma-mapping.h> +#include <linux/dmapool.h> +#include <linux/wait.h> +#include <linux/workqueue.h> + +#include <linux/atomic.h> +#include <asm/irq.h> +#include <asm/system.h> + +#include <mach/hardware.h> +#include <mach/dma.h> +#include <linux/platform_data/msm_serial_hs.h> + +/* HSUART Registers */ +#define UARTDM_MR1_ADDR 0x0 +#define UARTDM_MR2_ADDR 0x4 + +/* Data Mover result codes */ +#define RSLT_FIFO_CNTR_BMSK (0xE << 28) +#define RSLT_VLD BIT(1) + +/* write only register */ +#define UARTDM_CSR_ADDR 0x8 +#define UARTDM_CSR_115200 0xFF +#define UARTDM_CSR_57600 0xEE +#define UARTDM_CSR_38400 0xDD +#define UARTDM_CSR_28800 0xCC +#define UARTDM_CSR_19200 0xBB +#define UARTDM_CSR_14400 0xAA +#define UARTDM_CSR_9600 0x99 +#define UARTDM_CSR_7200 0x88 +#define UARTDM_CSR_4800 0x77 +#define UARTDM_CSR_3600 0x66 +#define UARTDM_CSR_2400 0x55 +#define UARTDM_CSR_1200 0x44 +#define UARTDM_CSR_600 0x33 +#define UARTDM_CSR_300 0x22 +#define UARTDM_CSR_150 0x11 +#define UARTDM_CSR_75 0x00 + +/* write only register */ +#define UARTDM_TF_ADDR 0x70 +#define UARTDM_TF2_ADDR 0x74 +#define UARTDM_TF3_ADDR 0x78 +#define UARTDM_TF4_ADDR 0x7C + +/* write only register */ +#define UARTDM_CR_ADDR 0x10 +#define UARTDM_IMR_ADDR 0x14 + +#define UARTDM_IPR_ADDR 0x18 +#define UARTDM_TFWR_ADDR 0x1c +#define UARTDM_RFWR_ADDR 0x20 +#define UARTDM_HCR_ADDR 0x24 +#define UARTDM_DMRX_ADDR 0x34 +#define UARTDM_IRDA_ADDR 0x38 +#define UARTDM_DMEN_ADDR 0x3c + +/* UART_DM_NO_CHARS_FOR_TX */ +#define UARTDM_NCF_TX_ADDR 0x40 + +#define UARTDM_BADR_ADDR 0x44 + +#define UARTDM_SIM_CFG_ADDR 0x80 +/* Read Only register */ +#define UARTDM_SR_ADDR 0x8 + +/* Read Only register */ +#define UARTDM_RF_ADDR 0x70 +#define UARTDM_RF2_ADDR 0x74 +#define UARTDM_RF3_ADDR 0x78 +#define UARTDM_RF4_ADDR 0x7C + +/* Read Only register */ +#define UARTDM_MISR_ADDR 0x10 + +/* Read Only register */ +#define 
UARTDM_ISR_ADDR 0x14 +#define UARTDM_RX_TOTAL_SNAP_ADDR 0x38 + +#define UARTDM_RXFS_ADDR 0x50 + +/* Register field Mask Mapping */ +#define UARTDM_SR_PAR_FRAME_BMSK BIT(5) +#define UARTDM_SR_OVERRUN_BMSK BIT(4) +#define UARTDM_SR_TXEMT_BMSK BIT(3) +#define UARTDM_SR_TXRDY_BMSK BIT(2) +#define UARTDM_SR_RXRDY_BMSK BIT(0) + +#define UARTDM_CR_TX_DISABLE_BMSK BIT(3) +#define UARTDM_CR_RX_DISABLE_BMSK BIT(1) +#define UARTDM_CR_TX_EN_BMSK BIT(2) +#define UARTDM_CR_RX_EN_BMSK BIT(0) + +/* UARTDM_CR channel_comman bit value (register field is bits 8:4) */ +#define RESET_RX 0x10 +#define RESET_TX 0x20 +#define RESET_ERROR_STATUS 0x30 +#define RESET_BREAK_INT 0x40 +#define START_BREAK 0x50 +#define STOP_BREAK 0x60 +#define RESET_CTS 0x70 +#define RESET_STALE_INT 0x80 +#define RFR_LOW 0xD0 +#define RFR_HIGH 0xE0 +#define CR_PROTECTION_EN 0x100 +#define STALE_EVENT_ENABLE 0x500 +#define STALE_EVENT_DISABLE 0x600 +#define FORCE_STALE_EVENT 0x400 +#define CLEAR_TX_READY 0x300 +#define RESET_TX_ERROR 0x800 +#define RESET_TX_DONE 0x810 + +#define UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK 0xffffff00 +#define UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK 0x3f +#define UARTDM_MR1_CTS_CTL_BMSK 0x40 +#define UARTDM_MR1_RX_RDY_CTL_BMSK 0x80 + +#define UARTDM_MR2_ERROR_MODE_BMSK 0x40 +#define UARTDM_MR2_BITS_PER_CHAR_BMSK 0x30 + +/* bits per character configuration */ +#define FIVE_BPC (0 << 4) +#define SIX_BPC (1 << 4) +#define SEVEN_BPC (2 << 4) +#define EIGHT_BPC (3 << 4) + +#define UARTDM_MR2_STOP_BIT_LEN_BMSK 0xc +#define STOP_BIT_ONE (1 << 2) +#define STOP_BIT_TWO (3 << 2) + +#define UARTDM_MR2_PARITY_MODE_BMSK 0x3 + +/* Parity configuration */ +#define NO_PARITY 0x0 +#define EVEN_PARITY 0x1 +#define ODD_PARITY 0x2 +#define SPACE_PARITY 0x3 + +#define UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK 0xffffff80 +#define UARTDM_IPR_STALE_LSB_BMSK 0x1f + +/* These can be used for both ISR and IMR register */ +#define UARTDM_ISR_TX_READY_BMSK BIT(7) +#define UARTDM_ISR_CURRENT_CTS_BMSK BIT(6) +#define UARTDM_ISR_DELTA_CTS_BMSK BIT(5) +#define UARTDM_ISR_RXLEV_BMSK BIT(4) +#define UARTDM_ISR_RXSTALE_BMSK BIT(3) +#define UARTDM_ISR_RXBREAK_BMSK BIT(2) +#define UARTDM_ISR_RXHUNT_BMSK BIT(1) +#define UARTDM_ISR_TXLEV_BMSK BIT(0) + +/* Field definitions for UART_DM_DMEN*/ +#define UARTDM_TX_DM_EN_BMSK 0x1 +#define UARTDM_RX_DM_EN_BMSK 0x2 + +#define UART_FIFOSIZE 64 +#define UARTCLK 7372800 + +/* Rx DMA request states */ +enum flush_reason { + FLUSH_NONE, + FLUSH_DATA_READY, + FLUSH_DATA_INVALID, /* values after this indicate invalid data */ + FLUSH_IGNORE = FLUSH_DATA_INVALID, + FLUSH_STOP, + FLUSH_SHUTDOWN, +}; + +/* UART clock states */ +enum msm_hs_clk_states_e { + MSM_HS_CLK_PORT_OFF, /* port not in use */ + MSM_HS_CLK_OFF, /* clock disabled */ + MSM_HS_CLK_REQUEST_OFF, /* disable after TX and RX flushed */ + MSM_HS_CLK_ON, /* clock enabled */ +}; + +/* Track the forced RXSTALE flush during clock off sequence. + * These states are only valid during MSM_HS_CLK_REQUEST_OFF */ +enum msm_hs_clk_req_off_state_e { + CLK_REQ_OFF_START, + CLK_REQ_OFF_RXSTALE_ISSUED, + CLK_REQ_OFF_FLUSH_ISSUED, + CLK_REQ_OFF_RXSTALE_FLUSHED, +}; + +/** + * struct msm_hs_tx + * @tx_ready_int_en: ok to dma more tx? 
+ * @dma_in_flight: tx dma in progress + * @xfer: top level DMA command pointer structure + * @command_ptr: third level command struct pointer + * @command_ptr_ptr: second level command list struct pointer + * @mapped_cmd_ptr: DMA view of third level command struct + * @mapped_cmd_ptr_ptr: DMA view of second level command list struct + * @tx_count: number of bytes to transfer in DMA transfer + * @dma_base: DMA view of UART xmit buffer + * + * This structure describes a single Tx DMA transaction. MSM DMA + * commands have two levels of indirection. The top level command + * ptr points to a list of command ptr which in turn points to a + * single DMA 'command'. In our case each Tx transaction consists + * of a single second level pointer pointing to a 'box type' command. + */ +struct msm_hs_tx { + unsigned int tx_ready_int_en; + unsigned int dma_in_flight; + struct msm_dmov_cmd xfer; + dmov_box *command_ptr; + u32 *command_ptr_ptr; + dma_addr_t mapped_cmd_ptr; + dma_addr_t mapped_cmd_ptr_ptr; + int tx_count; + dma_addr_t dma_base; +}; + +/** + * struct msm_hs_rx + * @flush: Rx DMA request state + * @xfer: top level DMA command pointer structure + * @cmdptr_dmaaddr: DMA view of second level command structure + * @command_ptr: third level DMA command pointer structure + * @command_ptr_ptr: second level DMA command list pointer + * @mapped_cmd_ptr: DMA view of the third level command structure + * @wait: wait for DMA completion before shutdown + * @buffer: destination buffer for RX DMA + * @rbuffer: DMA view of buffer + * @pool: dma pool out of which coherent rx buffer is allocated + * @tty_work: private work-queue for tty flip buffer push task + * + * This structure describes a single Rx DMA transaction. Rx DMA + * transactions use box mode DMA commands. + */ +struct msm_hs_rx { + enum flush_reason flush; + struct msm_dmov_cmd xfer; + dma_addr_t cmdptr_dmaaddr; + dmov_box *command_ptr; + u32 *command_ptr_ptr; + dma_addr_t mapped_cmd_ptr; + wait_queue_head_t wait; + dma_addr_t rbuffer; + unsigned char *buffer; + struct dma_pool *pool; + struct work_struct tty_work; +}; + +/** + * struct msm_hs_rx_wakeup + * @irq: IRQ line to be configured as interrupt source on Rx activity + * @ignore: boolean value. 1 = ignore the wakeup interrupt + * @rx_to_inject: extra character to be inserted to Rx tty on wakeup + * @inject_rx: 1 = insert rx_to_inject. 0 = do not insert extra character + * + * This is an optional structure required for UART Rx GPIO IRQ based + * wakeup from low power state. UART wakeup can be triggered by RX activity + * (using a wakeup GPIO on the UART RX pin). This should only be used if + * there is not a wakeup GPIO on the UART CTS, and the first RX byte is + * known (eg., with the Bluetooth Texas Instruments HCILL protocol), + * since the first RX byte will always be lost. RTS will be asserted even + * while the UART is clocked off in this mode of operation. 
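 *
 * In practice the lost byte is typically the protocol's wake-up
 * indication (e.g. with TI HCILL), and the platform can compensate by
 * setting @inject_rx and @rx_to_inject so that the known byte is
 * re-inserted into the tty when the wakeup interrupt fires (see
 * msm_hs_rx_wakeup_isr()).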
+ */ +struct msm_hs_rx_wakeup { + int irq; /* < 0 indicates low power wakeup disabled */ + unsigned char ignore; + unsigned char inject_rx; + char rx_to_inject; +}; + +/** + * struct msm_hs_port + * @uport: embedded uart port structure + * @imr_reg: shadow value of UARTDM_IMR + * @clk: uart input clock handle + * @tx: Tx transaction related data structure + * @rx: Rx transaction related data structure + * @dma_tx_channel: Tx DMA command channel + * @dma_rx_channel Rx DMA command channel + * @dma_tx_crci: Tx channel rate control interface number + * @dma_rx_crci: Rx channel rate control interface number + * @clk_off_timer: Timer to poll DMA event completion before clock off + * @clk_off_delay: clk_off_timer poll interval + * @clk_state: overall clock state + * @clk_req_off_state: post flush clock states + * @rx_wakeup: optional rx_wakeup feature related data + * @exit_lpm_cb: optional callback to exit low power mode + * + * Low level serial port structure. + */ +struct msm_hs_port { + struct uart_port uport; + unsigned long imr_reg; + struct clk *clk; + struct msm_hs_tx tx; + struct msm_hs_rx rx; + + int dma_tx_channel; + int dma_rx_channel; + int dma_tx_crci; + int dma_rx_crci; + + struct hrtimer clk_off_timer; + ktime_t clk_off_delay; + enum msm_hs_clk_states_e clk_state; + enum msm_hs_clk_req_off_state_e clk_req_off_state; + + struct msm_hs_rx_wakeup rx_wakeup; + void (*exit_lpm_cb)(struct uart_port *); +}; + +#define MSM_UARTDM_BURST_SIZE 16 /* DM burst size (in bytes) */ +#define UARTDM_TX_BUF_SIZE UART_XMIT_SIZE +#define UARTDM_RX_BUF_SIZE 512 + +#define UARTDM_NR 2 + +static struct msm_hs_port q_uart_port[UARTDM_NR]; +static struct platform_driver msm_serial_hs_platform_driver; +static struct uart_driver msm_hs_driver; +static struct uart_ops msm_hs_ops; +static struct workqueue_struct *msm_hs_workqueue; + +#define UARTDM_TO_MSM(uart_port) \ + container_of((uart_port), struct msm_hs_port, uport) + +static unsigned int use_low_power_rx_wakeup(struct msm_hs_port + *msm_uport) +{ + return (msm_uport->rx_wakeup.irq >= 0); +} + +static unsigned int msm_hs_read(struct uart_port *uport, + unsigned int offset) +{ + return ioread32(uport->membase + offset); +} + +static void msm_hs_write(struct uart_port *uport, unsigned int offset, + unsigned int value) +{ + iowrite32(value, uport->membase + offset); +} + +static void msm_hs_release_port(struct uart_port *port) +{ + iounmap(port->membase); +} + +static int msm_hs_request_port(struct uart_port *port) +{ + port->membase = ioremap(port->mapbase, PAGE_SIZE); + if (unlikely(!port->membase)) + return -ENOMEM; + + /* configure the CR Protection to Enable */ + msm_hs_write(port, UARTDM_CR_ADDR, CR_PROTECTION_EN); + return 0; +} + +static int __devexit msm_hs_remove(struct platform_device *pdev) +{ + + struct msm_hs_port *msm_uport; + struct device *dev; + + if (pdev->id < 0 || pdev->id >= UARTDM_NR) { + printk(KERN_ERR "Invalid plaform device ID = %d\n", pdev->id); + return -EINVAL; + } + + msm_uport = &q_uart_port[pdev->id]; + dev = msm_uport->uport.dev; + + dma_unmap_single(dev, msm_uport->rx.mapped_cmd_ptr, sizeof(dmov_box), + DMA_TO_DEVICE); + dma_pool_free(msm_uport->rx.pool, msm_uport->rx.buffer, + msm_uport->rx.rbuffer); + dma_pool_destroy(msm_uport->rx.pool); + + dma_unmap_single(dev, msm_uport->rx.cmdptr_dmaaddr, sizeof(u32 *), + DMA_TO_DEVICE); + dma_unmap_single(dev, msm_uport->tx.mapped_cmd_ptr_ptr, sizeof(u32 *), + DMA_TO_DEVICE); + dma_unmap_single(dev, msm_uport->tx.mapped_cmd_ptr, sizeof(dmov_box), + DMA_TO_DEVICE); + + 
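	/*
	 * All DMA mappings and the RX buffer pool have been released above;
	 * now detach the port from the serial core, then free the remaining
	 * command structures and unmap the registers below.
	 */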
uart_remove_one_port(&msm_hs_driver, &msm_uport->uport); + clk_put(msm_uport->clk); + + /* Free the tx resources */ + kfree(msm_uport->tx.command_ptr); + kfree(msm_uport->tx.command_ptr_ptr); + + /* Free the rx resources */ + kfree(msm_uport->rx.command_ptr); + kfree(msm_uport->rx.command_ptr_ptr); + + iounmap(msm_uport->uport.membase); + + return 0; +} + +static int msm_hs_init_clk_locked(struct uart_port *uport) +{ + int ret; + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + + ret = clk_enable(msm_uport->clk); + if (ret) { + printk(KERN_ERR "Error could not turn on UART clk\n"); + return ret; + } + + /* Set up the MREG/NREG/DREG/MNDREG */ + ret = clk_set_rate(msm_uport->clk, uport->uartclk); + if (ret) { + printk(KERN_WARNING "Error setting clock rate on UART\n"); + clk_disable(msm_uport->clk); + return ret; + } + + msm_uport->clk_state = MSM_HS_CLK_ON; + return 0; +} + +/* Enable and Disable clocks (Used for power management) */ +static void msm_hs_pm(struct uart_port *uport, unsigned int state, + unsigned int oldstate) +{ + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + + if (use_low_power_rx_wakeup(msm_uport) || + msm_uport->exit_lpm_cb) + return; /* ignore linux PM states, + use msm_hs_request_clock API */ + + switch (state) { + case 0: + clk_enable(msm_uport->clk); + break; + case 3: + clk_disable(msm_uport->clk); + break; + default: + dev_err(uport->dev, "msm_serial: Unknown PM state %d\n", + state); + } +} + +/* + * programs the UARTDM_CSR register with correct bit rates + * + * Interrupts should be disabled before we are called, as + * we modify Set Baud rate + * Set receive stale interrupt level, dependant on Bit Rate + * Goal is to have around 8 ms before indicate stale. + * roundup (((Bit Rate * .008) / 10) + 1 + */ +static void msm_hs_set_bps_locked(struct uart_port *uport, + unsigned int bps) +{ + unsigned long rxstale; + unsigned long data; + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + + switch (bps) { + case 300: + msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_75); + rxstale = 1; + break; + case 600: + msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_150); + rxstale = 1; + break; + case 1200: + msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_300); + rxstale = 1; + break; + case 2400: + msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_600); + rxstale = 1; + break; + case 4800: + msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_1200); + rxstale = 1; + break; + case 9600: + msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_2400); + rxstale = 2; + break; + case 14400: + msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_3600); + rxstale = 3; + break; + case 19200: + msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_4800); + rxstale = 4; + break; + case 28800: + msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_7200); + rxstale = 6; + break; + case 38400: + msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_9600); + rxstale = 8; + break; + case 57600: + msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_14400); + rxstale = 16; + break; + case 76800: + msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_19200); + rxstale = 16; + break; + case 115200: + msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_28800); + rxstale = 31; + break; + case 230400: + msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_57600); + rxstale = 31; + break; + case 460800: + msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_115200); + rxstale = 31; + break; + case 4000000: + case 3686400: + case 3200000: + case 3500000: + case 3000000: + case 2500000: + case 1500000: + case 1152000: + case 1000000: + 
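	/*
	 * Every rate above 460800 shares the fastest CSR divider setting;
	 * the actual bit rate is then selected by scaling the input clock
	 * (uport->uartclk = bps * 16, applied via clk_set_rate() below).
	 */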
case 921600: + msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_115200); + rxstale = 31; + break; + default: + msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_2400); + /* default to 9600 */ + bps = 9600; + rxstale = 2; + break; + } + if (bps > 460800) + uport->uartclk = bps * 16; + else + uport->uartclk = UARTCLK; + + if (clk_set_rate(msm_uport->clk, uport->uartclk)) { + printk(KERN_WARNING "Error setting clock rate on UART\n"); + return; + } + + data = rxstale & UARTDM_IPR_STALE_LSB_BMSK; + data |= UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK & (rxstale << 2); + + msm_hs_write(uport, UARTDM_IPR_ADDR, data); +} + +/* + * termios : new ktermios + * oldtermios: old ktermios previous setting + * + * Configure the serial port + */ +static void msm_hs_set_termios(struct uart_port *uport, + struct ktermios *termios, + struct ktermios *oldtermios) +{ + unsigned int bps; + unsigned long data; + unsigned long flags; + unsigned int c_cflag = termios->c_cflag; + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + + spin_lock_irqsave(&uport->lock, flags); + clk_enable(msm_uport->clk); + + /* 300 is the minimum baud support by the driver */ + bps = uart_get_baud_rate(uport, termios, oldtermios, 200, 4000000); + + /* Temporary remapping 200 BAUD to 3.2 mbps */ + if (bps == 200) + bps = 3200000; + + msm_hs_set_bps_locked(uport, bps); + + data = msm_hs_read(uport, UARTDM_MR2_ADDR); + data &= ~UARTDM_MR2_PARITY_MODE_BMSK; + /* set parity */ + if (PARENB == (c_cflag & PARENB)) { + if (PARODD == (c_cflag & PARODD)) + data |= ODD_PARITY; + else if (CMSPAR == (c_cflag & CMSPAR)) + data |= SPACE_PARITY; + else + data |= EVEN_PARITY; + } + + /* Set bits per char */ + data &= ~UARTDM_MR2_BITS_PER_CHAR_BMSK; + + switch (c_cflag & CSIZE) { + case CS5: + data |= FIVE_BPC; + break; + case CS6: + data |= SIX_BPC; + break; + case CS7: + data |= SEVEN_BPC; + break; + default: + data |= EIGHT_BPC; + break; + } + /* stop bits */ + if (c_cflag & CSTOPB) { + data |= STOP_BIT_TWO; + } else { + /* otherwise 1 stop bit */ + data |= STOP_BIT_ONE; + } + data |= UARTDM_MR2_ERROR_MODE_BMSK; + /* write parity/bits per char/stop bit configuration */ + msm_hs_write(uport, UARTDM_MR2_ADDR, data); + + /* Configure HW flow control */ + data = msm_hs_read(uport, UARTDM_MR1_ADDR); + + data &= ~(UARTDM_MR1_CTS_CTL_BMSK | UARTDM_MR1_RX_RDY_CTL_BMSK); + + if (c_cflag & CRTSCTS) { + data |= UARTDM_MR1_CTS_CTL_BMSK; + data |= UARTDM_MR1_RX_RDY_CTL_BMSK; + } + + msm_hs_write(uport, UARTDM_MR1_ADDR, data); + + uport->ignore_status_mask = termios->c_iflag & INPCK; + uport->ignore_status_mask |= termios->c_iflag & IGNPAR; + uport->read_status_mask = (termios->c_cflag & CREAD); + + msm_hs_write(uport, UARTDM_IMR_ADDR, 0); + + /* Set Transmit software time out */ + uart_update_timeout(uport, c_cflag, bps); + + msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX); + msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX); + + if (msm_uport->rx.flush == FLUSH_NONE) { + msm_uport->rx.flush = FLUSH_IGNORE; + msm_dmov_stop_cmd(msm_uport->dma_rx_channel, NULL, 1); + } + + msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg); + + clk_disable(msm_uport->clk); + spin_unlock_irqrestore(&uport->lock, flags); +} + +/* + * Standard API, Transmitter + * Any character in the transmit shift register is sent + */ +static unsigned int msm_hs_tx_empty(struct uart_port *uport) +{ + unsigned int data; + unsigned int ret = 0; + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + + clk_enable(msm_uport->clk); + + data = msm_hs_read(uport, UARTDM_SR_ADDR); + if (data & 
UARTDM_SR_TXEMT_BMSK) + ret = TIOCSER_TEMT; + + clk_disable(msm_uport->clk); + + return ret; +} + +/* + * Standard API, Stop transmitter. + * Any character in the transmit shift register is sent as + * well as the current data mover transfer . + */ +static void msm_hs_stop_tx_locked(struct uart_port *uport) +{ + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + + msm_uport->tx.tx_ready_int_en = 0; +} + +/* + * Standard API, Stop receiver as soon as possible. + * + * Function immediately terminates the operation of the + * channel receiver and any incoming characters are lost. None + * of the receiver status bits are affected by this command and + * characters that are already in the receive FIFO there. + */ +static void msm_hs_stop_rx_locked(struct uart_port *uport) +{ + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + unsigned int data; + + clk_enable(msm_uport->clk); + + /* disable dlink */ + data = msm_hs_read(uport, UARTDM_DMEN_ADDR); + data &= ~UARTDM_RX_DM_EN_BMSK; + msm_hs_write(uport, UARTDM_DMEN_ADDR, data); + + /* Disable the receiver */ + if (msm_uport->rx.flush == FLUSH_NONE) + msm_dmov_stop_cmd(msm_uport->dma_rx_channel, NULL, 1); + + if (msm_uport->rx.flush != FLUSH_SHUTDOWN) + msm_uport->rx.flush = FLUSH_STOP; + + clk_disable(msm_uport->clk); +} + +/* Transmit the next chunk of data */ +static void msm_hs_submit_tx_locked(struct uart_port *uport) +{ + int left; + int tx_count; + dma_addr_t src_addr; + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + struct msm_hs_tx *tx = &msm_uport->tx; + struct circ_buf *tx_buf = &msm_uport->uport.state->xmit; + + if (uart_circ_empty(tx_buf) || uport->state->port.tty->stopped) { + msm_hs_stop_tx_locked(uport); + return; + } + + tx->dma_in_flight = 1; + + tx_count = uart_circ_chars_pending(tx_buf); + + if (UARTDM_TX_BUF_SIZE < tx_count) + tx_count = UARTDM_TX_BUF_SIZE; + + left = UART_XMIT_SIZE - tx_buf->tail; + + if (tx_count > left) + tx_count = left; + + src_addr = tx->dma_base + tx_buf->tail; + dma_sync_single_for_device(uport->dev, src_addr, tx_count, + DMA_TO_DEVICE); + + tx->command_ptr->num_rows = (((tx_count + 15) >> 4) << 16) | + ((tx_count + 15) >> 4); + tx->command_ptr->src_row_addr = src_addr; + + dma_sync_single_for_device(uport->dev, tx->mapped_cmd_ptr, + sizeof(dmov_box), DMA_TO_DEVICE); + + *tx->command_ptr_ptr = CMD_PTR_LP | DMOV_CMD_ADDR(tx->mapped_cmd_ptr); + + dma_sync_single_for_device(uport->dev, tx->mapped_cmd_ptr_ptr, + sizeof(u32 *), DMA_TO_DEVICE); + + /* Save tx_count to use in Callback */ + tx->tx_count = tx_count; + msm_hs_write(uport, UARTDM_NCF_TX_ADDR, tx_count); + + /* Disable the tx_ready interrupt */ + msm_uport->imr_reg &= ~UARTDM_ISR_TX_READY_BMSK; + msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg); + msm_dmov_enqueue_cmd(msm_uport->dma_tx_channel, &tx->xfer); +} + +/* Start to receive the next chunk of data */ +static void msm_hs_start_rx_locked(struct uart_port *uport) +{ + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + + msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT); + msm_hs_write(uport, UARTDM_DMRX_ADDR, UARTDM_RX_BUF_SIZE); + msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_ENABLE); + msm_uport->imr_reg |= UARTDM_ISR_RXLEV_BMSK; + msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg); + + msm_uport->rx.flush = FLUSH_NONE; + msm_dmov_enqueue_cmd(msm_uport->dma_rx_channel, &msm_uport->rx.xfer); + + /* might have finished RX and be ready to clock off */ + hrtimer_start(&msm_uport->clk_off_timer, msm_uport->clk_off_delay, + HRTIMER_MODE_REL); +} + +/* 
Enable the transmitter Interrupt */ +static void msm_hs_start_tx_locked(struct uart_port *uport) +{ + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + + clk_enable(msm_uport->clk); + + if (msm_uport->exit_lpm_cb) + msm_uport->exit_lpm_cb(uport); + + if (msm_uport->tx.tx_ready_int_en == 0) { + msm_uport->tx.tx_ready_int_en = 1; + msm_hs_submit_tx_locked(uport); + } + + clk_disable(msm_uport->clk); +} + +/* + * This routine is called when we are done with a DMA transfer + * + * This routine is registered with Data mover when we set + * up a Data Mover transfer. It is called from Data mover ISR + * when the DMA transfer is done. + */ +static void msm_hs_dmov_tx_callback(struct msm_dmov_cmd *cmd_ptr, + unsigned int result, + struct msm_dmov_errdata *err) +{ + unsigned long flags; + struct msm_hs_port *msm_uport; + + /* DMA did not finish properly */ + WARN_ON((((result & RSLT_FIFO_CNTR_BMSK) >> 28) == 1) && + !(result & RSLT_VLD)); + + msm_uport = container_of(cmd_ptr, struct msm_hs_port, tx.xfer); + + spin_lock_irqsave(&msm_uport->uport.lock, flags); + clk_enable(msm_uport->clk); + + msm_uport->imr_reg |= UARTDM_ISR_TX_READY_BMSK; + msm_hs_write(&msm_uport->uport, UARTDM_IMR_ADDR, msm_uport->imr_reg); + + clk_disable(msm_uport->clk); + spin_unlock_irqrestore(&msm_uport->uport.lock, flags); +} + +/* + * This routine is called when we are done with a DMA transfer or the + * a flush has been sent to the data mover driver. + * + * This routine is registered with Data mover when we set up a Data Mover + * transfer. It is called from Data mover ISR when the DMA transfer is done. + */ +static void msm_hs_dmov_rx_callback(struct msm_dmov_cmd *cmd_ptr, + unsigned int result, + struct msm_dmov_errdata *err) +{ + int retval; + int rx_count; + unsigned long status; + unsigned int error_f = 0; + unsigned long flags; + unsigned int flush; + struct tty_struct *tty; + struct uart_port *uport; + struct msm_hs_port *msm_uport; + + msm_uport = container_of(cmd_ptr, struct msm_hs_port, rx.xfer); + uport = &msm_uport->uport; + + spin_lock_irqsave(&uport->lock, flags); + clk_enable(msm_uport->clk); + + tty = uport->state->port.tty; + + msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_DISABLE); + + status = msm_hs_read(uport, UARTDM_SR_ADDR); + + /* overflow is not connect to data in a FIFO */ + if (unlikely((status & UARTDM_SR_OVERRUN_BMSK) && + (uport->read_status_mask & CREAD))) { + tty_insert_flip_char(tty, 0, TTY_OVERRUN); + uport->icount.buf_overrun++; + error_f = 1; + } + + if (!(uport->ignore_status_mask & INPCK)) + status = status & ~(UARTDM_SR_PAR_FRAME_BMSK); + + if (unlikely(status & UARTDM_SR_PAR_FRAME_BMSK)) { + /* Can not tell difference between parity & frame error */ + uport->icount.parity++; + error_f = 1; + if (uport->ignore_status_mask & IGNPAR) + tty_insert_flip_char(tty, 0, TTY_PARITY); + } + + if (error_f) + msm_hs_write(uport, UARTDM_CR_ADDR, RESET_ERROR_STATUS); + + if (msm_uport->clk_req_off_state == CLK_REQ_OFF_FLUSH_ISSUED) + msm_uport->clk_req_off_state = CLK_REQ_OFF_RXSTALE_FLUSHED; + + flush = msm_uport->rx.flush; + if (flush == FLUSH_IGNORE) + msm_hs_start_rx_locked(uport); + if (flush == FLUSH_STOP) + msm_uport->rx.flush = FLUSH_SHUTDOWN; + if (flush >= FLUSH_DATA_INVALID) + goto out; + + rx_count = msm_hs_read(uport, UARTDM_RX_TOTAL_SNAP_ADDR); + + if (0 != (uport->read_status_mask & CREAD)) { + retval = tty_insert_flip_string(tty, msm_uport->rx.buffer, + rx_count); + BUG_ON(retval != rx_count); + } + + msm_hs_start_rx_locked(uport); + +out: + clk_disable(msm_uport->clk); + + 
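	/*
	 * The tty flip-buffer push is deferred to the driver's private
	 * workqueue (rx.tty_work, handled by msm_hs_tty_flip_buffer_work())
	 * and is only queued once the port lock has been dropped: with
	 * low_latency set in msm_hs_startup(), tty_flip_buffer_push() runs
	 * the receive path synchronously, so it must not be called from the
	 * DMA completion path while uport->lock is held.
	 */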
spin_unlock_irqrestore(&uport->lock, flags); + + if (flush < FLUSH_DATA_INVALID) + queue_work(msm_hs_workqueue, &msm_uport->rx.tty_work); +} + +static void msm_hs_tty_flip_buffer_work(struct work_struct *work) +{ + struct msm_hs_port *msm_uport = + container_of(work, struct msm_hs_port, rx.tty_work); + struct tty_struct *tty = msm_uport->uport.state->port.tty; + + tty_flip_buffer_push(tty); +} + +/* + * Standard API, Current states of modem control inputs + * + * Since CTS can be handled entirely by HARDWARE we always + * indicate clear to send and count on the TX FIFO to block when + * it fills up. + * + * - TIOCM_DCD + * - TIOCM_CTS + * - TIOCM_DSR + * - TIOCM_RI + * (Unsupported) DCD and DSR will return them high. RI will return low. + */ +static unsigned int msm_hs_get_mctrl_locked(struct uart_port *uport) +{ + return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS; +} + +/* + * True enables UART auto RFR, which indicates we are ready for data if the RX + * buffer is not full. False disables auto RFR, and deasserts RFR to indicate + * we are not ready for data. Must be called with UART clock on. + */ +static void set_rfr_locked(struct uart_port *uport, int auto_rfr) +{ + unsigned int data; + + data = msm_hs_read(uport, UARTDM_MR1_ADDR); + + if (auto_rfr) { + /* enable auto ready-for-receiving */ + data |= UARTDM_MR1_RX_RDY_CTL_BMSK; + msm_hs_write(uport, UARTDM_MR1_ADDR, data); + } else { + /* disable auto ready-for-receiving */ + data &= ~UARTDM_MR1_RX_RDY_CTL_BMSK; + msm_hs_write(uport, UARTDM_MR1_ADDR, data); + /* RFR is active low, set high */ + msm_hs_write(uport, UARTDM_CR_ADDR, RFR_HIGH); + } +} + +/* + * Standard API, used to set or clear RFR + */ +static void msm_hs_set_mctrl_locked(struct uart_port *uport, + unsigned int mctrl) +{ + unsigned int auto_rfr; + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + + clk_enable(msm_uport->clk); + + auto_rfr = TIOCM_RTS & mctrl ? 1 : 0; + set_rfr_locked(uport, auto_rfr); + + clk_disable(msm_uport->clk); +} + +/* Standard API, Enable modem status (CTS) interrupt */ +static void msm_hs_enable_ms_locked(struct uart_port *uport) +{ + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + + clk_enable(msm_uport->clk); + + /* Enable DELTA_CTS Interrupt */ + msm_uport->imr_reg |= UARTDM_ISR_DELTA_CTS_BMSK; + msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg); + + clk_disable(msm_uport->clk); + +} + +/* + * Standard API, Break Signal + * + * Control the transmission of a break signal. ctl eq 0 => break + * signal terminate ctl ne 0 => start break signal + */ +static void msm_hs_break_ctl(struct uart_port *uport, int ctl) +{ + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + + clk_enable(msm_uport->clk); + msm_hs_write(uport, UARTDM_CR_ADDR, ctl ? 
START_BREAK : STOP_BREAK); + clk_disable(msm_uport->clk); +} + +static void msm_hs_config_port(struct uart_port *uport, int cfg_flags) +{ + unsigned long flags; + + spin_lock_irqsave(&uport->lock, flags); + if (cfg_flags & UART_CONFIG_TYPE) { + uport->type = PORT_MSM; + msm_hs_request_port(uport); + } + spin_unlock_irqrestore(&uport->lock, flags); +} + +/* Handle CTS changes (Called from interrupt handler) */ +static void msm_hs_handle_delta_cts(struct uart_port *uport) +{ + unsigned long flags; + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + + spin_lock_irqsave(&uport->lock, flags); + clk_enable(msm_uport->clk); + + /* clear interrupt */ + msm_hs_write(uport, UARTDM_CR_ADDR, RESET_CTS); + uport->icount.cts++; + + clk_disable(msm_uport->clk); + spin_unlock_irqrestore(&uport->lock, flags); + + /* clear the IOCTL TIOCMIWAIT if called */ + wake_up_interruptible(&uport->state->port.delta_msr_wait); +} + +/* check if the TX path is flushed, and if so clock off + * returns 0 did not clock off, need to retry (still sending final byte) + * -1 did not clock off, do not retry + * 1 if we clocked off + */ +static int msm_hs_check_clock_off_locked(struct uart_port *uport) +{ + unsigned long sr_status; + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + struct circ_buf *tx_buf = &uport->state->xmit; + + /* Cancel if tx tty buffer is not empty, dma is in flight, + * or tx fifo is not empty, or rx fifo is not empty */ + if (msm_uport->clk_state != MSM_HS_CLK_REQUEST_OFF || + !uart_circ_empty(tx_buf) || msm_uport->tx.dma_in_flight || + (msm_uport->imr_reg & UARTDM_ISR_TXLEV_BMSK) || + !(msm_uport->imr_reg & UARTDM_ISR_RXLEV_BMSK)) { + return -1; + } + + /* Make sure the uart is finished with the last byte */ + sr_status = msm_hs_read(uport, UARTDM_SR_ADDR); + if (!(sr_status & UARTDM_SR_TXEMT_BMSK)) + return 0; /* retry */ + + /* Make sure forced RXSTALE flush complete */ + switch (msm_uport->clk_req_off_state) { + case CLK_REQ_OFF_START: + msm_uport->clk_req_off_state = CLK_REQ_OFF_RXSTALE_ISSUED; + msm_hs_write(uport, UARTDM_CR_ADDR, FORCE_STALE_EVENT); + return 0; /* RXSTALE flush not complete - retry */ + case CLK_REQ_OFF_RXSTALE_ISSUED: + case CLK_REQ_OFF_FLUSH_ISSUED: + return 0; /* RXSTALE flush not complete - retry */ + case CLK_REQ_OFF_RXSTALE_FLUSHED: + break; /* continue */ + } + + if (msm_uport->rx.flush != FLUSH_SHUTDOWN) { + if (msm_uport->rx.flush == FLUSH_NONE) + msm_hs_stop_rx_locked(uport); + return 0; /* come back later to really clock off */ + } + + /* we really want to clock off */ + clk_disable(msm_uport->clk); + msm_uport->clk_state = MSM_HS_CLK_OFF; + + if (use_low_power_rx_wakeup(msm_uport)) { + msm_uport->rx_wakeup.ignore = 1; + enable_irq(msm_uport->rx_wakeup.irq); + } + return 1; +} + +static enum hrtimer_restart msm_hs_clk_off_retry(struct hrtimer *timer) +{ + unsigned long flags; + int ret = HRTIMER_NORESTART; + struct msm_hs_port *msm_uport = container_of(timer, struct msm_hs_port, + clk_off_timer); + struct uart_port *uport = &msm_uport->uport; + + spin_lock_irqsave(&uport->lock, flags); + + if (!msm_hs_check_clock_off_locked(uport)) { + hrtimer_forward_now(timer, msm_uport->clk_off_delay); + ret = HRTIMER_RESTART; + } + + spin_unlock_irqrestore(&uport->lock, flags); + + return ret; +} + +static irqreturn_t msm_hs_isr(int irq, void *dev) +{ + unsigned long flags; + unsigned long isr_status; + struct msm_hs_port *msm_uport = dev; + struct uart_port *uport = &msm_uport->uport; + struct circ_buf *tx_buf = &uport->state->xmit; + struct msm_hs_tx *tx = 
&msm_uport->tx; + struct msm_hs_rx *rx = &msm_uport->rx; + + spin_lock_irqsave(&uport->lock, flags); + + isr_status = msm_hs_read(uport, UARTDM_MISR_ADDR); + + /* Uart RX starting */ + if (isr_status & UARTDM_ISR_RXLEV_BMSK) { + msm_uport->imr_reg &= ~UARTDM_ISR_RXLEV_BMSK; + msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg); + } + /* Stale rx interrupt */ + if (isr_status & UARTDM_ISR_RXSTALE_BMSK) { + msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_DISABLE); + msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT); + + if (msm_uport->clk_req_off_state == CLK_REQ_OFF_RXSTALE_ISSUED) + msm_uport->clk_req_off_state = + CLK_REQ_OFF_FLUSH_ISSUED; + if (rx->flush == FLUSH_NONE) { + rx->flush = FLUSH_DATA_READY; + msm_dmov_stop_cmd(msm_uport->dma_rx_channel, NULL, 1); + } + } + /* tx ready interrupt */ + if (isr_status & UARTDM_ISR_TX_READY_BMSK) { + /* Clear TX Ready */ + msm_hs_write(uport, UARTDM_CR_ADDR, CLEAR_TX_READY); + + if (msm_uport->clk_state == MSM_HS_CLK_REQUEST_OFF) { + msm_uport->imr_reg |= UARTDM_ISR_TXLEV_BMSK; + msm_hs_write(uport, UARTDM_IMR_ADDR, + msm_uport->imr_reg); + } + + /* Complete DMA TX transactions and submit new transactions */ + tx_buf->tail = (tx_buf->tail + tx->tx_count) & ~UART_XMIT_SIZE; + + tx->dma_in_flight = 0; + + uport->icount.tx += tx->tx_count; + if (tx->tx_ready_int_en) + msm_hs_submit_tx_locked(uport); + + if (uart_circ_chars_pending(tx_buf) < WAKEUP_CHARS) + uart_write_wakeup(uport); + } + if (isr_status & UARTDM_ISR_TXLEV_BMSK) { + /* TX FIFO is empty */ + msm_uport->imr_reg &= ~UARTDM_ISR_TXLEV_BMSK; + msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg); + if (!msm_hs_check_clock_off_locked(uport)) + hrtimer_start(&msm_uport->clk_off_timer, + msm_uport->clk_off_delay, + HRTIMER_MODE_REL); + } + + /* Change in CTS interrupt */ + if (isr_status & UARTDM_ISR_DELTA_CTS_BMSK) + msm_hs_handle_delta_cts(uport); + + spin_unlock_irqrestore(&uport->lock, flags); + + return IRQ_HANDLED; +} + +void msm_hs_request_clock_off_locked(struct uart_port *uport) +{ + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + + if (msm_uport->clk_state == MSM_HS_CLK_ON) { + msm_uport->clk_state = MSM_HS_CLK_REQUEST_OFF; + msm_uport->clk_req_off_state = CLK_REQ_OFF_START; + if (!use_low_power_rx_wakeup(msm_uport)) + set_rfr_locked(uport, 0); + msm_uport->imr_reg |= UARTDM_ISR_TXLEV_BMSK; + msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg); + } +} + +/** + * msm_hs_request_clock_off - request to (i.e. asynchronously) turn off uart + * clock once pending TX is flushed and Rx DMA command is terminated. + * @uport: uart_port structure for the device instance. + * + * This functions puts the device into a partially active low power mode. It + * waits to complete all pending tx transactions, flushes ongoing Rx DMA + * command and terminates UART side Rx transaction, puts UART HW in non DMA + * mode and then clocks off the device. A client calls this when no UART + * data is expected. msm_request_clock_on() must be called before any further + * UART can be sent or received. 
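 *
 * A minimal usage sketch (hypothetical client code, not part of this
 * driver): once the client knows the link is idle it calls
 *
 *	msm_hs_request_clock_off(uport);
 *
 * and, before queueing any further data,
 *
 *	msm_hs_request_clock_on(uport);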
+ */ +void msm_hs_request_clock_off(struct uart_port *uport) +{ + unsigned long flags; + + spin_lock_irqsave(&uport->lock, flags); + msm_hs_request_clock_off_locked(uport); + spin_unlock_irqrestore(&uport->lock, flags); +} + +void msm_hs_request_clock_on_locked(struct uart_port *uport) +{ + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + unsigned int data; + + switch (msm_uport->clk_state) { + case MSM_HS_CLK_OFF: + clk_enable(msm_uport->clk); + disable_irq_nosync(msm_uport->rx_wakeup.irq); + /* fall-through */ + case MSM_HS_CLK_REQUEST_OFF: + if (msm_uport->rx.flush == FLUSH_STOP || + msm_uport->rx.flush == FLUSH_SHUTDOWN) { + msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX); + data = msm_hs_read(uport, UARTDM_DMEN_ADDR); + data |= UARTDM_RX_DM_EN_BMSK; + msm_hs_write(uport, UARTDM_DMEN_ADDR, data); + } + hrtimer_try_to_cancel(&msm_uport->clk_off_timer); + if (msm_uport->rx.flush == FLUSH_SHUTDOWN) + msm_hs_start_rx_locked(uport); + if (!use_low_power_rx_wakeup(msm_uport)) + set_rfr_locked(uport, 1); + if (msm_uport->rx.flush == FLUSH_STOP) + msm_uport->rx.flush = FLUSH_IGNORE; + msm_uport->clk_state = MSM_HS_CLK_ON; + break; + case MSM_HS_CLK_ON: + break; + case MSM_HS_CLK_PORT_OFF: + break; + } +} + +/** + * msm_hs_request_clock_on - Switch the device from partially active low + * power mode to fully active (i.e. clock on) mode. + * @uport: uart_port structure for the device. + * + * This function switches on the input clock, puts UART HW into DMA mode + * and enqueues an Rx DMA command if the device was in partially active + * mode. It has no effect if called with the device in inactive state. + */ +void msm_hs_request_clock_on(struct uart_port *uport) +{ + unsigned long flags; + + spin_lock_irqsave(&uport->lock, flags); + msm_hs_request_clock_on_locked(uport); + spin_unlock_irqrestore(&uport->lock, flags); +} + +static irqreturn_t msm_hs_rx_wakeup_isr(int irq, void *dev) +{ + unsigned int wakeup = 0; + unsigned long flags; + struct msm_hs_port *msm_uport = dev; + struct uart_port *uport = &msm_uport->uport; + struct tty_struct *tty = NULL; + + spin_lock_irqsave(&uport->lock, flags); + if (msm_uport->clk_state == MSM_HS_CLK_OFF) { + /* ignore the first irq - it is a pending irq that occured + * before enable_irq() */ + if (msm_uport->rx_wakeup.ignore) + msm_uport->rx_wakeup.ignore = 0; + else + wakeup = 1; + } + + if (wakeup) { + /* the uart was clocked off during an rx, wake up and + * optionally inject char into tty rx */ + msm_hs_request_clock_on_locked(uport); + if (msm_uport->rx_wakeup.inject_rx) { + tty = uport->state->port.tty; + tty_insert_flip_char(tty, + msm_uport->rx_wakeup.rx_to_inject, + TTY_NORMAL); + queue_work(msm_hs_workqueue, &msm_uport->rx.tty_work); + } + } + + spin_unlock_irqrestore(&uport->lock, flags); + + return IRQ_HANDLED; +} + +static const char *msm_hs_type(struct uart_port *port) +{ + return (port->type == PORT_MSM) ? 
"MSM_HS_UART" : NULL; +} + +/* Called when port is opened */ +static int msm_hs_startup(struct uart_port *uport) +{ + int ret; + int rfr_level; + unsigned long flags; + unsigned int data; + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + struct circ_buf *tx_buf = &uport->state->xmit; + struct msm_hs_tx *tx = &msm_uport->tx; + struct msm_hs_rx *rx = &msm_uport->rx; + + rfr_level = uport->fifosize; + if (rfr_level > 16) + rfr_level -= 16; + + tx->dma_base = dma_map_single(uport->dev, tx_buf->buf, UART_XMIT_SIZE, + DMA_TO_DEVICE); + + /* do not let tty layer execute RX in global workqueue, use a + * dedicated workqueue managed by this driver */ + uport->state->port.tty->low_latency = 1; + + /* turn on uart clk */ + ret = msm_hs_init_clk_locked(uport); + if (unlikely(ret)) { + printk(KERN_ERR "Turning uartclk failed!\n"); + goto err_msm_hs_init_clk; + } + + /* Set auto RFR Level */ + data = msm_hs_read(uport, UARTDM_MR1_ADDR); + data &= ~UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK; + data &= ~UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK; + data |= (UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK & (rfr_level << 2)); + data |= (UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK & rfr_level); + msm_hs_write(uport, UARTDM_MR1_ADDR, data); + + /* Make sure RXSTALE count is non-zero */ + data = msm_hs_read(uport, UARTDM_IPR_ADDR); + if (!data) { + data |= 0x1f & UARTDM_IPR_STALE_LSB_BMSK; + msm_hs_write(uport, UARTDM_IPR_ADDR, data); + } + + /* Enable Data Mover Mode */ + data = UARTDM_TX_DM_EN_BMSK | UARTDM_RX_DM_EN_BMSK; + msm_hs_write(uport, UARTDM_DMEN_ADDR, data); + + /* Reset TX */ + msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX); + msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX); + msm_hs_write(uport, UARTDM_CR_ADDR, RESET_ERROR_STATUS); + msm_hs_write(uport, UARTDM_CR_ADDR, RESET_BREAK_INT); + msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT); + msm_hs_write(uport, UARTDM_CR_ADDR, RESET_CTS); + msm_hs_write(uport, UARTDM_CR_ADDR, RFR_LOW); + /* Turn on Uart Receiver */ + msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_RX_EN_BMSK); + + /* Turn on Uart Transmitter */ + msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_TX_EN_BMSK); + + /* Initialize the tx */ + tx->tx_ready_int_en = 0; + tx->dma_in_flight = 0; + + tx->xfer.complete_func = msm_hs_dmov_tx_callback; + tx->xfer.execute_func = NULL; + + tx->command_ptr->cmd = CMD_LC | + CMD_DST_CRCI(msm_uport->dma_tx_crci) | CMD_MODE_BOX; + + tx->command_ptr->src_dst_len = (MSM_UARTDM_BURST_SIZE << 16) + | (MSM_UARTDM_BURST_SIZE); + + tx->command_ptr->row_offset = (MSM_UARTDM_BURST_SIZE << 16); + + tx->command_ptr->dst_row_addr = + msm_uport->uport.mapbase + UARTDM_TF_ADDR; + + + /* Turn on Uart Receive */ + rx->xfer.complete_func = msm_hs_dmov_rx_callback; + rx->xfer.execute_func = NULL; + + rx->command_ptr->cmd = CMD_LC | + CMD_SRC_CRCI(msm_uport->dma_rx_crci) | CMD_MODE_BOX; + + rx->command_ptr->src_dst_len = (MSM_UARTDM_BURST_SIZE << 16) + | (MSM_UARTDM_BURST_SIZE); + rx->command_ptr->row_offset = MSM_UARTDM_BURST_SIZE; + rx->command_ptr->src_row_addr = uport->mapbase + UARTDM_RF_ADDR; + + + msm_uport->imr_reg |= UARTDM_ISR_RXSTALE_BMSK; + /* Enable reading the current CTS, no harm even if CTS is ignored */ + msm_uport->imr_reg |= UARTDM_ISR_CURRENT_CTS_BMSK; + + msm_hs_write(uport, UARTDM_TFWR_ADDR, 0); /* TXLEV on empty TX fifo */ + + + ret = request_irq(uport->irq, msm_hs_isr, IRQF_TRIGGER_HIGH, + "msm_hs_uart", msm_uport); + if (unlikely(ret)) { + printk(KERN_ERR "Request msm_hs_uart IRQ failed!\n"); + goto err_request_irq; + } + if (use_low_power_rx_wakeup(msm_uport)) { + ret = 
request_irq(msm_uport->rx_wakeup.irq, + msm_hs_rx_wakeup_isr, + IRQF_TRIGGER_FALLING, + "msm_hs_rx_wakeup", msm_uport); + if (unlikely(ret)) { + printk(KERN_ERR "Request msm_hs_rx_wakeup IRQ failed!\n"); + free_irq(uport->irq, msm_uport); + goto err_request_irq; + } + disable_irq(msm_uport->rx_wakeup.irq); + } + + spin_lock_irqsave(&uport->lock, flags); + + msm_hs_write(uport, UARTDM_RFWR_ADDR, 0); + msm_hs_start_rx_locked(uport); + + spin_unlock_irqrestore(&uport->lock, flags); + ret = pm_runtime_set_active(uport->dev); + if (ret) + dev_err(uport->dev, "set active error:%d\n", ret); + pm_runtime_enable(uport->dev); + + return 0; + +err_request_irq: +err_msm_hs_init_clk: + dma_unmap_single(uport->dev, tx->dma_base, + UART_XMIT_SIZE, DMA_TO_DEVICE); + return ret; +} + +/* Initialize tx and rx data structures */ +static int __devinit uartdm_init_port(struct uart_port *uport) +{ + int ret = 0; + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + struct msm_hs_tx *tx = &msm_uport->tx; + struct msm_hs_rx *rx = &msm_uport->rx; + + /* Allocate the command pointer. Needs to be 64 bit aligned */ + tx->command_ptr = kmalloc(sizeof(dmov_box), GFP_KERNEL | __GFP_DMA); + if (!tx->command_ptr) + return -ENOMEM; + + tx->command_ptr_ptr = kmalloc(sizeof(u32 *), GFP_KERNEL | __GFP_DMA); + if (!tx->command_ptr_ptr) { + ret = -ENOMEM; + goto err_tx_command_ptr_ptr; + } + + tx->mapped_cmd_ptr = dma_map_single(uport->dev, tx->command_ptr, + sizeof(dmov_box), DMA_TO_DEVICE); + tx->mapped_cmd_ptr_ptr = dma_map_single(uport->dev, + tx->command_ptr_ptr, + sizeof(u32 *), DMA_TO_DEVICE); + tx->xfer.cmdptr = DMOV_CMD_ADDR(tx->mapped_cmd_ptr_ptr); + + init_waitqueue_head(&rx->wait); + + rx->pool = dma_pool_create("rx_buffer_pool", uport->dev, + UARTDM_RX_BUF_SIZE, 16, 0); + if (!rx->pool) { + pr_err("%s(): cannot allocate rx_buffer_pool", __func__); + ret = -ENOMEM; + goto err_dma_pool_create; + } + + rx->buffer = dma_pool_alloc(rx->pool, GFP_KERNEL, &rx->rbuffer); + if (!rx->buffer) { + pr_err("%s(): cannot allocate rx->buffer", __func__); + ret = -ENOMEM; + goto err_dma_pool_alloc; + } + + /* Allocate the command pointer. 
Needs to be 64 bit aligned */ + rx->command_ptr = kmalloc(sizeof(dmov_box), GFP_KERNEL | __GFP_DMA); + if (!rx->command_ptr) { + pr_err("%s(): cannot allocate rx->command_ptr", __func__); + ret = -ENOMEM; + goto err_rx_command_ptr; + } + + rx->command_ptr_ptr = kmalloc(sizeof(u32 *), GFP_KERNEL | __GFP_DMA); + if (!rx->command_ptr_ptr) { + pr_err("%s(): cannot allocate rx->command_ptr_ptr", __func__); + ret = -ENOMEM; + goto err_rx_command_ptr_ptr; + } + + rx->command_ptr->num_rows = ((UARTDM_RX_BUF_SIZE >> 4) << 16) | + (UARTDM_RX_BUF_SIZE >> 4); + + rx->command_ptr->dst_row_addr = rx->rbuffer; + + rx->mapped_cmd_ptr = dma_map_single(uport->dev, rx->command_ptr, + sizeof(dmov_box), DMA_TO_DEVICE); + + *rx->command_ptr_ptr = CMD_PTR_LP | DMOV_CMD_ADDR(rx->mapped_cmd_ptr); + + rx->cmdptr_dmaaddr = dma_map_single(uport->dev, rx->command_ptr_ptr, + sizeof(u32 *), DMA_TO_DEVICE); + rx->xfer.cmdptr = DMOV_CMD_ADDR(rx->cmdptr_dmaaddr); + + INIT_WORK(&rx->tty_work, msm_hs_tty_flip_buffer_work); + + return ret; + +err_rx_command_ptr_ptr: + kfree(rx->command_ptr); +err_rx_command_ptr: + dma_pool_free(msm_uport->rx.pool, msm_uport->rx.buffer, + msm_uport->rx.rbuffer); +err_dma_pool_alloc: + dma_pool_destroy(msm_uport->rx.pool); +err_dma_pool_create: + dma_unmap_single(uport->dev, msm_uport->tx.mapped_cmd_ptr_ptr, + sizeof(u32 *), DMA_TO_DEVICE); + dma_unmap_single(uport->dev, msm_uport->tx.mapped_cmd_ptr, + sizeof(dmov_box), DMA_TO_DEVICE); + kfree(msm_uport->tx.command_ptr_ptr); +err_tx_command_ptr_ptr: + kfree(msm_uport->tx.command_ptr); + return ret; +} + +static int __devinit msm_hs_probe(struct platform_device *pdev) +{ + int ret; + struct uart_port *uport; + struct msm_hs_port *msm_uport; + struct resource *resource; + const struct msm_serial_hs_platform_data *pdata = + pdev->dev.platform_data; + + if (pdev->id < 0 || pdev->id >= UARTDM_NR) { + printk(KERN_ERR "Invalid plaform device ID = %d\n", pdev->id); + return -EINVAL; + } + + msm_uport = &q_uart_port[pdev->id]; + uport = &msm_uport->uport; + + uport->dev = &pdev->dev; + + resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (unlikely(!resource)) + return -ENXIO; + + uport->mapbase = resource->start; + uport->irq = platform_get_irq(pdev, 0); + if (unlikely(uport->irq < 0)) + return -ENXIO; + + if (unlikely(set_irq_wake(uport->irq, 1))) + return -ENXIO; + + if (pdata == NULL || pdata->rx_wakeup_irq < 0) + msm_uport->rx_wakeup.irq = -1; + else { + msm_uport->rx_wakeup.irq = pdata->rx_wakeup_irq; + msm_uport->rx_wakeup.ignore = 1; + msm_uport->rx_wakeup.inject_rx = pdata->inject_rx_on_wakeup; + msm_uport->rx_wakeup.rx_to_inject = pdata->rx_to_inject; + + if (unlikely(msm_uport->rx_wakeup.irq < 0)) + return -ENXIO; + + if (unlikely(set_irq_wake(msm_uport->rx_wakeup.irq, 1))) + return -ENXIO; + } + + if (pdata == NULL) + msm_uport->exit_lpm_cb = NULL; + else + msm_uport->exit_lpm_cb = pdata->exit_lpm_cb; + + resource = platform_get_resource_byname(pdev, IORESOURCE_DMA, + "uartdm_channels"); + if (unlikely(!resource)) + return -ENXIO; + + msm_uport->dma_tx_channel = resource->start; + msm_uport->dma_rx_channel = resource->end; + + resource = platform_get_resource_byname(pdev, IORESOURCE_DMA, + "uartdm_crci"); + if (unlikely(!resource)) + return -ENXIO; + + msm_uport->dma_tx_crci = resource->start; + msm_uport->dma_rx_crci = resource->end; + + uport->iotype = UPIO_MEM; + uport->fifosize = UART_FIFOSIZE; + uport->ops = &msm_hs_ops; + uport->flags = UPF_BOOT_AUTOCONF; + uport->uartclk = UARTCLK; + msm_uport->imr_reg = 0x0; + 
msm_uport->clk = clk_get(&pdev->dev, "uartdm_clk"); + if (IS_ERR(msm_uport->clk)) + return PTR_ERR(msm_uport->clk); + + ret = uartdm_init_port(uport); + if (unlikely(ret)) + return ret; + + msm_uport->clk_state = MSM_HS_CLK_PORT_OFF; + hrtimer_init(&msm_uport->clk_off_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL); + msm_uport->clk_off_timer.function = msm_hs_clk_off_retry; + msm_uport->clk_off_delay = ktime_set(0, 1000000); /* 1ms */ + + uport->line = pdev->id; + return uart_add_one_port(&msm_hs_driver, uport); +} + +static int __init msm_serial_hs_init(void) +{ + int ret, i; + + /* Init all UARTS as non-configured */ + for (i = 0; i < UARTDM_NR; i++) + q_uart_port[i].uport.type = PORT_UNKNOWN; + + msm_hs_workqueue = create_singlethread_workqueue("msm_serial_hs"); + if (unlikely(!msm_hs_workqueue)) + return -ENOMEM; + + ret = uart_register_driver(&msm_hs_driver); + if (unlikely(ret)) { + printk(KERN_ERR "%s failed to load\n", __func__); + goto err_uart_register_driver; + } + + ret = platform_driver_register(&msm_serial_hs_platform_driver); + if (ret) { + printk(KERN_ERR "%s failed to load\n", __func__); + goto err_platform_driver_register; + } + + return ret; + +err_platform_driver_register: + uart_unregister_driver(&msm_hs_driver); +err_uart_register_driver: + destroy_workqueue(msm_hs_workqueue); + return ret; +} +module_init(msm_serial_hs_init); + +/* + * Called by the upper layer when port is closed. + * - Disables the port + * - Unhook the ISR + */ +static void msm_hs_shutdown(struct uart_port *uport) +{ + unsigned long flags; + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + + BUG_ON(msm_uport->rx.flush < FLUSH_STOP); + + spin_lock_irqsave(&uport->lock, flags); + clk_enable(msm_uport->clk); + + /* Disable the transmitter */ + msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_TX_DISABLE_BMSK); + /* Disable the receiver */ + msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_RX_DISABLE_BMSK); + + pm_runtime_disable(uport->dev); + pm_runtime_set_suspended(uport->dev); + + /* Free the interrupt */ + free_irq(uport->irq, msm_uport); + if (use_low_power_rx_wakeup(msm_uport)) + free_irq(msm_uport->rx_wakeup.irq, msm_uport); + + msm_uport->imr_reg = 0; + msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg); + + wait_event(msm_uport->rx.wait, msm_uport->rx.flush == FLUSH_SHUTDOWN); + + clk_disable(msm_uport->clk); /* to balance local clk_enable() */ + if (msm_uport->clk_state != MSM_HS_CLK_OFF) + clk_disable(msm_uport->clk); /* to balance clk_state */ + msm_uport->clk_state = MSM_HS_CLK_PORT_OFF; + + dma_unmap_single(uport->dev, msm_uport->tx.dma_base, + UART_XMIT_SIZE, DMA_TO_DEVICE); + + spin_unlock_irqrestore(&uport->lock, flags); + + if (cancel_work_sync(&msm_uport->rx.tty_work)) + msm_hs_tty_flip_buffer_work(&msm_uport->rx.tty_work); +} + +static void __exit msm_serial_hs_exit(void) +{ + flush_workqueue(msm_hs_workqueue); + destroy_workqueue(msm_hs_workqueue); + platform_driver_unregister(&msm_serial_hs_platform_driver); + uart_unregister_driver(&msm_hs_driver); +} +module_exit(msm_serial_hs_exit); + +#ifdef CONFIG_PM_RUNTIME +static int msm_hs_runtime_idle(struct device *dev) +{ + /* + * returning success from idle results in runtime suspend to be + * called + */ + return 0; +} + +static int msm_hs_runtime_resume(struct device *dev) +{ + struct platform_device *pdev = container_of(dev, struct + platform_device, dev); + struct msm_hs_port *msm_uport = &q_uart_port[pdev->id]; + + msm_hs_request_clock_on(&msm_uport->uport); + return 0; +} + +static int msm_hs_runtime_suspend(struct device 
*dev) +{ + struct platform_device *pdev = container_of(dev, struct + platform_device, dev); + struct msm_hs_port *msm_uport = &q_uart_port[pdev->id]; + + msm_hs_request_clock_off(&msm_uport->uport); + return 0; +} +#else +#define msm_hs_runtime_idle NULL +#define msm_hs_runtime_resume NULL +#define msm_hs_runtime_suspend NULL +#endif + +static const struct dev_pm_ops msm_hs_dev_pm_ops = { + .runtime_suspend = msm_hs_runtime_suspend, + .runtime_resume = msm_hs_runtime_resume, + .runtime_idle = msm_hs_runtime_idle, +}; + +static struct platform_driver msm_serial_hs_platform_driver = { + .probe = msm_hs_probe, + .remove = __devexit_p(msm_hs_remove), + .driver = { + .name = "msm_serial_hs", + .owner = THIS_MODULE, + .pm = &msm_hs_dev_pm_ops, + }, +}; + +static struct uart_driver msm_hs_driver = { + .owner = THIS_MODULE, + .driver_name = "msm_serial_hs", + .dev_name = "ttyHS", + .nr = UARTDM_NR, + .cons = 0, +}; + +static struct uart_ops msm_hs_ops = { + .tx_empty = msm_hs_tx_empty, + .set_mctrl = msm_hs_set_mctrl_locked, + .get_mctrl = msm_hs_get_mctrl_locked, + .stop_tx = msm_hs_stop_tx_locked, + .start_tx = msm_hs_start_tx_locked, + .stop_rx = msm_hs_stop_rx_locked, + .enable_ms = msm_hs_enable_ms_locked, + .break_ctl = msm_hs_break_ctl, + .startup = msm_hs_startup, + .shutdown = msm_hs_shutdown, + .set_termios = msm_hs_set_termios, + .pm = msm_hs_pm, + .type = msm_hs_type, + .config_port = msm_hs_config_port, + .release_port = msm_hs_release_port, + .request_port = msm_hs_request_port, +}; + +MODULE_DESCRIPTION("High Speed UART Driver for the MSM chipset"); +MODULE_VERSION("1.2"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/tty/serial/msm_smd_tty.c b/drivers/tty/serial/msm_smd_tty.c new file mode 100644 index 00000000000..beeff1e8609 --- /dev/null +++ b/drivers/tty/serial/msm_smd_tty.c @@ -0,0 +1,236 @@ +/* drivers/tty/serial/msm_smd_tty.c + * + * Copyright (C) 2007 Google, Inc. + * Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * Author: Brian Swetland <swetland@google.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/cdev.h> +#include <linux/device.h> +#include <linux/wait.h> + +#include <linux/tty.h> +#include <linux/tty_driver.h> +#include <linux/tty_flip.h> + +#include <mach/msm_smd.h> + +#define MAX_SMD_TTYS 32 + +struct smd_tty_info { + struct tty_port port; + smd_channel_t *ch; +}; + +struct smd_tty_channel_desc { + int id; + const char *name; +}; + +static struct smd_tty_info smd_tty[MAX_SMD_TTYS]; + +static const struct smd_tty_channel_desc smd_default_tty_channels[] = { + { .id = 0, .name = "SMD_DS" }, + { .id = 27, .name = "SMD_GPSNMEA" }, +}; + +static const struct smd_tty_channel_desc *smd_tty_channels = + smd_default_tty_channels; +static int smd_tty_channels_len = ARRAY_SIZE(smd_default_tty_channels); + +static void smd_tty_notify(void *priv, unsigned event) +{ + unsigned char *ptr; + int avail; + struct smd_tty_info *info = priv; + struct tty_struct *tty; + + if (event != SMD_EVENT_DATA) + return; + + tty = tty_port_tty_get(&info->port); + if (!tty) + return; + + for (;;) { + if (test_bit(TTY_THROTTLED, &tty->flags)) + break; + avail = smd_read_avail(info->ch); + if (avail == 0) + break; + + avail = tty_prepare_flip_string(tty, &ptr, avail); + + if (smd_read(info->ch, ptr, avail) != avail) { + /* shouldn't be possible since we're in interrupt + ** context here and nobody else could 'steal' our + ** characters. + */ + pr_err("OOPS - smd_tty_buffer mismatch?!"); + } + + tty_flip_buffer_push(tty); + } + + /* XXX only when writable and necessary */ + tty_wakeup(tty); + tty_kref_put(tty); +} + +static int smd_tty_port_activate(struct tty_port *tport, struct tty_struct *tty) +{ + int i, res = 0; + int n = tty->index; + const char *name = NULL; + struct smd_tty_info *info = smd_tty + n; + + for (i = 0; i < smd_tty_channels_len; i++) { + if (smd_tty_channels[i].id == n) { + name = smd_tty_channels[i].name; + break; + } + } + if (!name) + return -ENODEV; + + if (info->ch) + smd_kick(info->ch); + else + res = smd_open(name, &info->ch, info, smd_tty_notify); + + if (!res) + tty->driver_data = info; + + return res; +} + +static void smd_tty_port_shutdown(struct tty_port *tport) +{ + struct smd_tty_info *info; + struct tty_struct *tty = tty_port_tty_get(tport); + + info = tty->driver_data; + if (info->ch) { + smd_close(info->ch); + info->ch = 0; + } + + tty->driver_data = 0; + tty_kref_put(tty); +} + +static int smd_tty_open(struct tty_struct *tty, struct file *f) +{ + struct smd_tty_info *info = smd_tty + tty->index; + + return tty_port_open(&info->port, tty, f); +} + +static void smd_tty_close(struct tty_struct *tty, struct file *f) +{ + struct smd_tty_info *info = tty->driver_data; + + tty_port_close(&info->port, tty, f); +} + +static int smd_tty_write(struct tty_struct *tty, + const unsigned char *buf, int len) +{ + struct smd_tty_info *info = tty->driver_data; + int avail; + + /* if we're writing to a packet channel we will + ** never be able to write more data than there + ** is currently space for + */ + avail = smd_write_avail(info->ch); + if (len > avail) + len = avail; + + return smd_write(info->ch, buf, len); +} + +static int smd_tty_write_room(struct tty_struct *tty) +{ + struct smd_tty_info *info = tty->driver_data; + return smd_write_avail(info->ch); +} + +static int smd_tty_chars_in_buffer(struct tty_struct *tty) +{ + struct smd_tty_info *info = tty->driver_data; + return smd_read_avail(info->ch); +} + +static void smd_tty_unthrottle(struct tty_struct *tty) +{ + struct smd_tty_info *info = tty->driver_data; + 
smd_kick(info->ch); +} + +static const struct tty_port_operations smd_tty_port_ops = { + .shutdown = smd_tty_port_shutdown, + .activate = smd_tty_port_activate, +}; + +static const struct tty_operations smd_tty_ops = { + .open = smd_tty_open, + .close = smd_tty_close, + .write = smd_tty_write, + .write_room = smd_tty_write_room, + .chars_in_buffer = smd_tty_chars_in_buffer, + .unthrottle = smd_tty_unthrottle, +}; + +static struct tty_driver *smd_tty_driver; + +static int __init smd_tty_init(void) +{ + int ret, i; + + smd_tty_driver = alloc_tty_driver(MAX_SMD_TTYS); + if (smd_tty_driver == 0) + return -ENOMEM; + + smd_tty_driver->owner = THIS_MODULE; + smd_tty_driver->driver_name = "smd_tty_driver"; + smd_tty_driver->name = "smd"; + smd_tty_driver->major = 0; + smd_tty_driver->minor_start = 0; + smd_tty_driver->type = TTY_DRIVER_TYPE_SERIAL; + smd_tty_driver->subtype = SERIAL_TYPE_NORMAL; + smd_tty_driver->init_termios = tty_std_termios; + smd_tty_driver->init_termios.c_iflag = 0; + smd_tty_driver->init_termios.c_oflag = 0; + smd_tty_driver->init_termios.c_cflag = B38400 | CS8 | CREAD; + smd_tty_driver->init_termios.c_lflag = 0; + smd_tty_driver->flags = TTY_DRIVER_RESET_TERMIOS | + TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV; + tty_set_operations(smd_tty_driver, &smd_tty_ops); + + ret = tty_register_driver(smd_tty_driver); + if (ret) + return ret; + + for (i = 0; i < smd_tty_channels_len; i++) { + tty_port_init(&smd_tty[smd_tty_channels[i].id].port); + smd_tty[smd_tty_channels[i].id].port.ops = &smd_tty_port_ops; + tty_register_device(smd_tty_driver, smd_tty_channels[i].id, 0); + } + + return 0; +} + +module_init(smd_tty_init); diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c index 5c7abe4c94d..0e8eec516df 100644 --- a/drivers/tty/serial/of_serial.c +++ b/drivers/tty/serial/of_serial.c @@ -80,14 +80,16 @@ static int __devinit of_platform_serial_setup(struct platform_device *ofdev, /* * Try to register a serial port */ -static int __devinit of_platform_serial_probe(struct platform_device *ofdev, - const struct of_device_id *id) +static int __devinit of_platform_serial_probe(struct platform_device *ofdev) { struct of_serial_info *info; struct uart_port port; int port_type; int ret; + if (!ofdev->dev.of_match) + return -EINVAL; + if (of_find_property(ofdev->dev.of_node, "used-by-rtas", NULL)) return -EBUSY; @@ -95,7 +97,7 @@ static int __devinit of_platform_serial_probe(struct platform_device *ofdev, if (info == NULL) return -ENOMEM; - port_type = (unsigned long)id->data; + port_type = (unsigned long)ofdev->dev.of_match->data; ret = of_platform_serial_setup(ofdev, port_type, &port); if (ret) goto out; @@ -160,21 +162,21 @@ static int of_platform_serial_remove(struct platform_device *ofdev) * A few common types, add more as needed. 
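 *
 * Editor's note, not part of the original diff: with this table and the
 * probe change above, the matched compatible string selects the port type
 * through the stored match data, roughly:
 *
 *	// e.g. a node with compatible = "ns16550a" matches the entry below, so
 *	port_type = (unsigned long)ofdev->dev.of_match->data;	// PORT_16550A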
*/ static struct of_device_id __devinitdata of_platform_serial_table[] = { - { .type = "serial", .compatible = "ns8250", .data = (void *)PORT_8250, }, - { .type = "serial", .compatible = "ns16450", .data = (void *)PORT_16450, }, - { .type = "serial", .compatible = "ns16550a", .data = (void *)PORT_16550A, }, - { .type = "serial", .compatible = "ns16550", .data = (void *)PORT_16550, }, - { .type = "serial", .compatible = "ns16750", .data = (void *)PORT_16750, }, - { .type = "serial", .compatible = "ns16850", .data = (void *)PORT_16850, }, + { .compatible = "ns8250", .data = (void *)PORT_8250, }, + { .compatible = "ns16450", .data = (void *)PORT_16450, }, + { .compatible = "ns16550a", .data = (void *)PORT_16550A, }, + { .compatible = "ns16550", .data = (void *)PORT_16550, }, + { .compatible = "ns16750", .data = (void *)PORT_16750, }, + { .compatible = "ns16850", .data = (void *)PORT_16850, }, #ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL - { .type = "serial", .compatible = "ibm,qpace-nwp-serial", - .data = (void *)PORT_NWPSERIAL, }, + { .compatible = "ibm,qpace-nwp-serial", + .data = (void *)PORT_NWPSERIAL, }, #endif - { .type = "serial", .data = (void *)PORT_UNKNOWN, }, + { .type = "serial", .data = (void *)PORT_UNKNOWN, }, { /* end of list */ }, }; -static struct of_platform_driver of_platform_serial_driver = { +static struct platform_driver of_platform_serial_driver = { .driver = { .name = "of_serial", .owner = THIS_MODULE, @@ -186,13 +188,13 @@ static struct of_platform_driver of_platform_serial_driver = { static int __init of_platform_serial_init(void) { - return of_register_platform_driver(&of_platform_serial_driver); + return platform_driver_register(&of_platform_serial_driver); } module_init(of_platform_serial_init); static void __exit of_platform_serial_exit(void) { - return of_unregister_platform_driver(&of_platform_serial_driver); + return platform_driver_unregister(&of_platform_serial_driver); }; module_exit(of_platform_serial_exit); diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index 7f2f0105878..763537943a5 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c @@ -20,6 +20,10 @@ * this driver as required for the omap-platform. */ +#if defined(CONFIG_SERIAL_OMAP_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) +#define SUPPORT_SYSRQ +#endif + #include <linux/module.h> #include <linux/init.h> #include <linux/console.h> @@ -190,7 +194,6 @@ static inline void receive_chars(struct uart_omap_port *up, int *status) if (up->port.line == up->port.cons->index) { /* Recover the break flag from console xmit */ lsr |= up->lsr_break_flag; - up->lsr_break_flag = 0; } #endif if (lsr & UART_LSR_BI) @@ -517,6 +520,9 @@ static int serial_omap_startup(struct uart_port *port) up->ier = UART_IER_RLSI | UART_IER_RDI; serial_out(up, UART_IER, up->ier); + /* Enable module level wake up */ + serial_out(up, UART_OMAP_WER, OMAP_UART_WER_MOD_WKUP); + up->port_activity = jiffies; return 0; } @@ -824,9 +830,6 @@ serial_omap_pm(struct uart_port *port, unsigned int state, serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); serial_out(up, UART_EFR, efr); serial_out(up, UART_LCR, 0); - /* Enable module level wake up */ - serial_out(up, UART_OMAP_WER, - (state != 0) ? 
OMAP_UART_WER_MOD_WKUP : 0); } static void serial_omap_release_port(struct uart_port *port) diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c index 70a61458ec4..a9ad7f33526 100644 --- a/drivers/tty/serial/pch_uart.c +++ b/drivers/tty/serial/pch_uart.c @@ -21,6 +21,7 @@ #include <linux/serial_core.h> #include <linux/interrupt.h> #include <linux/io.h> +#include <linux/dmi.h> #include <linux/dmaengine.h> #include <linux/pch_dma.h> @@ -40,10 +41,11 @@ enum { #define PCH_UART_DRIVER_DEVICE "ttyPCH" -#define PCH_UART_NR_GE_256FIFO 1 -#define PCH_UART_NR_GE_64FIFO 3 -#define PCH_UART_NR_GE (PCH_UART_NR_GE_256FIFO+PCH_UART_NR_GE_64FIFO) -#define PCH_UART_NR PCH_UART_NR_GE +/* Set the max number of UART port + * Intel EG20T PCH: 4 port + * OKI SEMICONDUCTOR ML7213 IOH: 3 port +*/ +#define PCH_UART_NR 4 #define PCH_UART_HANDLED_RX_INT (1<<((PCH_UART_HANDLED_RX_INT_SHIFT)<<1)) #define PCH_UART_HANDLED_TX_INT (1<<((PCH_UART_HANDLED_TX_INT_SHIFT)<<1)) @@ -192,6 +194,8 @@ enum { #define PCH_UART_HAL_LOOP (PCH_UART_MCR_LOOP) #define PCH_UART_HAL_AFE (PCH_UART_MCR_AFE) +#define PCI_VENDOR_ID_ROHM 0x10DB + struct pch_uart_buffer { unsigned char *buf; int size; @@ -215,6 +219,7 @@ struct eg20t_port { struct pch_uart_buffer rxbuf; unsigned int dmsr; unsigned int fcr; + unsigned int mcr; unsigned int use_dma; unsigned int use_dma_flag; struct dma_async_tx_descriptor *desc_tx; @@ -223,13 +228,44 @@ struct eg20t_port { struct pch_dma_slave param_rx; struct dma_chan *chan_tx; struct dma_chan *chan_rx; - struct scatterlist sg_tx; + struct scatterlist *sg_tx_p; + int nent; struct scatterlist sg_rx; int tx_dma_use; void *rx_buf_virt; dma_addr_t rx_buf_dma; }; +/** + * struct pch_uart_driver_data - private data structure for UART-DMA + * @port_type: The number of DMA channel + * @line_no: UART port line number (0, 1, 2...) 
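 *
 * Editor's note, not part of the original commit: each PCI id added further
 * below stores an enum pch_uart_num_t value in .driver_data, and
 * pch_uart_init_port() uses it to look this structure up, roughly:
 *
 *	board = &drv_dat[id->driver_data];	// e.g. drv_dat[pch_ml7213_uart1]
 *	port_type = board->port_type;
 *	priv->port.line = board->line_no;	// fixed line number per device id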
+ */ +struct pch_uart_driver_data { + int port_type; + int line_no; +}; + +enum pch_uart_num_t { + pch_et20t_uart0 = 0, + pch_et20t_uart1, + pch_et20t_uart2, + pch_et20t_uart3, + pch_ml7213_uart0, + pch_ml7213_uart1, + pch_ml7213_uart2, +}; + +static struct pch_uart_driver_data drv_dat[] = { + [pch_et20t_uart0] = {PCH_UART_8LINE, 0}, + [pch_et20t_uart1] = {PCH_UART_2LINE, 1}, + [pch_et20t_uart2] = {PCH_UART_2LINE, 2}, + [pch_et20t_uart3] = {PCH_UART_2LINE, 3}, + [pch_ml7213_uart0] = {PCH_UART_8LINE, 0}, + [pch_ml7213_uart1] = {PCH_UART_2LINE, 1}, + [pch_ml7213_uart2] = {PCH_UART_2LINE, 2}, +}; + static unsigned int default_baud = 9600; static const int trigger_level_256[4] = { 1, 64, 128, 224 }; static const int trigger_level_64[4] = { 1, 16, 32, 56 }; @@ -278,7 +314,7 @@ static int pch_uart_hal_set_line(struct eg20t_port *priv, int baud, div = DIV_ROUND(priv->base_baud / 16, baud); if (div < 0 || USHRT_MAX <= div) { - pr_err("Invalid Baud(div=0x%x)\n", div); + dev_err(priv->port.dev, "Invalid Baud(div=0x%x)\n", div); return -EINVAL; } @@ -286,17 +322,17 @@ static int pch_uart_hal_set_line(struct eg20t_port *priv, int baud, dlm = ((unsigned int)div >> 8) & 0x00FFU; if (parity & ~(PCH_UART_LCR_PEN | PCH_UART_LCR_EPS | PCH_UART_LCR_SP)) { - pr_err("Invalid parity(0x%x)\n", parity); + dev_err(priv->port.dev, "Invalid parity(0x%x)\n", parity); return -EINVAL; } if (bits & ~PCH_UART_LCR_WLS) { - pr_err("Invalid bits(0x%x)\n", bits); + dev_err(priv->port.dev, "Invalid bits(0x%x)\n", bits); return -EINVAL; } if (stb & ~PCH_UART_LCR_STB) { - pr_err("Invalid STB(0x%x)\n", stb); + dev_err(priv->port.dev, "Invalid STB(0x%x)\n", stb); return -EINVAL; } @@ -304,7 +340,7 @@ static int pch_uart_hal_set_line(struct eg20t_port *priv, int baud, lcr |= bits; lcr |= stb; - pr_debug("%s:baud = %d, div = %04x, lcr = %02x (%lu)\n", + dev_dbg(priv->port.dev, "%s:baud = %d, div = %04x, lcr = %02x (%lu)\n", __func__, baud, div, lcr, jiffies); iowrite8(PCH_UART_LCR_DLAB, priv->membase + UART_LCR); iowrite8(dll, priv->membase + PCH_UART_DLL); @@ -318,7 +354,8 @@ static int pch_uart_hal_fifo_reset(struct eg20t_port *priv, unsigned int flag) { if (flag & ~(PCH_UART_FCR_TFR | PCH_UART_FCR_RFR)) { - pr_err("%s:Invalid flag(0x%x)\n", __func__, flag); + dev_err(priv->port.dev, "%s:Invalid flag(0x%x)\n", + __func__, flag); return -EINVAL; } @@ -337,17 +374,20 @@ static int pch_uart_hal_set_fifo(struct eg20t_port *priv, u8 fcr; if (dmamode & ~PCH_UART_FCR_DMS) { - pr_err("%s:Invalid DMA Mode(0x%x)\n", __func__, dmamode); + dev_err(priv->port.dev, "%s:Invalid DMA Mode(0x%x)\n", + __func__, dmamode); return -EINVAL; } if (fifo_size & ~(PCH_UART_FCR_FIFOE | PCH_UART_FCR_FIFO256)) { - pr_err("%s:Invalid FIFO SIZE(0x%x)\n", __func__, fifo_size); + dev_err(priv->port.dev, "%s:Invalid FIFO SIZE(0x%x)\n", + __func__, fifo_size); return -EINVAL; } if (trigger & ~PCH_UART_FCR_RFTL) { - pr_err("%s:Invalid TRIGGER(0x%x)\n", __func__, trigger); + dev_err(priv->port.dev, "%s:Invalid TRIGGER(0x%x)\n", + __func__, trigger); return -EINVAL; } @@ -386,7 +426,7 @@ static u8 pch_uart_hal_get_modem(struct eg20t_port *priv) return get_msr(priv, priv->membase); } -static int pch_uart_hal_write(struct eg20t_port *priv, +static void pch_uart_hal_write(struct eg20t_port *priv, const unsigned char *buf, int tx_size) { int i; @@ -396,7 +436,6 @@ static int pch_uart_hal_write(struct eg20t_port *priv, thr = buf[i++]; iowrite8(thr, priv->membase + PCH_UART_THR); } - return i; } static int pch_uart_hal_read(struct eg20t_port *priv, unsigned char *buf, @@ 
-452,7 +491,7 @@ static int push_rx(struct eg20t_port *priv, const unsigned char *buf, port = &priv->port; tty = tty_port_tty_get(&port->state->port); if (!tty) { - pr_debug("%s:tty is busy now", __func__); + dev_dbg(priv->port.dev, "%s:tty is busy now", __func__); return -EBUSY; } @@ -469,8 +508,8 @@ static int pop_tx_x(struct eg20t_port *priv, unsigned char *buf) struct uart_port *port = &priv->port; if (port->x_char) { - pr_debug("%s:X character send %02x (%lu)\n", __func__, - port->x_char, jiffies); + dev_dbg(priv->port.dev, "%s:X character send %02x (%lu)\n", + __func__, port->x_char, jiffies); buf[0] = port->x_char; port->x_char = 0; ret = 1; @@ -490,7 +529,7 @@ static int dma_push_rx(struct eg20t_port *priv, int size) port = &priv->port; tty = tty_port_tty_get(&port->state->port); if (!tty) { - pr_debug("%s:tty is busy now", __func__); + dev_dbg(priv->port.dev, "%s:tty is busy now", __func__); return 0; } @@ -560,11 +599,13 @@ static void pch_request_dma(struct uart_port *port) /* Set Tx DMA */ param = &priv->param_tx; param->dma_dev = &dma_dev->dev; - param->chan_id = priv->port.line; + param->chan_id = priv->port.line * 2; /* Tx = 0, 2, 4, ... */ + param->tx_reg = port->mapbase + UART_TX; chan = dma_request_channel(mask, filter, param); if (!chan) { - pr_err("%s:dma_request_channel FAILS(Tx)\n", __func__); + dev_err(priv->port.dev, "%s:dma_request_channel FAILS(Tx)\n", + __func__); return; } priv->chan_tx = chan; @@ -572,11 +613,13 @@ static void pch_request_dma(struct uart_port *port) /* Set Rx DMA */ param = &priv->param_rx; param->dma_dev = &dma_dev->dev; - param->chan_id = priv->port.line + 1; /* Rx = Tx + 1 */ + param->chan_id = priv->port.line * 2 + 1; /* Rx = Tx + 1 */ + param->rx_reg = port->mapbase + UART_RX; chan = dma_request_channel(mask, filter, param); if (!chan) { - pr_err("%s:dma_request_channel FAILS(Rx)\n", __func__); + dev_err(priv->port.dev, "%s:dma_request_channel FAILS(Rx)\n", + __func__); dma_release_channel(priv->chan_tx); return; } @@ -592,16 +635,20 @@ static void pch_dma_rx_complete(void *arg) struct eg20t_port *priv = arg; struct uart_port *port = &priv->port; struct tty_struct *tty = tty_port_tty_get(&port->state->port); + int count; if (!tty) { - pr_debug("%s:tty is busy now", __func__); + dev_dbg(priv->port.dev, "%s:tty is busy now", __func__); return; } - if (dma_push_rx(priv, priv->trigger_level)) + dma_sync_sg_for_cpu(port->dev, &priv->sg_rx, 1, DMA_FROM_DEVICE); + count = dma_push_rx(priv, priv->trigger_level); + if (count) tty_flip_buffer_push(tty); - tty_kref_put(tty); + async_tx_ack(priv->desc_rx); + pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT); } static void pch_dma_tx_complete(void *arg) @@ -609,16 +656,23 @@ static void pch_dma_tx_complete(void *arg) struct eg20t_port *priv = arg; struct uart_port *port = &priv->port; struct circ_buf *xmit = &port->state->xmit; + struct scatterlist *sg = priv->sg_tx_p; + int i; - xmit->tail += sg_dma_len(&priv->sg_tx); + for (i = 0; i < priv->nent; i++, sg++) { + xmit->tail += sg_dma_len(sg); + port->icount.tx += sg_dma_len(sg); + } xmit->tail &= UART_XMIT_SIZE - 1; - port->icount.tx += sg_dma_len(&priv->sg_tx); - async_tx_ack(priv->desc_tx); + dma_unmap_sg(port->dev, sg, priv->nent, DMA_TO_DEVICE); priv->tx_dma_use = 0; + priv->nent = 0; + kfree(priv->sg_tx_p); + pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_TX_INT); } -static int pop_tx(struct eg20t_port *priv, unsigned char *buf, int size) +static int pop_tx(struct eg20t_port *priv, int size) { int count = 0; struct uart_port *port = 
&priv->port; @@ -631,13 +685,13 @@ static int pop_tx(struct eg20t_port *priv, unsigned char *buf, int size) int cnt_to_end = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); int sz = min(size - count, cnt_to_end); - memcpy(&buf[count], &xmit->buf[xmit->tail], sz); + pch_uart_hal_write(priv, &xmit->buf[xmit->tail], sz); xmit->tail = (xmit->tail + sz) & (UART_XMIT_SIZE - 1); count += sz; } while (!uart_circ_empty(xmit) && count < size); pop_tx_end: - pr_debug("%d characters. Remained %d characters. (%lu)\n", + dev_dbg(priv->port.dev, "%d characters. Remained %d characters.(%lu)\n", count, size - count, jiffies); return count; @@ -679,7 +733,7 @@ static int dma_handle_rx(struct eg20t_port *priv) sg_init_table(&priv->sg_rx, 1); /* Initialize SG table */ - sg_dma_len(sg) = priv->fifo_size; + sg_dma_len(sg) = priv->trigger_level; sg_set_page(&priv->sg_rx, virt_to_page(priv->rx_buf_virt), sg_dma_len(sg), (unsigned long)priv->rx_buf_virt & @@ -689,7 +743,8 @@ static int dma_handle_rx(struct eg20t_port *priv) desc = priv->chan_rx->device->device_prep_slave_sg(priv->chan_rx, sg, 1, DMA_FROM_DEVICE, - DMA_PREP_INTERRUPT); + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) return 0; @@ -706,14 +761,14 @@ static unsigned int handle_tx(struct eg20t_port *priv) { struct uart_port *port = &priv->port; struct circ_buf *xmit = &port->state->xmit; - int ret; int fifo_size; int tx_size; int size; int tx_empty; if (!priv->start_tx) { - pr_info("%s:Tx isn't started. (%lu)\n", __func__, jiffies); + dev_info(priv->port.dev, "%s:Tx isn't started. (%lu)\n", + __func__, jiffies); pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_TX_INT); priv->tx_empty = 1; return 0; @@ -728,17 +783,21 @@ static unsigned int handle_tx(struct eg20t_port *priv) fifo_size--; } size = min(xmit->head - xmit->tail, fifo_size); - tx_size = pop_tx(priv, xmit->buf, size); + if (size < 0) + size = fifo_size; + + tx_size = pop_tx(priv, size); if (tx_size > 0) { - ret = pch_uart_hal_write(priv, xmit->buf, tx_size); - port->icount.tx += ret; + port->icount.tx += tx_size; tx_empty = 0; } priv->tx_empty = tx_empty; - if (tx_empty) + if (tx_empty) { pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_TX_INT); + uart_write_wakeup(port); + } return PCH_UART_HANDLED_TX_INT; } @@ -747,14 +806,28 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv) { struct uart_port *port = &priv->port; struct circ_buf *xmit = &port->state->xmit; - struct scatterlist *sg = &priv->sg_tx; + struct scatterlist *sg; int nent; int fifo_size; int tx_empty; struct dma_async_tx_descriptor *desc; + int num; + int i; + int bytes; + int size; + int rem; if (!priv->start_tx) { - pr_info("%s:Tx isn't started. (%lu)\n", __func__, jiffies); + dev_info(priv->port.dev, "%s:Tx isn't started. (%lu)\n", + __func__, jiffies); + pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_TX_INT); + priv->tx_empty = 1; + return 0; + } + + if (priv->tx_dma_use) { + dev_dbg(priv->port.dev, "%s:Tx is not completed. 
(%lu)\n", + __func__, jiffies); pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_TX_INT); priv->tx_empty = 1; return 0; @@ -769,37 +842,73 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv) fifo_size--; } - pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_TX_INT); + bytes = min((int)CIRC_CNT(xmit->head, xmit->tail, + UART_XMIT_SIZE), CIRC_CNT_TO_END(xmit->head, + xmit->tail, UART_XMIT_SIZE)); + if (!bytes) { + dev_dbg(priv->port.dev, "%s 0 bytes return\n", __func__); + pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_TX_INT); + uart_write_wakeup(port); + return 0; + } + + if (bytes > fifo_size) { + num = bytes / fifo_size + 1; + size = fifo_size; + rem = bytes % fifo_size; + } else { + num = 1; + size = bytes; + rem = bytes; + } + + dev_dbg(priv->port.dev, "%s num=%d size=%d rem=%d\n", + __func__, num, size, rem); priv->tx_dma_use = 1; - sg_init_table(&priv->sg_tx, 1); /* Initialize SG table */ + priv->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC); - sg_set_page(&priv->sg_tx, virt_to_page(xmit->buf), - UART_XMIT_SIZE, (int)xmit->buf & ~PAGE_MASK); + sg_init_table(priv->sg_tx_p, num); /* Initialize SG table */ + sg = priv->sg_tx_p; - nent = dma_map_sg(port->dev, &priv->sg_tx, 1, DMA_TO_DEVICE); + for (i = 0; i < num; i++, sg++) { + if (i == (num - 1)) + sg_set_page(sg, virt_to_page(xmit->buf), + rem, fifo_size * i); + else + sg_set_page(sg, virt_to_page(xmit->buf), + size, fifo_size * i); + } + + sg = priv->sg_tx_p; + nent = dma_map_sg(port->dev, sg, num, DMA_TO_DEVICE); if (!nent) { - pr_err("%s:dma_map_sg Failed\n", __func__); + dev_err(priv->port.dev, "%s:dma_map_sg Failed\n", __func__); return 0; } - - sg->offset = xmit->tail & (UART_XMIT_SIZE - 1); - sg_dma_address(sg) = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) + - sg->offset; - sg_dma_len(sg) = min((int)CIRC_CNT(xmit->head, xmit->tail, - UART_XMIT_SIZE), CIRC_CNT_TO_END(xmit->head, - xmit->tail, UART_XMIT_SIZE)); + priv->nent = nent; + + for (i = 0; i < nent; i++, sg++) { + sg->offset = (xmit->tail & (UART_XMIT_SIZE - 1)) + + fifo_size * i; + sg_dma_address(sg) = (sg_dma_address(sg) & + ~(UART_XMIT_SIZE - 1)) + sg->offset; + if (i == (nent - 1)) + sg_dma_len(sg) = rem; + else + sg_dma_len(sg) = size; + } desc = priv->chan_tx->device->device_prep_slave_sg(priv->chan_tx, - sg, nent, DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + priv->sg_tx_p, nent, DMA_TO_DEVICE, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc) { - pr_err("%s:device_prep_slave_sg Failed\n", __func__); + dev_err(priv->port.dev, "%s:device_prep_slave_sg Failed\n", + __func__); return 0; } - - dma_sync_sg_for_device(port->dev, sg, 1, DMA_TO_DEVICE); - + dma_sync_sg_for_device(port->dev, priv->sg_tx_p, nent, DMA_TO_DEVICE); priv->desc_tx = desc; desc->callback = pch_dma_tx_complete; desc->callback_param = priv; @@ -854,10 +963,16 @@ static irqreturn_t pch_uart_interrupt(int irq, void *dev_id) } break; case PCH_UART_IID_RDR: /* Received Data Ready */ - if (priv->use_dma) + if (priv->use_dma) { + pch_uart_hal_disable_interrupt(priv, + PCH_UART_HAL_RX_INT); ret = dma_handle_rx(priv); - else + if (!ret) + pch_uart_hal_enable_interrupt(priv, + PCH_UART_HAL_RX_INT); + } else { ret = handle_rx(priv); + } break; case PCH_UART_IID_RDR_TO: /* Received Data Ready (FIFO Timeout) */ @@ -874,7 +989,8 @@ static irqreturn_t pch_uart_interrupt(int irq, void *dev_id) ret = PCH_UART_HANDLED_MS_INT; break; default: /* Never junp to this label */ - pr_err("%s:iid=%d (%lu)\n", __func__, iid, jiffies); + dev_err(priv->port.dev, "%s:iid=%d (%lu)\n", 
__func__, + iid, jiffies); ret = -1; break; } @@ -932,7 +1048,6 @@ static unsigned int pch_uart_get_mctrl(struct uart_port *port) static void pch_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) { u32 mcr = 0; - unsigned int dat; struct eg20t_port *priv = container_of(port, struct eg20t_port, port); if (mctrl & TIOCM_DTR) @@ -942,11 +1057,11 @@ static void pch_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) if (mctrl & TIOCM_LOOP) mcr |= UART_MCR_LOOP; - if (mctrl) { - dat = pch_uart_get_mctrl(port); - dat |= mcr; - iowrite8(dat, priv->membase + UART_MCR); - } + if (priv->mcr & UART_MCR_AFE) + mcr |= UART_MCR_AFE; + + if (mctrl) + iowrite8(mcr, priv->membase + UART_MCR); } static void pch_uart_stop_tx(struct uart_port *port) @@ -963,9 +1078,13 @@ static void pch_uart_start_tx(struct uart_port *port) priv = container_of(port, struct eg20t_port, port); - if (priv->use_dma) - if (priv->tx_dma_use) + if (priv->use_dma) { + if (priv->tx_dma_use) { + dev_dbg(priv->port.dev, "%s : Tx DMA is NOT empty.\n", + __func__); return; + } + } priv->start_tx = 1; pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_TX_INT); @@ -1010,7 +1129,12 @@ static int pch_uart_startup(struct uart_port *port) priv = container_of(port, struct eg20t_port, port); priv->tx_empty = 1; - port->uartclk = priv->base_baud; + + if (port->uartclk) + priv->base_baud = port->uartclk; + else + port->uartclk = priv->base_baud; + pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_ALL_INT); ret = pch_uart_hal_set_line(priv, default_baud, PCH_UART_HAL_PARITY_NONE, PCH_UART_HAL_8BIT, @@ -1081,7 +1205,8 @@ static void pch_uart_shutdown(struct uart_port *port) ret = pch_uart_hal_set_fifo(priv, PCH_UART_HAL_DMA_MODE0, PCH_UART_HAL_FIFO_DIS, PCH_UART_HAL_TRIGGER1); if (ret) - pr_err("pch_uart_hal_set_fifo Failed(ret=%d)\n", ret); + dev_err(priv->port.dev, + "pch_uart_hal_set_fifo Failed(ret=%d)\n", ret); if (priv->use_dma_flag) pch_free_dma(port); @@ -1130,6 +1255,13 @@ static void pch_uart_set_termios(struct uart_port *port, } else { parity = PCH_UART_HAL_PARITY_NONE; } + + /* Only UART0 has auto hardware flow function */ + if ((termios->c_cflag & CRTSCTS) && (priv->fifo_size == 256)) + priv->mcr |= UART_MCR_AFE; + else + priv->mcr &= ~UART_MCR_AFE; + termios->c_cflag &= ~CMSPAR; /* Mark/Space parity is not supported */ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16); @@ -1202,17 +1334,19 @@ static int pch_uart_verify_port(struct uart_port *port, priv = container_of(port, struct eg20t_port, port); if (serinfo->flags & UPF_LOW_LATENCY) { - pr_info("PCH UART : Use PIO Mode (without DMA)\n"); + dev_info(priv->port.dev, + "PCH UART : Use PIO Mode (without DMA)\n"); priv->use_dma = 0; serinfo->flags &= ~UPF_LOW_LATENCY; } else { #ifndef CONFIG_PCH_DMA - pr_err("%s : PCH DMA is not Loaded.\n", __func__); + dev_err(priv->port.dev, "%s : PCH DMA is not Loaded.\n", + __func__); return -EOPNOTSUPP; #endif priv->use_dma = 1; priv->use_dma_flag = 1; - pr_info("PCH UART : Use DMA Mode\n"); + dev_info(priv->port.dev, "PCH UART : Use DMA Mode\n"); } return 0; @@ -1249,7 +1383,7 @@ static struct uart_driver pch_uart_driver = { }; static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev, - int port_type) + const struct pci_device_id *id) { struct eg20t_port *priv; int ret; @@ -1257,7 +1391,11 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev, unsigned int mapbase; unsigned char *rxbuf; int fifosize, base_baud; - static int num; + int port_type; + struct pch_uart_driver_data *board; + + board 
= &drv_dat[id->driver_data]; + port_type = board->port_type; priv = kzalloc(sizeof(struct eg20t_port), GFP_KERNEL); if (priv == NULL) @@ -1267,14 +1405,18 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev, if (!rxbuf) goto init_port_free_txbuf; + base_baud = 1843200; /* 1.8432MHz */ + + /* quirk for CM-iTC board */ + if (strstr(dmi_get_system_info(DMI_BOARD_NAME), "CM-iTC")) + base_baud = 192000000; /* 192.0MHz */ + switch (port_type) { case PORT_UNKNOWN: - fifosize = 256; /* UART0 */ - base_baud = 1843200; /* 1.8432MHz */ + fifosize = 256; /* EG20T/ML7213: UART0 */ break; case PORT_8250: - fifosize = 64; /* UART1~3 */ - base_baud = 1843200; /* 1.8432MHz */ + fifosize = 64; /* EG20T:UART1~3 ML7213: UART1~2*/ break; default: dev_err(&pdev->dev, "Invalid Port Type(=%d)\n", port_type); @@ -1302,11 +1444,14 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev, priv->port.ops = &pch_uart_ops; priv->port.flags = UPF_BOOT_AUTOCONF; priv->port.fifosize = fifosize; - priv->port.line = num++; + priv->port.line = board->line_no; priv->trigger = PCH_UART_HAL_TRIGGER_M; + spin_lock_init(&priv->port.lock); + pci_set_drvdata(pdev, priv); pch_uart_hal_request(pdev, fifosize, base_baud); + ret = uart_add_one_port(&pch_uart_driver, &priv->port); if (ret < 0) goto init_port_hal_free; @@ -1377,13 +1522,19 @@ static int pch_uart_pci_resume(struct pci_dev *pdev) static DEFINE_PCI_DEVICE_TABLE(pch_uart_pci_id) = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8811), - .driver_data = PCH_UART_8LINE}, + .driver_data = pch_et20t_uart0}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8812), - .driver_data = PCH_UART_2LINE}, + .driver_data = pch_et20t_uart1}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8813), - .driver_data = PCH_UART_2LINE}, + .driver_data = pch_et20t_uart2}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8814), - .driver_data = PCH_UART_2LINE}, + .driver_data = pch_et20t_uart3}, + {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8027), + .driver_data = pch_ml7213_uart0}, + {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8028), + .driver_data = pch_ml7213_uart1}, + {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8029), + .driver_data = pch_ml7213_uart2}, {0,}, }; @@ -1397,7 +1548,7 @@ static int __devinit pch_uart_pci_probe(struct pci_dev *pdev, if (ret < 0) goto probe_error; - priv = pch_uart_init_port(pdev, id->driver_data); + priv = pch_uart_init_port(pdev, id); if (!priv) { ret = -EBUSY; goto probe_disable_device; diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c index a2f2b325449..602d9845c52 100644 --- a/drivers/tty/serial/sb1250-duart.c +++ b/drivers/tty/serial/sb1250-duart.c @@ -829,7 +829,7 @@ static void __init sbd_probe_duarts(void) #ifdef CONFIG_SERIAL_SB1250_DUART_CONSOLE /* * Serial console stuff. Very basic, polling driver for doing serial - * console output. The console_sem is held by the caller, so we + * console output. The console_lock is held by the caller, so we * shouldn't be interrupted for more console activity. 
*/ static void sbd_console_putchar(struct uart_port *uport, int ch) diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 460a72d91bb..733fe8e73f0 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -905,7 +905,7 @@ static int uart_get_lsr_info(struct tty_struct *tty, return put_user(result, value); } -static int uart_tiocmget(struct tty_struct *tty, struct file *file) +static int uart_tiocmget(struct tty_struct *tty) { struct uart_state *state = tty->driver_data; struct tty_port *port = &state->port; @@ -913,10 +913,8 @@ static int uart_tiocmget(struct tty_struct *tty, struct file *file) int result = -EIO; mutex_lock(&port->mutex); - if ((!file || !tty_hung_up_p(file)) && - !(tty->flags & (1 << TTY_IO_ERROR))) { + if (!(tty->flags & (1 << TTY_IO_ERROR))) { result = uport->mctrl; - spin_lock_irq(&uport->lock); result |= uport->ops->get_mctrl(uport); spin_unlock_irq(&uport->lock); @@ -927,8 +925,7 @@ static int uart_tiocmget(struct tty_struct *tty, struct file *file) } static int -uart_tiocmset(struct tty_struct *tty, struct file *file, - unsigned int set, unsigned int clear) +uart_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct uart_state *state = tty->driver_data; struct uart_port *uport = state->uart_port; @@ -936,8 +933,7 @@ uart_tiocmset(struct tty_struct *tty, struct file *file, int ret = -EIO; mutex_lock(&port->mutex); - if ((!file || !tty_hung_up_p(file)) && - !(tty->flags & (1 << TTY_IO_ERROR))) { + if (!(tty->flags & (1 << TTY_IO_ERROR))) { uart_update_mctrl(uport, set, clear); ret = 0; } @@ -1103,7 +1099,7 @@ static int uart_get_icount(struct tty_struct *tty, * Called via sys_ioctl. We can use spin_lock_irq() here. */ static int -uart_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd, +uart_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct uart_state *state = tty->driver_data; @@ -1156,7 +1152,7 @@ uart_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd, mutex_lock(&port->mutex); - if (tty_hung_up_p(filp)) { + if (tty->flags & (1 << TTY_IO_ERROR)) { ret = -EIO; goto out_up; } @@ -2064,7 +2060,7 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport) /* * Re-enable the console device after suspending. */ - if (console_suspend_enabled && uart_console(uport)) { + if (uart_console(uport)) { /* * First try to use the console cflag setting. 
*/ @@ -2077,9 +2073,9 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport) if (port->tty && port->tty->termios && termios.c_cflag == 0) termios = *(port->tty->termios); - uart_change_pm(state, 0); uport->ops->set_termios(uport, &termios, NULL); - console_start(uport->cons); + if (console_suspend_enabled) + console_start(uport->cons); } if (port->flags & ASYNC_SUSPENDED) { diff --git a/drivers/tty/serial/serial_cs.c b/drivers/tty/serial/serial_cs.c index 93760b2ea17..1ef4df9bf7e 100644 --- a/drivers/tty/serial/serial_cs.c +++ b/drivers/tty/serial/serial_cs.c @@ -712,6 +712,7 @@ static struct pcmcia_device_id serial_ids[] = { PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf), PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0e01), PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05), + PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0b05), PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101), PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070), PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0101, 0x0562), diff --git a/drivers/tty/serial/serial_lh7a40x.c b/drivers/tty/serial/serial_lh7a40x.c deleted file mode 100644 index ea744707c4d..00000000000 --- a/drivers/tty/serial/serial_lh7a40x.c +++ /dev/null @@ -1,682 +0,0 @@ -/* drivers/serial/serial_lh7a40x.c - * - * Copyright (C) 2004 Coastal Environmental Systems - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - */ - -/* Driver for Sharp LH7A40X embedded serial ports - * - * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. - * Based on drivers/serial/amba.c, by Deep Blue Solutions Ltd. - * - * --- - * - * This driver supports the embedded UARTs of the Sharp LH7A40X series - * CPUs. While similar to the 16550 and other UART chips, there is - * nothing close to register compatibility. Moreover, some of the - * modem control lines are not available, either in the chip or they - * are lacking in the board-level implementation. - * - * - Use of SIRDIS - * For simplicity, we disable the IR functions of any UART whenever - * we enable it. 
- * - */ - - -#if defined(CONFIG_SERIAL_LH7A40X_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) -#define SUPPORT_SYSRQ -#endif - -#include <linux/module.h> -#include <linux/ioport.h> -#include <linux/init.h> -#include <linux/console.h> -#include <linux/sysrq.h> -#include <linux/tty.h> -#include <linux/tty_flip.h> -#include <linux/serial_core.h> -#include <linux/serial.h> -#include <linux/io.h> - -#include <asm/irq.h> -#include <mach/hardware.h> - -#define DEV_MAJOR 204 -#define DEV_MINOR 16 -#define DEV_NR 3 - -#define ISR_LOOP_LIMIT 256 - -#define UR(p,o) _UR ((p)->membase, o) -#define _UR(b,o) (*((volatile unsigned int*)(((unsigned char*) b) + (o)))) -#define BIT_CLR(p,o,m) UR(p,o) = UR(p,o) & (~(unsigned int)m) -#define BIT_SET(p,o,m) UR(p,o) = UR(p,o) | ( (unsigned int)m) - -#define UART_REG_SIZE 32 - -#define UART_R_DATA (0x00) -#define UART_R_FCON (0x04) -#define UART_R_BRCON (0x08) -#define UART_R_CON (0x0c) -#define UART_R_STATUS (0x10) -#define UART_R_RAWISR (0x14) -#define UART_R_INTEN (0x18) -#define UART_R_ISR (0x1c) - -#define UARTEN (0x01) /* UART enable */ -#define SIRDIS (0x02) /* Serial IR disable (UART1 only) */ - -#define RxEmpty (0x10) -#define TxEmpty (0x80) -#define TxFull (0x20) -#define nRxRdy RxEmpty -#define nTxRdy TxFull -#define TxBusy (0x08) - -#define RxBreak (0x0800) -#define RxOverrunError (0x0400) -#define RxParityError (0x0200) -#define RxFramingError (0x0100) -#define RxError (RxBreak | RxOverrunError | RxParityError | RxFramingError) - -#define DCD (0x04) -#define DSR (0x02) -#define CTS (0x01) - -#define RxInt (0x01) -#define TxInt (0x02) -#define ModemInt (0x04) -#define RxTimeoutInt (0x08) - -#define MSEOI (0x10) - -#define WLEN_8 (0x60) -#define WLEN_7 (0x40) -#define WLEN_6 (0x20) -#define WLEN_5 (0x00) -#define WLEN (0x60) /* Mask for all word-length bits */ -#define STP2 (0x08) -#define PEN (0x02) /* Parity Enable */ -#define EPS (0x04) /* Even Parity Set */ -#define FEN (0x10) /* FIFO Enable */ -#define BRK (0x01) /* Send Break */ - - -struct uart_port_lh7a40x { - struct uart_port port; - unsigned int statusPrev; /* Most recently read modem status */ -}; - -static void lh7a40xuart_stop_tx (struct uart_port* port) -{ - BIT_CLR (port, UART_R_INTEN, TxInt); -} - -static void lh7a40xuart_start_tx (struct uart_port* port) -{ - BIT_SET (port, UART_R_INTEN, TxInt); - - /* *** FIXME: do I need to check for startup of the - transmitter? The old driver did, but AMBA - doesn't . 
*/ -} - -static void lh7a40xuart_stop_rx (struct uart_port* port) -{ - BIT_SET (port, UART_R_INTEN, RxTimeoutInt | RxInt); -} - -static void lh7a40xuart_enable_ms (struct uart_port* port) -{ - BIT_SET (port, UART_R_INTEN, ModemInt); -} - -static void lh7a40xuart_rx_chars (struct uart_port* port) -{ - struct tty_struct* tty = port->state->port.tty; - int cbRxMax = 256; /* (Gross) limit on receive */ - unsigned int data; /* Received data and status */ - unsigned int flag; - - while (!(UR (port, UART_R_STATUS) & nRxRdy) && --cbRxMax) { - data = UR (port, UART_R_DATA); - flag = TTY_NORMAL; - ++port->icount.rx; - - if (unlikely(data & RxError)) { - if (data & RxBreak) { - data &= ~(RxFramingError | RxParityError); - ++port->icount.brk; - if (uart_handle_break (port)) - continue; - } - else if (data & RxParityError) - ++port->icount.parity; - else if (data & RxFramingError) - ++port->icount.frame; - if (data & RxOverrunError) - ++port->icount.overrun; - - /* Mask by termios, leave Rx'd byte */ - data &= port->read_status_mask | 0xff; - - if (data & RxBreak) - flag = TTY_BREAK; - else if (data & RxParityError) - flag = TTY_PARITY; - else if (data & RxFramingError) - flag = TTY_FRAME; - } - - if (uart_handle_sysrq_char (port, (unsigned char) data)) - continue; - - uart_insert_char(port, data, RxOverrunError, data, flag); - } - tty_flip_buffer_push (tty); - return; -} - -static void lh7a40xuart_tx_chars (struct uart_port* port) -{ - struct circ_buf* xmit = &port->state->xmit; - int cbTxMax = port->fifosize; - - if (port->x_char) { - UR (port, UART_R_DATA) = port->x_char; - ++port->icount.tx; - port->x_char = 0; - return; - } - if (uart_circ_empty (xmit) || uart_tx_stopped (port)) { - lh7a40xuart_stop_tx (port); - return; - } - - /* Unlike the AMBA UART, the lh7a40x UART does not guarantee - that at least half of the FIFO is empty. Instead, we check - status for every character. Using the AMBA method causes - the transmitter to drop characters. 
*/ - - do { - UR (port, UART_R_DATA) = xmit->buf[xmit->tail]; - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - ++port->icount.tx; - if (uart_circ_empty(xmit)) - break; - } while (!(UR (port, UART_R_STATUS) & nTxRdy) - && cbTxMax--); - - if (uart_circ_chars_pending (xmit) < WAKEUP_CHARS) - uart_write_wakeup (port); - - if (uart_circ_empty (xmit)) - lh7a40xuart_stop_tx (port); -} - -static void lh7a40xuart_modem_status (struct uart_port* port) -{ - unsigned int status = UR (port, UART_R_STATUS); - unsigned int delta - = status ^ ((struct uart_port_lh7a40x*) port)->statusPrev; - - BIT_SET (port, UART_R_RAWISR, MSEOI); /* Clear modem status intr */ - - if (!delta) /* Only happens if we missed 2 transitions */ - return; - - ((struct uart_port_lh7a40x*) port)->statusPrev = status; - - if (delta & DCD) - uart_handle_dcd_change (port, status & DCD); - - if (delta & DSR) - ++port->icount.dsr; - - if (delta & CTS) - uart_handle_cts_change (port, status & CTS); - - wake_up_interruptible (&port->state->port.delta_msr_wait); -} - -static irqreturn_t lh7a40xuart_int (int irq, void* dev_id) -{ - struct uart_port* port = dev_id; - unsigned int cLoopLimit = ISR_LOOP_LIMIT; - unsigned int isr = UR (port, UART_R_ISR); - - - do { - if (isr & (RxInt | RxTimeoutInt)) - lh7a40xuart_rx_chars(port); - if (isr & ModemInt) - lh7a40xuart_modem_status (port); - if (isr & TxInt) - lh7a40xuart_tx_chars (port); - - if (--cLoopLimit == 0) - break; - - isr = UR (port, UART_R_ISR); - } while (isr & (RxInt | TxInt | RxTimeoutInt)); - - return IRQ_HANDLED; -} - -static unsigned int lh7a40xuart_tx_empty (struct uart_port* port) -{ - return (UR (port, UART_R_STATUS) & TxEmpty) ? TIOCSER_TEMT : 0; -} - -static unsigned int lh7a40xuart_get_mctrl (struct uart_port* port) -{ - unsigned int result = 0; - unsigned int status = UR (port, UART_R_STATUS); - - if (status & DCD) - result |= TIOCM_CAR; - if (status & DSR) - result |= TIOCM_DSR; - if (status & CTS) - result |= TIOCM_CTS; - - return result; -} - -static void lh7a40xuart_set_mctrl (struct uart_port* port, unsigned int mctrl) -{ - /* None of the ports supports DTR. UART1 supports RTS through GPIO. */ - /* Note, kernel appears to be setting DTR and RTS on console. */ - - /* *** FIXME: this deserves more work. There's some work in - tracing all of the IO pins. */ -#if 0 - if( port->mapbase == UART1_PHYS) { - gpioRegs_t *gpio = (gpioRegs_t *)IO_ADDRESS(GPIO_PHYS); - - if (mctrl & TIOCM_RTS) - gpio->pbdr &= ~GPIOB_UART1_RTS; - else - gpio->pbdr |= GPIOB_UART1_RTS; - } -#endif -} - -static void lh7a40xuart_break_ctl (struct uart_port* port, int break_state) -{ - unsigned long flags; - - spin_lock_irqsave(&port->lock, flags); - if (break_state == -1) - BIT_SET (port, UART_R_FCON, BRK); /* Assert break */ - else - BIT_CLR (port, UART_R_FCON, BRK); /* Deassert break */ - spin_unlock_irqrestore(&port->lock, flags); -} - -static int lh7a40xuart_startup (struct uart_port* port) -{ - int retval; - - retval = request_irq (port->irq, lh7a40xuart_int, 0, - "serial_lh7a40x", port); - if (retval) - return retval; - - /* Initial modem control-line settings */ - ((struct uart_port_lh7a40x*) port)->statusPrev - = UR (port, UART_R_STATUS); - - /* There is presently no configuration option to enable IR. - Thus, we always disable it. 
*/ - - BIT_SET (port, UART_R_CON, UARTEN | SIRDIS); - BIT_SET (port, UART_R_INTEN, RxTimeoutInt | RxInt); - - return 0; -} - -static void lh7a40xuart_shutdown (struct uart_port* port) -{ - free_irq (port->irq, port); - BIT_CLR (port, UART_R_FCON, BRK | FEN); - BIT_CLR (port, UART_R_CON, UARTEN); -} - -static void lh7a40xuart_set_termios (struct uart_port* port, - struct ktermios* termios, - struct ktermios* old) -{ - unsigned int con; - unsigned int inten; - unsigned int fcon; - unsigned long flags; - unsigned int baud; - unsigned int quot; - - baud = uart_get_baud_rate (port, termios, old, 8, port->uartclk/16); - quot = uart_get_divisor (port, baud); /* -1 performed elsewhere */ - - switch (termios->c_cflag & CSIZE) { - case CS5: - fcon = WLEN_5; - break; - case CS6: - fcon = WLEN_6; - break; - case CS7: - fcon = WLEN_7; - break; - case CS8: - default: - fcon = WLEN_8; - break; - } - if (termios->c_cflag & CSTOPB) - fcon |= STP2; - if (termios->c_cflag & PARENB) { - fcon |= PEN; - if (!(termios->c_cflag & PARODD)) - fcon |= EPS; - } - if (port->fifosize > 1) - fcon |= FEN; - - spin_lock_irqsave (&port->lock, flags); - - uart_update_timeout (port, termios->c_cflag, baud); - - port->read_status_mask = RxOverrunError; - if (termios->c_iflag & INPCK) - port->read_status_mask |= RxFramingError | RxParityError; - if (termios->c_iflag & (BRKINT | PARMRK)) - port->read_status_mask |= RxBreak; - - /* Figure mask for status we ignore */ - port->ignore_status_mask = 0; - if (termios->c_iflag & IGNPAR) - port->ignore_status_mask |= RxFramingError | RxParityError; - if (termios->c_iflag & IGNBRK) { - port->ignore_status_mask |= RxBreak; - /* Ignore overrun when ignorning parity */ - /* *** FIXME: is this in the right place? */ - if (termios->c_iflag & IGNPAR) - port->ignore_status_mask |= RxOverrunError; - } - - /* Ignore all receive errors when receive disabled */ - if ((termios->c_cflag & CREAD) == 0) - port->ignore_status_mask |= RxError; - - con = UR (port, UART_R_CON); - inten = (UR (port, UART_R_INTEN) & ~ModemInt); - - if (UART_ENABLE_MS (port, termios->c_cflag)) - inten |= ModemInt; - - BIT_CLR (port, UART_R_CON, UARTEN); /* Disable UART */ - UR (port, UART_R_INTEN) = 0; /* Disable interrupts */ - UR (port, UART_R_BRCON) = quot - 1; /* Set baud rate divisor */ - UR (port, UART_R_FCON) = fcon; /* Set FIFO and frame ctrl */ - UR (port, UART_R_INTEN) = inten; /* Enable interrupts */ - UR (port, UART_R_CON) = con; /* Restore UART mode */ - - spin_unlock_irqrestore(&port->lock, flags); -} - -static const char* lh7a40xuart_type (struct uart_port* port) -{ - return port->type == PORT_LH7A40X ? "LH7A40X" : NULL; -} - -static void lh7a40xuart_release_port (struct uart_port* port) -{ - release_mem_region (port->mapbase, UART_REG_SIZE); -} - -static int lh7a40xuart_request_port (struct uart_port* port) -{ - return request_mem_region (port->mapbase, UART_REG_SIZE, - "serial_lh7a40x") != NULL - ? 0 : -EBUSY; -} - -static void lh7a40xuart_config_port (struct uart_port* port, int flags) -{ - if (flags & UART_CONFIG_TYPE) { - port->type = PORT_LH7A40X; - lh7a40xuart_request_port (port); - } -} - -static int lh7a40xuart_verify_port (struct uart_port* port, - struct serial_struct* ser) -{ - int ret = 0; - - if (ser->type != PORT_UNKNOWN && ser->type != PORT_LH7A40X) - ret = -EINVAL; - if (ser->irq < 0 || ser->irq >= nr_irqs) - ret = -EINVAL; - if (ser->baud_base < 9600) /* *** FIXME: is this true? 
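The termios handling above is a straight translation of c_cflag bits into a hardware frame-control word. The same mapping in isolation, as a user-space program; the WLEN_x/STP2/PEN/EPS values are invented stand-ins for the real register encoding.

#include <stdio.h>
#include <termios.h>

/* Illustrative frame-control encodings; the real values are hardware specific. */
#define WLEN_5 0x0
#define WLEN_6 0x1
#define WLEN_7 0x2
#define WLEN_8 0x3
#define STP2   0x4   /* two stop bits */
#define PEN    0x8   /* parity enable */
#define EPS    0x10  /* even parity   */

static unsigned frame_control(tcflag_t cflag)
{
	unsigned fcon;

	switch (cflag & CSIZE) {
	case CS5: fcon = WLEN_5; break;
	case CS6: fcon = WLEN_6; break;
	case CS7: fcon = WLEN_7; break;
	default:  fcon = WLEN_8; break;      /* CS8 and anything unexpected */
	}
	if (cflag & CSTOPB)
		fcon |= STP2;
	if (cflag & PARENB) {
		fcon |= PEN;
		if (!(cflag & PARODD))
			fcon |= EPS;         /* even parity when PARODD is clear */
	}
	return fcon;
}

int main(void)
{
	printf("8N1 -> %#x\n", frame_control(CS8));
	printf("7E1 -> %#x\n", frame_control(CS7 | PARENB));
	printf("8O2 -> %#x\n", frame_control(CS8 | PARENB | PARODD | CSTOPB));
	return 0;
}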
*/ - ret = -EINVAL; - return ret; -} - -static struct uart_ops lh7a40x_uart_ops = { - .tx_empty = lh7a40xuart_tx_empty, - .set_mctrl = lh7a40xuart_set_mctrl, - .get_mctrl = lh7a40xuart_get_mctrl, - .stop_tx = lh7a40xuart_stop_tx, - .start_tx = lh7a40xuart_start_tx, - .stop_rx = lh7a40xuart_stop_rx, - .enable_ms = lh7a40xuart_enable_ms, - .break_ctl = lh7a40xuart_break_ctl, - .startup = lh7a40xuart_startup, - .shutdown = lh7a40xuart_shutdown, - .set_termios = lh7a40xuart_set_termios, - .type = lh7a40xuart_type, - .release_port = lh7a40xuart_release_port, - .request_port = lh7a40xuart_request_port, - .config_port = lh7a40xuart_config_port, - .verify_port = lh7a40xuart_verify_port, -}; - -static struct uart_port_lh7a40x lh7a40x_ports[DEV_NR] = { - { - .port = { - .membase = (void*) io_p2v (UART1_PHYS), - .mapbase = UART1_PHYS, - .iotype = UPIO_MEM, - .irq = IRQ_UART1INTR, - .uartclk = 14745600/2, - .fifosize = 16, - .ops = &lh7a40x_uart_ops, - .flags = UPF_BOOT_AUTOCONF, - .line = 0, - }, - }, - { - .port = { - .membase = (void*) io_p2v (UART2_PHYS), - .mapbase = UART2_PHYS, - .iotype = UPIO_MEM, - .irq = IRQ_UART2INTR, - .uartclk = 14745600/2, - .fifosize = 16, - .ops = &lh7a40x_uart_ops, - .flags = UPF_BOOT_AUTOCONF, - .line = 1, - }, - }, - { - .port = { - .membase = (void*) io_p2v (UART3_PHYS), - .mapbase = UART3_PHYS, - .iotype = UPIO_MEM, - .irq = IRQ_UART3INTR, - .uartclk = 14745600/2, - .fifosize = 16, - .ops = &lh7a40x_uart_ops, - .flags = UPF_BOOT_AUTOCONF, - .line = 2, - }, - }, -}; - -#ifndef CONFIG_SERIAL_LH7A40X_CONSOLE -# define LH7A40X_CONSOLE NULL -#else -# define LH7A40X_CONSOLE &lh7a40x_console - -static void lh7a40xuart_console_putchar(struct uart_port *port, int ch) -{ - while (UR(port, UART_R_STATUS) & nTxRdy) - ; - UR(port, UART_R_DATA) = ch; -} - -static void lh7a40xuart_console_write (struct console* co, - const char* s, - unsigned int count) -{ - struct uart_port* port = &lh7a40x_ports[co->index].port; - unsigned int con = UR (port, UART_R_CON); - unsigned int inten = UR (port, UART_R_INTEN); - - - UR (port, UART_R_INTEN) = 0; /* Disable all interrupts */ - BIT_SET (port, UART_R_CON, UARTEN | SIRDIS); /* Enable UART */ - - uart_console_write(port, s, count, lh7a40xuart_console_putchar); - - /* Wait until all characters are sent */ - while (UR (port, UART_R_STATUS) & TxBusy) - ; - - /* Restore control and interrupt mask */ - UR (port, UART_R_CON) = con; - UR (port, UART_R_INTEN) = inten; -} - -static void __init lh7a40xuart_console_get_options (struct uart_port* port, - int* baud, - int* parity, - int* bits) -{ - if (UR (port, UART_R_CON) & UARTEN) { - unsigned int fcon = UR (port, UART_R_FCON); - unsigned int quot = UR (port, UART_R_BRCON) + 1; - - switch (fcon & (PEN | EPS)) { - default: *parity = 'n'; break; - case PEN: *parity = 'o'; break; - case PEN | EPS: *parity = 'e'; break; - } - - switch (fcon & WLEN) { - default: - case WLEN_8: *bits = 8; break; - case WLEN_7: *bits = 7; break; - case WLEN_6: *bits = 6; break; - case WLEN_5: *bits = 5; break; - } - - *baud = port->uartclk/(16*quot); - } -} - -static int __init lh7a40xuart_console_setup (struct console* co, char* options) -{ - struct uart_port* port; - int baud = 38400; - int bits = 8; - int parity = 'n'; - int flow = 'n'; - - if (co->index >= DEV_NR) /* Bounds check on device number */ - co->index = 0; - port = &lh7a40x_ports[co->index].port; - - if (options) - uart_parse_options (options, &baud, &parity, &bits, &flow); - else - lh7a40xuart_console_get_options (port, &baud, &parity, &bits); - - return 
uart_set_options (port, co, baud, parity, bits, flow); -} - -static struct uart_driver lh7a40x_reg; -static struct console lh7a40x_console = { - .name = "ttyAM", - .write = lh7a40xuart_console_write, - .device = uart_console_device, - .setup = lh7a40xuart_console_setup, - .flags = CON_PRINTBUFFER, - .index = -1, - .data = &lh7a40x_reg, -}; - -static int __init lh7a40xuart_console_init(void) -{ - register_console (&lh7a40x_console); - return 0; -} - -console_initcall (lh7a40xuart_console_init); - -#endif - -static struct uart_driver lh7a40x_reg = { - .owner = THIS_MODULE, - .driver_name = "ttyAM", - .dev_name = "ttyAM", - .major = DEV_MAJOR, - .minor = DEV_MINOR, - .nr = DEV_NR, - .cons = LH7A40X_CONSOLE, -}; - -static int __init lh7a40xuart_init(void) -{ - int ret; - - printk (KERN_INFO "serial: LH7A40X serial driver\n"); - - ret = uart_register_driver (&lh7a40x_reg); - - if (ret == 0) { - int i; - - for (i = 0; i < DEV_NR; i++) { - /* UART3, when used, requires GPIO pin reallocation */ - if (lh7a40x_ports[i].port.mapbase == UART3_PHYS) - GPIO_PINMUX |= 1<<3; - uart_add_one_port (&lh7a40x_reg, - &lh7a40x_ports[i].port); - } - } - return ret; -} - -static void __exit lh7a40xuart_exit(void) -{ - int i; - - for (i = 0; i < DEV_NR; i++) - uart_remove_one_port (&lh7a40x_reg, &lh7a40x_ports[i].port); - - uart_unregister_driver (&lh7a40x_reg); -} - -module_init (lh7a40xuart_init); -module_exit (lh7a40xuart_exit); - -MODULE_AUTHOR ("Marc Singer"); -MODULE_DESCRIPTION ("Sharp LH7A40X serial port driver"); -MODULE_LICENSE ("GPL"); diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 92c91c83edd..eb7958c675a 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -47,7 +47,6 @@ #include <linux/clk.h> #include <linux/ctype.h> #include <linux/err.h> -#include <linux/list.h> #include <linux/dmaengine.h> #include <linux/scatterlist.h> #include <linux/slab.h> @@ -65,11 +64,8 @@ struct sci_port { struct uart_port port; - /* Port type */ - unsigned int type; - - /* Port IRQs: ERI, RXI, TXI, BRI (optional) */ - unsigned int irqs[SCIx_NR_IRQS]; + /* Platform configuration */ + struct plat_sci_port *cfg; /* Port enable callback */ void (*enable)(struct uart_port *port); @@ -81,26 +77,15 @@ struct sci_port { struct timer_list break_timer; int break_flag; - /* SCSCR initialization */ - unsigned int scscr; - - /* SCBRR calculation algo */ - unsigned int scbrr_algo_id; - /* Interface clock */ struct clk *iclk; /* Function clock */ struct clk *fclk; - struct list_head node; - struct dma_chan *chan_tx; struct dma_chan *chan_rx; #ifdef CONFIG_SERIAL_SH_SCI_DMA - struct device *dma_dev; - unsigned int slave_tx; - unsigned int slave_rx; struct dma_async_tx_descriptor *desc_tx; struct dma_async_tx_descriptor *desc_rx[2]; dma_cookie_t cookie_tx; @@ -117,16 +102,14 @@ struct sci_port { struct timer_list rx_timer; unsigned int rx_timeout; #endif -}; -struct sh_sci_priv { - spinlock_t lock; - struct list_head ports; - struct notifier_block clk_nb; + struct notifier_block freq_transition; }; /* Function prototypes */ +static void sci_start_tx(struct uart_port *port); static void sci_stop_tx(struct uart_port *port); +static void sci_start_rx(struct uart_port *port); #define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS @@ -142,12 +125,6 @@ to_sci_port(struct uart_port *uart) #if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE) #ifdef CONFIG_CONSOLE_POLL -static inline void handle_error(struct uart_port *port) -{ - /* Clear error flags */ - sci_out(port, SCxSR, 
SCxSR_ERROR_CLEAR(port)); -} - static int sci_poll_get_char(struct uart_port *port) { unsigned short status; @@ -156,7 +133,7 @@ static int sci_poll_get_char(struct uart_port *port) do { status = sci_in(port, SCxSR); if (status & SCxSR_ERRORS(port)) { - handle_error(port); + sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port)); continue; } break; @@ -475,7 +452,7 @@ static void sci_transmit_chars(struct uart_port *port) /* On SH3, SCIF may read end-of-break as a space->mark char */ #define STEPFN(c) ({int __c = (c); (((__c-1)|(__c)) == -1); }) -static inline void sci_receive_chars(struct uart_port *port) +static void sci_receive_chars(struct uart_port *port) { struct sci_port *sci_port = to_sci_port(port); struct tty_struct *tty = port->state->port.tty; @@ -566,18 +543,20 @@ static inline void sci_receive_chars(struct uart_port *port) } #define SCI_BREAK_JIFFIES (HZ/20) -/* The sci generates interrupts during the break, + +/* + * The sci generates interrupts during the break, * 1 per millisecond or so during the break period, for 9600 baud. * So dont bother disabling interrupts. * But dont want more than 1 break event. * Use a kernel timer to periodically poll the rx line until * the break is finished. */ -static void sci_schedule_break_timer(struct sci_port *port) +static inline void sci_schedule_break_timer(struct sci_port *port) { - port->break_timer.expires = jiffies + SCI_BREAK_JIFFIES; - add_timer(&port->break_timer); + mod_timer(&port->break_timer, jiffies + SCI_BREAK_JIFFIES); } + /* Ensure that two consecutive samples find the break over. */ static void sci_break_timer(unsigned long data) { @@ -594,7 +573,7 @@ static void sci_break_timer(unsigned long data) port->break_flag = 0; } -static inline int sci_handle_errors(struct uart_port *port) +static int sci_handle_errors(struct uart_port *port) { int copied = 0; unsigned short status = sci_in(port, SCxSR); @@ -650,7 +629,7 @@ static inline int sci_handle_errors(struct uart_port *port) return copied; } -static inline int sci_handle_fifo_overrun(struct uart_port *port) +static int sci_handle_fifo_overrun(struct uart_port *port) { struct tty_struct *tty = port->state->port.tty; int copied = 0; @@ -671,7 +650,7 @@ static inline int sci_handle_fifo_overrun(struct uart_port *port) return copied; } -static inline int sci_handle_breaks(struct uart_port *port) +static int sci_handle_breaks(struct uart_port *port) { int copied = 0; unsigned short status = sci_in(port, SCxSR); @@ -794,7 +773,7 @@ static inline unsigned long port_rx_irq_mask(struct uart_port *port) * it's unset, it's logically inferred that there's no point in * testing for it. 
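Switching the break poll from add_timer() to mod_timer(), as the hunk above does, gives the usual re-arming pattern for periodic polling without leaving the interrupt enabled. A compile-oriented sketch of that pattern as a tiny module, using the setup_timer()/unsigned-long-data timer API of this kernel generation; poll_state and still_in_break are invented for the example.

#include <linux/module.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

#define POLL_JIFFIES (HZ / 20)              /* ~50 ms, same order as SCI_BREAK_JIFFIES */

struct poll_state {
	struct timer_list timer;
	int still_in_break;                 /* hypothetical condition that keeps us polling */
};

static struct poll_state ps;

static void poll_fn(unsigned long data)
{
	struct poll_state *p = (struct poll_state *)data;

	if (p->still_in_break)
		/* Condition persists: push the expiry forward and run again later. */
		mod_timer(&p->timer, jiffies + POLL_JIFFIES);
	/* Otherwise simply let the timer lapse; nothing to clean up here. */
}

static int __init poll_init(void)
{
	ps.still_in_break = 1;
	setup_timer(&ps.timer, poll_fn, (unsigned long)&ps);
	mod_timer(&ps.timer, jiffies + POLL_JIFFIES);   /* arms an inactive timer, re-arms a pending one */
	return 0;
}

static void __exit poll_exit(void)
{
	ps.still_in_break = 0;
	del_timer_sync(&ps.timer);          /* make sure no callback is still running */
}

module_init(poll_init);
module_exit(poll_exit);
MODULE_LICENSE("GPL");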
*/ - return SCSCR_RIE | (to_sci_port(port)->scscr & SCSCR_REIE); + return SCSCR_RIE | (to_sci_port(port)->cfg->scscr & SCSCR_REIE); } static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr) @@ -839,17 +818,18 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr) static int sci_notifier(struct notifier_block *self, unsigned long phase, void *p) { - struct sh_sci_priv *priv = container_of(self, - struct sh_sci_priv, clk_nb); struct sci_port *sci_port; unsigned long flags; + sci_port = container_of(self, struct sci_port, freq_transition); + if ((phase == CPUFREQ_POSTCHANGE) || (phase == CPUFREQ_RESUMECHANGE)) { - spin_lock_irqsave(&priv->lock, flags); - list_for_each_entry(sci_port, &priv->ports, node) - sci_port->port.uartclk = clk_get_rate(sci_port->iclk); - spin_unlock_irqrestore(&priv->lock, flags); + struct uart_port *port = &sci_port->port; + + spin_lock_irqsave(&port->lock, flags); + port->uartclk = clk_get_rate(sci_port->iclk); + spin_unlock_irqrestore(&port->lock, flags); } return NOTIFY_OK; @@ -882,21 +862,21 @@ static int sci_request_irq(struct sci_port *port) const char *desc[] = { "SCI Receive Error", "SCI Receive Data Full", "SCI Transmit Data Empty", "SCI Break" }; - if (port->irqs[0] == port->irqs[1]) { - if (unlikely(!port->irqs[0])) + if (port->cfg->irqs[0] == port->cfg->irqs[1]) { + if (unlikely(!port->cfg->irqs[0])) return -ENODEV; - if (request_irq(port->irqs[0], sci_mpxed_interrupt, + if (request_irq(port->cfg->irqs[0], sci_mpxed_interrupt, IRQF_DISABLED, "sci", port)) { dev_err(port->port.dev, "Can't allocate IRQ\n"); return -ENODEV; } } else { for (i = 0; i < ARRAY_SIZE(handlers); i++) { - if (unlikely(!port->irqs[i])) + if (unlikely(!port->cfg->irqs[i])) continue; - if (request_irq(port->irqs[i], handlers[i], + if (request_irq(port->cfg->irqs[i], handlers[i], IRQF_DISABLED, desc[i], port)) { dev_err(port->port.dev, "Can't allocate IRQ\n"); return -ENODEV; @@ -911,14 +891,14 @@ static void sci_free_irq(struct sci_port *port) { int i; - if (port->irqs[0] == port->irqs[1]) - free_irq(port->irqs[0], port); + if (port->cfg->irqs[0] == port->cfg->irqs[1]) + free_irq(port->cfg->irqs[0], port); else { - for (i = 0; i < ARRAY_SIZE(port->irqs); i++) { - if (!port->irqs[i]) + for (i = 0; i < ARRAY_SIZE(port->cfg->irqs); i++) { + if (!port->cfg->irqs[i]) continue; - free_irq(port->irqs[i], port); + free_irq(port->cfg->irqs[i], port); } } } @@ -1037,9 +1017,6 @@ static void sci_dma_rx_complete(void *arg) schedule_work(&s->work_rx); } -static void sci_start_rx(struct uart_port *port); -static void sci_start_tx(struct uart_port *port); - static void sci_rx_dma_release(struct sci_port *s, bool enable_pio) { struct dma_chan *chan = s->chan_rx; @@ -1325,7 +1302,7 @@ static void rx_timer_fn(unsigned long arg) if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) { scr &= ~0x4000; - enable_irq(s->irqs[1]); + enable_irq(s->cfg->irqs[1]); } sci_out(port, SCSCR, scr | SCSCR_RIE); dev_dbg(port->dev, "DMA Rx timed out\n"); @@ -1341,9 +1318,9 @@ static void sci_request_dma(struct uart_port *port) int nent; dev_dbg(port->dev, "%s: port %d DMA %p\n", __func__, - port->line, s->dma_dev); + port->line, s->cfg->dma_dev); - if (!s->dma_dev) + if (!s->cfg->dma_dev) return; dma_cap_zero(mask); @@ -1352,8 +1329,8 @@ static void sci_request_dma(struct uart_port *port) param = &s->param_tx; /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */ - param->slave_id = s->slave_tx; - param->dma_dev = s->dma_dev; + param->slave_id = s->cfg->dma_slave_tx; + param->dma_dev = s->cfg->dma_dev; s->cookie_tx = 
-EINVAL; chan = dma_request_channel(mask, filter, param); @@ -1381,8 +1358,8 @@ static void sci_request_dma(struct uart_port *port) param = &s->param_rx; /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */ - param->slave_id = s->slave_rx; - param->dma_dev = s->dma_dev; + param->slave_id = s->cfg->dma_slave_rx; + param->dma_dev = s->cfg->dma_dev; chan = dma_request_channel(mask, filter, param); dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan); @@ -1427,7 +1404,7 @@ static void sci_free_dma(struct uart_port *port) { struct sci_port *s = to_sci_port(port); - if (!s->dma_dev) + if (!s->cfg->dma_dev) return; if (s->chan_tx) @@ -1435,21 +1412,32 @@ static void sci_free_dma(struct uart_port *port) if (s->chan_rx) sci_rx_dma_release(s, false); } +#else +static inline void sci_request_dma(struct uart_port *port) +{ +} + +static inline void sci_free_dma(struct uart_port *port) +{ +} #endif static int sci_startup(struct uart_port *port) { struct sci_port *s = to_sci_port(port); + int ret; dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); if (s->enable) s->enable(port); - sci_request_irq(s); -#ifdef CONFIG_SERIAL_SH_SCI_DMA + ret = sci_request_irq(s); + if (unlikely(ret < 0)) + return ret; + sci_request_dma(port); -#endif + sci_start_tx(port); sci_start_rx(port); @@ -1464,9 +1452,8 @@ static void sci_shutdown(struct uart_port *port) sci_stop_rx(port); sci_stop_tx(port); -#ifdef CONFIG_SERIAL_SH_SCI_DMA + sci_free_dma(port); -#endif sci_free_irq(s); if (s->disable) @@ -1491,6 +1478,7 @@ static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps, /* Warn, but use a safe default */ WARN_ON(1); + return ((freq + 16 * bps) / (32 * bps) - 1); } @@ -1514,7 +1502,10 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios, baud = uart_get_baud_rate(port, termios, old, 0, max_baud); if (likely(baud && port->uartclk)) - t = sci_scbrr_calc(s->scbrr_algo_id, baud, port->uartclk); + t = sci_scbrr_calc(s->cfg->scbrr_algo_id, baud, port->uartclk); + + if (s->enable) + s->enable(port); do { status = sci_in(port, SCxSR); @@ -1526,6 +1517,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios, sci_out(port, SCFCR, scfcr | SCFCR_RFRST | SCFCR_TFRST); smr_val = sci_in(port, SCSMR) & 3; + if ((termios->c_cflag & CSIZE) == CS7) smr_val |= 0x40; if (termios->c_cflag & PARENB) @@ -1540,7 +1532,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios, sci_out(port, SCSMR, smr_val); dev_dbg(port->dev, "%s: SMR %x, t %x, SCSCR %x\n", __func__, smr_val, t, - s->scscr); + s->cfg->scscr); if (t > 0) { if (t >= 256) { @@ -1556,7 +1548,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios, sci_init_pins(port, termios->c_cflag); sci_out(port, SCFCR, scfcr | ((termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0)); - sci_out(port, SCSCR, s->scscr); + sci_out(port, SCSCR, s->cfg->scscr); #ifdef CONFIG_SERIAL_SH_SCI_DMA /* @@ -1582,6 +1574,9 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios, if ((termios->c_cflag & CREAD) != 0) sci_start_rx(port); + + if (s->disable) + s->disable(port); } static const char *sci_type(struct uart_port *port) @@ -1602,31 +1597,33 @@ static const char *sci_type(struct uart_port *port) return NULL; } -static void sci_release_port(struct uart_port *port) +static inline unsigned long sci_port_size(struct uart_port *port) { - /* Nothing here yet .. */ -} - -static int sci_request_port(struct uart_port *port) -{ - /* Nothing here yet .. 
*/ - return 0; + /* + * Pick an arbitrary size that encapsulates all of the base + * registers by default. This can be optimized later, or derived + * from platform resource data at such a time that ports begin to + * behave more erratically. + */ + return 64; } -static void sci_config_port(struct uart_port *port, int flags) +static int sci_remap_port(struct uart_port *port) { - struct sci_port *s = to_sci_port(port); - - port->type = s->type; + unsigned long size = sci_port_size(port); + /* + * Nothing to do if there's already an established membase. + */ if (port->membase) - return; + return 0; if (port->flags & UPF_IOREMAP) { - port->membase = ioremap_nocache(port->mapbase, 0x40); - - if (IS_ERR(port->membase)) + port->membase = ioremap_nocache(port->mapbase, size); + if (unlikely(!port->membase)) { dev_err(port->dev, "can't remap port#%d\n", port->line); + return -ENXIO; + } } else { /* * For the simple (and majority of) cases where we don't @@ -1635,13 +1632,54 @@ static void sci_config_port(struct uart_port *port, int flags) */ port->membase = (void __iomem *)port->mapbase; } + + return 0; +} + +static void sci_release_port(struct uart_port *port) +{ + if (port->flags & UPF_IOREMAP) { + iounmap(port->membase); + port->membase = NULL; + } + + release_mem_region(port->mapbase, sci_port_size(port)); +} + +static int sci_request_port(struct uart_port *port) +{ + unsigned long size = sci_port_size(port); + struct resource *res; + int ret; + + res = request_mem_region(port->mapbase, size, dev_name(port->dev)); + if (unlikely(res == NULL)) + return -EBUSY; + + ret = sci_remap_port(port); + if (unlikely(ret != 0)) { + release_resource(res); + return ret; + } + + return 0; +} + +static void sci_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) { + struct sci_port *sport = to_sci_port(port); + + port->type = sport->cfg->type; + sci_request_port(port); + } } static int sci_verify_port(struct uart_port *port, struct serial_struct *ser) { struct sci_port *s = to_sci_port(port); - if (ser->irq != s->irqs[SCIx_TXI_IRQ] || ser->irq > nr_irqs) + if (ser->irq != s->cfg->irqs[SCIx_TXI_IRQ] || ser->irq > nr_irqs) return -EINVAL; if (ser->baud_base < 2400) /* No paper tape reader for Mitch.. */ @@ -1726,36 +1764,29 @@ static int __devinit sci_init_single(struct platform_device *dev, sci_port->break_timer.function = sci_break_timer; init_timer(&sci_port->break_timer); - port->mapbase = p->mapbase; - port->membase = p->membase; + sci_port->cfg = p; - port->irq = p->irqs[SCIx_TXI_IRQ]; + port->mapbase = p->mapbase; + port->type = p->type; port->flags = p->flags; - sci_port->type = port->type = p->type; - sci_port->scscr = p->scscr; - sci_port->scbrr_algo_id = p->scbrr_algo_id; -#ifdef CONFIG_SERIAL_SH_SCI_DMA - sci_port->dma_dev = p->dma_dev; - sci_port->slave_tx = p->dma_slave_tx; - sci_port->slave_rx = p->dma_slave_rx; + /* + * The UART port needs an IRQ value, so we peg this to the TX IRQ + * for the multi-IRQ ports, which is where we are primarily + * concerned with the shutdown path synchronization. + * + * For the muxed case there's nothing more to do. 
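The new request/release pair above follows the standard claim-then-map discipline: reserve the physical window with request_mem_region(), ioremap it, and undo both in reverse order on teardown or on a partial failure. A module-shaped sketch of that pairing; the base address, size and "demo-uart" name are placeholders, not the SCI register window.

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/errno.h>

#define DEMO_BASE 0xffe00000UL              /* invented physical base, illustration only */
#define DEMO_SIZE 64                        /* mirrors the 64-byte default in the patch */

static void __iomem *demo_regs;

static int __init demo_claim(void)
{
	if (!request_mem_region(DEMO_BASE, DEMO_SIZE, "demo-uart"))
		return -EBUSY;              /* someone else owns the window */

	demo_regs = ioremap_nocache(DEMO_BASE, DEMO_SIZE);
	if (!demo_regs) {
		release_mem_region(DEMO_BASE, DEMO_SIZE);   /* undo the claim on failure */
		return -ENXIO;
	}
	return 0;
}

static void __exit demo_release(void)
{
	iounmap(demo_regs);                 /* unmap before dropping the region claim */
	release_mem_region(DEMO_BASE, DEMO_SIZE);
}

module_init(demo_claim);
module_exit(demo_release);
MODULE_LICENSE("GPL");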
+ */ + port->irq = p->irqs[SCIx_TXI_IRQ]; - dev_dbg(port->dev, "%s: DMA device %p, tx %d, rx %d\n", __func__, - p->dma_dev, p->dma_slave_tx, p->dma_slave_rx); -#endif + if (p->dma_dev) + dev_dbg(port->dev, "DMA device %p, tx %d, rx %d\n", + p->dma_dev, p->dma_slave_tx, p->dma_slave_rx); - memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs)); return 0; } #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE -static struct tty_driver *serial_console_device(struct console *co, int *index) -{ - struct uart_driver *p = &sci_uart_driver; - *index = co->index; - return p->tty_driver; -} - static void serial_console_putchar(struct uart_port *port, int ch) { sci_poll_put_char(port, ch); @@ -1768,8 +1799,8 @@ static void serial_console_putchar(struct uart_port *port, int ch) static void serial_console_write(struct console *co, const char *s, unsigned count) { - struct uart_port *port = co->data; - struct sci_port *sci_port = to_sci_port(port); + struct sci_port *sci_port = &sci_ports[co->index]; + struct uart_port *port = &sci_port->port; unsigned short bits; if (sci_port->enable) @@ -1797,32 +1828,17 @@ static int __devinit serial_console_setup(struct console *co, char *options) int ret; /* - * Check whether an invalid uart number has been specified, and - * if so, search for the first available port that does have - * console support. - */ - if (co->index >= SCI_NPORTS) - co->index = 0; - - if (co->data) { - port = co->data; - sci_port = to_sci_port(port); - } else { - sci_port = &sci_ports[co->index]; - port = &sci_port->port; - co->data = port; - } - - /* - * Also need to check port->type, we don't actually have any - * UPIO_PORT ports, but uart_report_port() handily misreports - * it anyways if we don't have a port available by the time this is - * called. + * Refuse to handle any bogus ports. 
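serial_console_putchar()/serial_console_write() above follow the common polled-console shape: uart_console_write() takes care of the LF-to-CRLF expansion and calls back a per-driver putchar that busy-waits for transmitter space. A minimal sketch of that shape; demo_tx_ready(), demo_tx_put() and the static demo_port are stand-ins for the real register accessors and port lookup.

#include <linux/console.h>
#include <linux/serial_core.h>

static struct uart_port demo_port;          /* initialised elsewhere in a real driver */

/* Stand-ins: a real driver polls a status register and writes a data register. */
static int  demo_tx_ready(struct uart_port *port) { return 1; }
static void demo_tx_put(struct uart_port *port, int ch) { }

static void demo_console_putchar(struct uart_port *port, int ch)
{
	while (!demo_tx_ready(port))
		;                           /* busy-wait until the transmitter has room */
	demo_tx_put(port, ch);
}

static void demo_console_write(struct console *co, const char *s, unsigned int count)
{
	/* uart_console_write() expands '\n' to '\r\n' and calls back per character. */
	uart_console_write(&demo_port, s, count, demo_console_putchar);
}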
*/ - if (!port->type) + if (co->index < 0 || co->index >= SCI_NPORTS) return -ENODEV; - sci_config_port(port, 0); + sci_port = &sci_ports[co->index]; + port = &sci_port->port; + + ret = sci_remap_port(port); + if (unlikely(ret != 0)) + return ret; if (sci_port->enable) sci_port->enable(port); @@ -1842,11 +1858,12 @@ static int __devinit serial_console_setup(struct console *co, char *options) static struct console serial_console = { .name = "ttySC", - .device = serial_console_device, + .device = uart_console_device, .write = serial_console_write, .setup = serial_console_setup, .flags = CON_PRINTBUFFER, .index = -1, + .data = &sci_uart_driver, }; static int __init sci_console_init(void) @@ -1856,14 +1873,39 @@ static int __init sci_console_init(void) } console_initcall(sci_console_init); -static struct sci_port early_serial_port; static struct console early_serial_console = { .name = "early_ttySC", .write = serial_console_write, .flags = CON_PRINTBUFFER, + .index = -1, }; + static char early_serial_buf[32]; +static int __devinit sci_probe_earlyprintk(struct platform_device *pdev) +{ + struct plat_sci_port *cfg = pdev->dev.platform_data; + + if (early_serial_console.data) + return -EEXIST; + + early_serial_console.index = pdev->id; + + sci_init_single(NULL, &sci_ports[pdev->id], pdev->id, cfg); + + serial_console_setup(&early_serial_console, early_serial_buf); + + if (!strstr(early_serial_buf, "keep")) + early_serial_console.flags |= CON_BOOT; + + register_console(&early_serial_console); + return 0; +} +#else +static inline int __devinit sci_probe_earlyprintk(struct platform_device *pdev) +{ + return -EINVAL; +} #endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */ #if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) @@ -1885,24 +1927,18 @@ static struct uart_driver sci_uart_driver = { .cons = SCI_CONSOLE, }; - static int sci_remove(struct platform_device *dev) { - struct sh_sci_priv *priv = platform_get_drvdata(dev); - struct sci_port *p; - unsigned long flags; + struct sci_port *port = platform_get_drvdata(dev); - cpufreq_unregister_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER); + cpufreq_unregister_notifier(&port->freq_transition, + CPUFREQ_TRANSITION_NOTIFIER); - spin_lock_irqsave(&priv->lock, flags); - list_for_each_entry(p, &priv->ports, node) { - uart_remove_one_port(&sci_uart_driver, &p->port); - clk_put(p->iclk); - clk_put(p->fclk); - } - spin_unlock_irqrestore(&priv->lock, flags); + uart_remove_one_port(&sci_uart_driver, &port->port); + + clk_put(port->iclk); + clk_put(port->fclk); - kfree(priv); return 0; } @@ -1911,8 +1947,6 @@ static int __devinit sci_probe_single(struct platform_device *dev, struct plat_sci_port *p, struct sci_port *sciport) { - struct sh_sci_priv *priv = platform_get_drvdata(dev); - unsigned long flags; int ret; /* Sanity check */ @@ -1929,68 +1963,35 @@ static int __devinit sci_probe_single(struct platform_device *dev, if (ret) return ret; - ret = uart_add_one_port(&sci_uart_driver, &sciport->port); - if (ret) - return ret; - - INIT_LIST_HEAD(&sciport->node); - - spin_lock_irqsave(&priv->lock, flags); - list_add(&sciport->node, &priv->ports); - spin_unlock_irqrestore(&priv->lock, flags); - - return 0; + return uart_add_one_port(&sci_uart_driver, &sciport->port); } -/* - * Register a set of serial devices attached to a platform device. The - * list is terminated with a zero flags entry, which means we expect - * all entries to have at least UPF_BOOT_AUTOCONF set. Platforms that need - * remapping (such as sh64) should also set UPF_IOREMAP. 
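sci_probe_earlyprintk() above registers a boot console that normally hands control over to the real console later; tagging it CON_BOOT is what makes that hand-over automatic, and the "keep" token in the option string suppresses it. A condensed sketch of just that decision, with an invented console and write stub.

#include <linux/console.h>
#include <linux/string.h>

static void early_demo_write(struct console *co, const char *s, unsigned int n)
{
	/* A real early console bangs the bytes straight at the hardware here. */
}

static struct console early_demo_console = {
	.name  = "early_demo",
	.write = early_demo_write,
	.flags = CON_PRINTBUFFER,
	.index = -1,
};

static void early_demo_register(const char *options)
{
	/*
	 * Without an explicit "keep", mark the console CON_BOOT so the core
	 * unregisters it automatically once a real console is registered.
	 */
	if (!options || !strstr(options, "keep"))
		early_demo_console.flags |= CON_BOOT;

	register_console(&early_demo_console);
}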
- */ static int __devinit sci_probe(struct platform_device *dev) { struct plat_sci_port *p = dev->dev.platform_data; - struct sh_sci_priv *priv; - int i, ret = -EINVAL; + struct sci_port *sp = &sci_ports[dev->id]; + int ret; -#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE - if (is_early_platform_device(dev)) { - if (dev->id == -1) - return -ENOTSUPP; - early_serial_console.index = dev->id; - early_serial_console.data = &early_serial_port.port; - sci_init_single(NULL, &early_serial_port, dev->id, p); - serial_console_setup(&early_serial_console, early_serial_buf); - if (!strstr(early_serial_buf, "keep")) - early_serial_console.flags |= CON_BOOT; - register_console(&early_serial_console); - return 0; - } -#endif + /* + * If we've come here via earlyprintk initialization, head off to + * the special early probe. We don't have sufficient device state + * to make it beyond this yet. + */ + if (is_early_platform_device(dev)) + return sci_probe_earlyprintk(dev); - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; + platform_set_drvdata(dev, sp); - INIT_LIST_HEAD(&priv->ports); - spin_lock_init(&priv->lock); - platform_set_drvdata(dev, priv); + ret = sci_probe_single(dev, dev->id, p, sp); + if (ret) + goto err_unreg; - priv->clk_nb.notifier_call = sci_notifier; - cpufreq_register_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER); + sp->freq_transition.notifier_call = sci_notifier; - if (dev->id != -1) { - ret = sci_probe_single(dev, dev->id, p, &sci_ports[dev->id]); - if (ret) - goto err_unreg; - } else { - for (i = 0; p && p->flags != 0; p++, i++) { - ret = sci_probe_single(dev, i, p, &sci_ports[i]); - if (ret) - goto err_unreg; - } - } + ret = cpufreq_register_notifier(&sp->freq_transition, + CPUFREQ_TRANSITION_NOTIFIER); + if (unlikely(ret < 0)) + goto err_unreg; #ifdef CONFIG_SH_STANDARD_BIOS sh_bios_gdb_detach(); @@ -2005,28 +2006,20 @@ err_unreg: static int sci_suspend(struct device *dev) { - struct sh_sci_priv *priv = dev_get_drvdata(dev); - struct sci_port *p; - unsigned long flags; + struct sci_port *sport = dev_get_drvdata(dev); - spin_lock_irqsave(&priv->lock, flags); - list_for_each_entry(p, &priv->ports, node) - uart_suspend_port(&sci_uart_driver, &p->port); - spin_unlock_irqrestore(&priv->lock, flags); + if (sport) + uart_suspend_port(&sci_uart_driver, &sport->port); return 0; } static int sci_resume(struct device *dev) { - struct sh_sci_priv *priv = dev_get_drvdata(dev); - struct sci_port *p; - unsigned long flags; + struct sci_port *sport = dev_get_drvdata(dev); - spin_lock_irqsave(&priv->lock, flags); - list_for_each_entry(p, &priv->ports, node) - uart_resume_port(&sci_uart_driver, &p->port); - spin_unlock_irqrestore(&priv->lock, flags); + if (sport) + uart_resume_port(&sci_uart_driver, &sport->port); return 0; } diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h index b223d6cbf33..5fefed53fa4 100644 --- a/drivers/tty/serial/sh-sci.h +++ b/drivers/tty/serial/sh-sci.h @@ -54,9 +54,6 @@ # define PBCR 0xa4050102 #elif defined(CONFIG_CPU_SUBTYPE_SH7343) # define SCSPTR0 0xffe00010 /* 16 bit SCIF */ -# define SCSPTR1 0xffe10010 /* 16 bit SCIF */ -# define SCSPTR2 0xffe20010 /* 16 bit SCIF */ -# define SCSPTR3 0xffe30010 /* 16 bit SCIF */ #elif defined(CONFIG_CPU_SUBTYPE_SH7722) # define PADR 0xA4050120 # define PSDR 0xA405013e @@ -69,77 +66,42 @@ # define SCIF_ORER 0x0001 /* overrun error bit */ #elif defined(CONFIG_CPU_SUBTYPE_SH7723) # define SCSPTR0 0xa4050160 -# define SCSPTR1 0xa405013e -# define SCSPTR2 0xa4050160 -# define SCSPTR3 0xa405013e 
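Folding the cpufreq notifier into struct sci_port, as the probe/suspend/resume hunks above do, lets each port refresh its clock-derived state on a frequency transition instead of walking a driver-global list. A hedged sketch of that per-object notifier pattern; demo_port and its fields are invented, and the clk handle is assumed to have been acquired already.

#include <linux/kernel.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <linux/notifier.h>

struct demo_port {
	struct clk *iclk;                        /* interface clock, clk_get() elsewhere */
	unsigned long uartclk;
	struct notifier_block freq_transition;   /* embedded so container_of() works */
};

static int demo_cpufreq_notify(struct notifier_block *nb,
			       unsigned long phase, void *data)
{
	struct demo_port *p = container_of(nb, struct demo_port, freq_transition);

	if (phase == CPUFREQ_POSTCHANGE || phase == CPUFREQ_RESUMECHANGE)
		p->uartclk = clk_get_rate(p->iclk);  /* baud divisors derive from this */

	return NOTIFY_OK;
}

static int demo_register_notifier(struct demo_port *p)
{
	p->freq_transition.notifier_call = demo_cpufreq_notify;
	return cpufreq_register_notifier(&p->freq_transition,
					 CPUFREQ_TRANSITION_NOTIFIER);
}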
-# define SCSPTR4 0xa4050128 -# define SCSPTR5 0xa4050128 # define SCIF_ORER 0x0001 /* overrun error bit */ #elif defined(CONFIG_CPU_SUBTYPE_SH7724) # define SCIF_ORER 0x0001 /* overrun error bit */ #elif defined(CONFIG_CPU_SUBTYPE_SH4_202) # define SCSPTR2 0xffe80020 /* 16 bit SCIF */ # define SCIF_ORER 0x0001 /* overrun error bit */ -#elif defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103) -# define SCIF_PTR2_OFFS 0x0000020 -# define SCSPTR2 ((port->mapbase)+SCIF_PTR2_OFFS) /* 16 bit SCIF */ #elif defined(CONFIG_H83007) || defined(CONFIG_H83068) # define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port) #elif defined(CONFIG_H8S2678) # define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port) #elif defined(CONFIG_CPU_SUBTYPE_SH7757) # define SCSPTR0 0xfe4b0020 -# define SCSPTR1 0xfe4b0020 -# define SCSPTR2 0xfe4b0020 # define SCIF_ORER 0x0001 -# define SCIF_ONLY #elif defined(CONFIG_CPU_SUBTYPE_SH7763) # define SCSPTR0 0xffe00024 /* 16 bit SCIF */ -# define SCSPTR1 0xffe08024 /* 16 bit SCIF */ -# define SCSPTR2 0xffe10020 /* 16 bit SCIF/IRDA */ # define SCIF_ORER 0x0001 /* overrun error bit */ #elif defined(CONFIG_CPU_SUBTYPE_SH7770) # define SCSPTR0 0xff923020 /* 16 bit SCIF */ -# define SCSPTR1 0xff924020 /* 16 bit SCIF */ -# define SCSPTR2 0xff925020 /* 16 bit SCIF */ # define SCIF_ORER 0x0001 /* overrun error bit */ #elif defined(CONFIG_CPU_SUBTYPE_SH7780) # define SCSPTR0 0xffe00024 /* 16 bit SCIF */ -# define SCSPTR1 0xffe10024 /* 16 bit SCIF */ # define SCIF_ORER 0x0001 /* Overrun error bit */ #elif defined(CONFIG_CPU_SUBTYPE_SH7785) || \ defined(CONFIG_CPU_SUBTYPE_SH7786) # define SCSPTR0 0xffea0024 /* 16 bit SCIF */ -# define SCSPTR1 0xffeb0024 /* 16 bit SCIF */ -# define SCSPTR2 0xffec0024 /* 16 bit SCIF */ -# define SCSPTR3 0xffed0024 /* 16 bit SCIF */ -# define SCSPTR4 0xffee0024 /* 16 bit SCIF */ -# define SCSPTR5 0xffef0024 /* 16 bit SCIF */ # define SCIF_ORER 0x0001 /* Overrun error bit */ #elif defined(CONFIG_CPU_SUBTYPE_SH7201) || \ defined(CONFIG_CPU_SUBTYPE_SH7203) || \ defined(CONFIG_CPU_SUBTYPE_SH7206) || \ defined(CONFIG_CPU_SUBTYPE_SH7263) # define SCSPTR0 0xfffe8020 /* 16 bit SCIF */ -# define SCSPTR1 0xfffe8820 /* 16 bit SCIF */ -# define SCSPTR2 0xfffe9020 /* 16 bit SCIF */ -# define SCSPTR3 0xfffe9820 /* 16 bit SCIF */ -# if defined(CONFIG_CPU_SUBTYPE_SH7201) -# define SCSPTR4 0xfffeA020 /* 16 bit SCIF */ -# define SCSPTR5 0xfffeA820 /* 16 bit SCIF */ -# define SCSPTR6 0xfffeB020 /* 16 bit SCIF */ -# define SCSPTR7 0xfffeB820 /* 16 bit SCIF */ -# endif #elif defined(CONFIG_CPU_SUBTYPE_SH7619) # define SCSPTR0 0xf8400020 /* 16 bit SCIF */ -# define SCSPTR1 0xf8410020 /* 16 bit SCIF */ -# define SCSPTR2 0xf8420020 /* 16 bit SCIF */ # define SCIF_ORER 0x0001 /* overrun error bit */ #elif defined(CONFIG_CPU_SUBTYPE_SHX3) # define SCSPTR0 0xffc30020 /* 16 bit SCIF */ -# define SCSPTR1 0xffc40020 /* 16 bit SCIF */ -# define SCSPTR2 0xffc50020 /* 16 bit SCIF */ -# define SCSPTR3 0xffc60020 /* 16 bit SCIF */ # define SCIF_ORER 0x0001 /* Overrun error bit */ #else # error CPU subtype not defined @@ -411,7 +373,6 @@ SCIF_FNS(SCSPTR, 0, 0, 0x24, 16) SCIF_FNS(SCLSR, 0, 0, 0x28, 16) #elif defined(CONFIG_CPU_SUBTYPE_SH7763) SCIF_FNS(SCFDR, 0, 0, 0x1C, 16) -SCIF_FNS(SCSPTR2, 0, 0, 0x20, 16) SCIF_FNS(SCTFDR, 0x0e, 16, 0x1C, 16) SCIF_FNS(SCRFDR, 0x0e, 16, 0x20, 16) SCIF_FNS(SCSPTR, 0, 0, 0x24, 16) diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c index c9014868297..c0b7246d733 100644 --- 
a/drivers/tty/serial/sunhv.c +++ b/drivers/tty/serial/sunhv.c @@ -519,7 +519,7 @@ static struct console sunhv_console = { .data = &sunhv_reg, }; -static int __devinit hv_probe(struct platform_device *op, const struct of_device_id *match) +static int __devinit hv_probe(struct platform_device *op) { struct uart_port *port; unsigned long minor; @@ -629,7 +629,7 @@ static const struct of_device_id hv_match[] = { }; MODULE_DEVICE_TABLE(of, hv_match); -static struct of_platform_driver hv_driver = { +static struct platform_driver hv_driver = { .driver = { .name = "hv", .owner = THIS_MODULE, @@ -644,12 +644,12 @@ static int __init sunhv_init(void) if (tlb_type != hypervisor) return -ENODEV; - return of_register_platform_driver(&hv_driver); + return platform_driver_register(&hv_driver); } static void __exit sunhv_exit(void) { - of_unregister_platform_driver(&hv_driver); + platform_driver_unregister(&hv_driver); } module_init(sunhv_init); diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c index 5b246b18f42..b5fa2a57b9d 100644 --- a/drivers/tty/serial/sunsab.c +++ b/drivers/tty/serial/sunsab.c @@ -1006,7 +1006,7 @@ static int __devinit sunsab_init_one(struct uart_sunsab_port *up, return 0; } -static int __devinit sab_probe(struct platform_device *op, const struct of_device_id *match) +static int __devinit sab_probe(struct platform_device *op) { static int inst; struct uart_sunsab_port *up; @@ -1092,7 +1092,7 @@ static const struct of_device_id sab_match[] = { }; MODULE_DEVICE_TABLE(of, sab_match); -static struct of_platform_driver sab_driver = { +static struct platform_driver sab_driver = { .driver = { .name = "sab", .owner = THIS_MODULE, @@ -1130,12 +1130,12 @@ static int __init sunsab_init(void) } } - return of_register_platform_driver(&sab_driver); + return platform_driver_register(&sab_driver); } static void __exit sunsab_exit(void) { - of_unregister_platform_driver(&sab_driver); + platform_driver_unregister(&sab_driver); if (sunsab_reg.nr) { sunserial_unregister_minors(&sunsab_reg, sunsab_reg.nr); } diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c index 551ebfe3ccb..92aa54550e8 100644 --- a/drivers/tty/serial/sunsu.c +++ b/drivers/tty/serial/sunsu.c @@ -1406,7 +1406,7 @@ static enum su_type __devinit su_get_type(struct device_node *dp) return SU_PORT_PORT; } -static int __devinit su_probe(struct platform_device *op, const struct of_device_id *match) +static int __devinit su_probe(struct platform_device *op) { static int inst; struct device_node *dp = op->dev.of_node; @@ -1543,7 +1543,7 @@ static const struct of_device_id su_match[] = { }; MODULE_DEVICE_TABLE(of, su_match); -static struct of_platform_driver su_driver = { +static struct platform_driver su_driver = { .driver = { .name = "su", .owner = THIS_MODULE, @@ -1586,7 +1586,7 @@ static int __init sunsu_init(void) return err; } - err = of_register_platform_driver(&su_driver); + err = platform_driver_register(&su_driver); if (err && num_uart) sunserial_unregister_minors(&sunsu_reg, num_uart); diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c index c1967ac1c07..99ff9abf57c 100644 --- a/drivers/tty/serial/sunzilog.c +++ b/drivers/tty/serial/sunzilog.c @@ -1399,7 +1399,7 @@ static void __devinit sunzilog_init_hw(struct uart_sunzilog_port *up) static int zilog_irq = -1; -static int __devinit zs_probe(struct platform_device *op, const struct of_device_id *match) +static int __devinit zs_probe(struct platform_device *op) { static int kbm_inst, uart_inst; int inst; @@ -1540,7 +1540,7 @@ 
static const struct of_device_id zs_match[] = { }; MODULE_DEVICE_TABLE(of, zs_match); -static struct of_platform_driver zs_driver = { +static struct platform_driver zs_driver = { .driver = { .name = "zs", .owner = THIS_MODULE, @@ -1576,7 +1576,7 @@ static int __init sunzilog_init(void) goto out_free_tables; } - err = of_register_platform_driver(&zs_driver); + err = platform_driver_register(&zs_driver); if (err) goto out_unregister_uart; @@ -1604,7 +1604,7 @@ out: return err; out_unregister_driver: - of_unregister_platform_driver(&zs_driver); + platform_driver_unregister(&zs_driver); out_unregister_uart: if (num_sunzilog) { @@ -1619,7 +1619,7 @@ out_free_tables: static void __exit sunzilog_exit(void) { - of_unregister_platform_driver(&zs_driver); + platform_driver_unregister(&zs_driver); if (zilog_irq != -1) { struct uart_sunzilog_port *up = sunzilog_irq_chain; diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c index d2fce865b73..8af1ed83a4c 100644 --- a/drivers/tty/serial/uartlite.c +++ b/drivers/tty/serial/uartlite.c @@ -19,22 +19,11 @@ #include <linux/interrupt.h> #include <linux/init.h> #include <asm/io.h> -#if defined(CONFIG_OF) && (defined(CONFIG_PPC32) || defined(CONFIG_MICROBLAZE)) #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_device.h> #include <linux/of_platform.h> -/* Match table for of_platform binding */ -static struct of_device_id ulite_of_match[] __devinitdata = { - { .compatible = "xlnx,opb-uartlite-1.00.b", }, - { .compatible = "xlnx,xps-uartlite-1.00.a", }, - {} -}; -MODULE_DEVICE_TABLE(of, ulite_of_match); - -#endif - #define ULITE_NAME "ttyUL" #define ULITE_MAJOR 204 #define ULITE_MINOR 187 @@ -571,9 +560,29 @@ static int __devexit ulite_release(struct device *dev) * Platform bus binding */ +#if defined(CONFIG_OF) +/* Match table for of_platform binding */ +static struct of_device_id ulite_of_match[] __devinitdata = { + { .compatible = "xlnx,opb-uartlite-1.00.b", }, + { .compatible = "xlnx,xps-uartlite-1.00.a", }, + {} +}; +MODULE_DEVICE_TABLE(of, ulite_of_match); +#else /* CONFIG_OF */ +#define ulite_of_match NULL +#endif /* CONFIG_OF */ + static int __devinit ulite_probe(struct platform_device *pdev) { struct resource *res, *res2; + int id = pdev->id; +#ifdef CONFIG_OF + const __be32 *prop; + + prop = of_get_property(pdev->dev.of_node, "port-number", NULL); + if (prop) + id = be32_to_cpup(prop); +#endif res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) @@ -583,7 +592,7 @@ static int __devinit ulite_probe(struct platform_device *pdev) if (!res2) return -ENODEV; - return ulite_assign(&pdev->dev, pdev->id, res->start, res2->start); + return ulite_assign(&pdev->dev, id, res->start, res2->start); } static int __devexit ulite_remove(struct platform_device *pdev) @@ -595,72 +604,15 @@ static int __devexit ulite_remove(struct platform_device *pdev) MODULE_ALIAS("platform:uartlite"); static struct platform_driver ulite_platform_driver = { - .probe = ulite_probe, - .remove = __devexit_p(ulite_remove), - .driver = { - .owner = THIS_MODULE, - .name = "uartlite", - }, -}; - -/* --------------------------------------------------------------------- - * OF bus bindings - */ -#if defined(CONFIG_OF) && (defined(CONFIG_PPC32) || defined(CONFIG_MICROBLAZE)) -static int __devinit -ulite_of_probe(struct platform_device *op, const struct of_device_id *match) -{ - struct resource res; - const unsigned int *id; - int irq, rc; - - dev_dbg(&op->dev, "%s(%p, %p)\n", __func__, op, match); - - rc = 
of_address_to_resource(op->dev.of_node, 0, &res); - if (rc) { - dev_err(&op->dev, "invalid address\n"); - return rc; - } - - irq = irq_of_parse_and_map(op->dev.of_node, 0); - - id = of_get_property(op->dev.of_node, "port-number", NULL); - - return ulite_assign(&op->dev, id ? *id : -1, res.start, irq); -} - -static int __devexit ulite_of_remove(struct platform_device *op) -{ - return ulite_release(&op->dev); -} - -static struct of_platform_driver ulite_of_driver = { - .probe = ulite_of_probe, - .remove = __devexit_p(ulite_of_remove), + .probe = ulite_probe, + .remove = __devexit_p(ulite_remove), .driver = { - .name = "uartlite", .owner = THIS_MODULE, + .name = "uartlite", .of_match_table = ulite_of_match, }, }; -/* Registration helpers to keep the number of #ifdefs to a minimum */ -static inline int __init ulite_of_register(void) -{ - pr_debug("uartlite: calling of_register_platform_driver()\n"); - return of_register_platform_driver(&ulite_of_driver); -} - -static inline void __exit ulite_of_unregister(void) -{ - of_unregister_platform_driver(&ulite_of_driver); -} -#else /* CONFIG_OF && (CONFIG_PPC32 || CONFIG_MICROBLAZE) */ -/* Appropriate config not enabled; do nothing helpers */ -static inline int __init ulite_of_register(void) { return 0; } -static inline void __exit ulite_of_unregister(void) { } -#endif /* CONFIG_OF && (CONFIG_PPC32 || CONFIG_MICROBLAZE) */ - /* --------------------------------------------------------------------- * Module setup/teardown */ @@ -674,10 +626,6 @@ int __init ulite_init(void) if (ret) goto err_uart; - ret = ulite_of_register(); - if (ret) - goto err_of; - pr_debug("uartlite: calling platform_driver_register()\n"); ret = platform_driver_register(&ulite_platform_driver); if (ret) @@ -686,8 +634,6 @@ int __init ulite_init(void) return 0; err_plat: - ulite_of_unregister(); -err_of: uart_unregister_driver(&ulite_uart_driver); err_uart: printk(KERN_ERR "registering uartlite driver failed: err=%i", ret); @@ -697,7 +643,6 @@ err_uart: void __exit ulite_exit(void) { platform_driver_unregister(&ulite_platform_driver); - ulite_of_unregister(); uart_unregister_driver(&ulite_uart_driver); } diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c index 3f4848e2174..ff51dae1df0 100644 --- a/drivers/tty/serial/ucc_uart.c +++ b/drivers/tty/serial/ucc_uart.c @@ -1194,8 +1194,7 @@ static void uart_firmware_cont(const struct firmware *fw, void *context) release_firmware(fw); } -static int ucc_uart_probe(struct platform_device *ofdev, - const struct of_device_id *match) +static int ucc_uart_probe(struct platform_device *ofdev) { struct device_node *np = ofdev->dev.of_node; const unsigned int *iprop; /* Integer OF properties */ @@ -1485,7 +1484,7 @@ static struct of_device_id ucc_uart_match[] = { }; MODULE_DEVICE_TABLE(of, ucc_uart_match); -static struct of_platform_driver ucc_uart_of_driver = { +static struct platform_driver ucc_uart_of_driver = { .driver = { .name = "ucc_uart", .owner = THIS_MODULE, @@ -1510,7 +1509,7 @@ static int __init ucc_uart_init(void) return ret; } - ret = of_register_platform_driver(&ucc_uart_of_driver); + ret = platform_driver_register(&ucc_uart_of_driver); if (ret) printk(KERN_ERR "ucc-uart: could not register platform driver\n"); @@ -1523,7 +1522,7 @@ static void __exit ucc_uart_exit(void) printk(KERN_INFO "Freescale QUICC Engine UART device driver unloading\n"); - of_unregister_platform_driver(&ucc_uart_of_driver); + platform_driver_unregister(&ucc_uart_of_driver); uart_unregister_driver(&ucc_uart_driver); } diff --git 
a/drivers/tty/synclink.c b/drivers/tty/synclink.c new file mode 100644 index 00000000000..18888d005a0 --- /dev/null +++ b/drivers/tty/synclink.c @@ -0,0 +1,8119 @@ +/* + * linux/drivers/char/synclink.c + * + * $Id: synclink.c,v 4.38 2005/11/07 16:30:34 paulkf Exp $ + * + * Device driver for Microgate SyncLink ISA and PCI + * high speed multiprotocol serial adapters. + * + * written by Paul Fulghum for Microgate Corporation + * paulkf@microgate.com + * + * Microgate and SyncLink are trademarks of Microgate Corporation + * + * Derived from serial.c written by Theodore Ts'o and Linus Torvalds + * + * Original release 01/11/99 + * + * This code is released under the GNU General Public License (GPL) + * + * This driver is primarily intended for use in synchronous + * HDLC mode. Asynchronous mode is also provided. + * + * When operating in synchronous mode, each call to mgsl_write() + * contains exactly one complete HDLC frame. Calling mgsl_put_char + * will start assembling an HDLC frame that will not be sent until + * mgsl_flush_chars or mgsl_write is called. + * + * Synchronous receive data is reported as complete frames. To accomplish + * this, the TTY flip buffer is bypassed (too small to hold largest + * frame and may fragment frames) and the line discipline + * receive entry point is called directly. + * + * This driver has been tested with a slightly modified ppp.c driver + * for synchronous PPP. + * + * 2000/02/16 + * Added interface for syncppp.c driver (an alternate synchronous PPP + * implementation that also supports Cisco HDLC). Each device instance + * registers as a tty device AND a network device (if dosyncppp option + * is set for the device). The functionality is determined by which + * device interface is opened. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. 
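Before the synclink.c addition that begins above, the sunhv, sunsab, sunsu, sunzilog, uartlite and ucc_uart hunks all perform one mechanical conversion: of_platform_driver goes away, the probe function loses its of_device_id argument, and the OF match table moves into .driver.of_match_table of an ordinary platform_driver registered with platform_driver_register(). A generic skeleton of the converted form; the "vendor,demo-uart" compatible string and all demo_* names are placeholders.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>

/* Placeholder compatible string; each converted driver keeps its own table. */
static const struct of_device_id demo_of_match[] = {
	{ .compatible = "vendor,demo-uart" },
	{ }
};
MODULE_DEVICE_TABLE(of, demo_of_match);

/* Note: no of_device_id argument any more, unlike the old of_platform probe. */
static int __devinit demo_probe(struct platform_device *pdev)
{
	return 0;
}

static int __devexit demo_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver demo_driver = {
	.probe  = demo_probe,
	.remove = __devexit_p(demo_remove),
	.driver = {
		.name           = "demo-uart",
		.owner          = THIS_MODULE,
		.of_match_table = demo_of_match,   /* OF devices bind through the same driver */
	},
};

static int __init demo_init(void)
{
	return platform_driver_register(&demo_driver);   /* replaces of_register_platform_driver() */
}

static void __exit demo_exit(void)
{
	platform_driver_unregister(&demo_driver);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");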
+ */ + +#if defined(__i386__) +# define BREAKPOINT() asm(" int $3"); +#else +# define BREAKPOINT() { } +#endif + +#define MAX_ISA_DEVICES 10 +#define MAX_PCI_DEVICES 10 +#define MAX_TOTAL_DEVICES 20 + +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/signal.h> +#include <linux/sched.h> +#include <linux/timer.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/tty.h> +#include <linux/tty_flip.h> +#include <linux/serial.h> +#include <linux/major.h> +#include <linux/string.h> +#include <linux/fcntl.h> +#include <linux/ptrace.h> +#include <linux/ioport.h> +#include <linux/mm.h> +#include <linux/seq_file.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/netdevice.h> +#include <linux/vmalloc.h> +#include <linux/init.h> +#include <linux/ioctl.h> +#include <linux/synclink.h> + +#include <asm/system.h> +#include <asm/io.h> +#include <asm/irq.h> +#include <asm/dma.h> +#include <linux/bitops.h> +#include <asm/types.h> +#include <linux/termios.h> +#include <linux/workqueue.h> +#include <linux/hdlc.h> +#include <linux/dma-mapping.h> + +#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_MODULE)) +#define SYNCLINK_GENERIC_HDLC 1 +#else +#define SYNCLINK_GENERIC_HDLC 0 +#endif + +#define GET_USER(error,value,addr) error = get_user(value,addr) +#define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0 +#define PUT_USER(error,value,addr) error = put_user(value,addr) +#define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0 + +#include <asm/uaccess.h> + +#define RCLRVALUE 0xffff + +static MGSL_PARAMS default_params = { + MGSL_MODE_HDLC, /* unsigned long mode */ + 0, /* unsigned char loopback; */ + HDLC_FLAG_UNDERRUN_ABORT15, /* unsigned short flags; */ + HDLC_ENCODING_NRZI_SPACE, /* unsigned char encoding; */ + 0, /* unsigned long clock_speed; */ + 0xff, /* unsigned char addr_filter; */ + HDLC_CRC_16_CCITT, /* unsigned short crc_type; */ + HDLC_PREAMBLE_LENGTH_8BITS, /* unsigned char preamble_length; */ + HDLC_PREAMBLE_PATTERN_NONE, /* unsigned char preamble; */ + 9600, /* unsigned long data_rate; */ + 8, /* unsigned char data_bits; */ + 1, /* unsigned char stop_bits; */ + ASYNC_PARITY_NONE /* unsigned char parity; */ +}; + +#define SHARED_MEM_ADDRESS_SIZE 0x40000 +#define BUFFERLISTSIZE 4096 +#define DMABUFFERSIZE 4096 +#define MAXRXFRAMES 7 + +typedef struct _DMABUFFERENTRY +{ + u32 phys_addr; /* 32-bit flat physical address of data buffer */ + volatile u16 count; /* buffer size/data count */ + volatile u16 status; /* Control/status field */ + volatile u16 rcc; /* character count field */ + u16 reserved; /* padding required by 16C32 */ + u32 link; /* 32-bit flat link to next buffer entry */ + char *virt_addr; /* virtual address of data buffer */ + u32 phys_entry; /* physical address of this buffer entry */ + dma_addr_t dma_addr; +} DMABUFFERENTRY, *DMAPBUFFERENTRY; + +/* The queue of BH actions to be performed */ + +#define BH_RECEIVE 1 +#define BH_TRANSMIT 2 +#define BH_STATUS 4 + +#define IO_PIN_SHUTDOWN_LIMIT 100 + +struct _input_signal_events { + int ri_up; + int ri_down; + int dsr_up; + int dsr_down; + int dcd_up; + int dcd_down; + int cts_up; + int cts_down; +}; + +/* transmit holding buffer definitions*/ +#define MAX_TX_HOLDING_BUFFERS 5 +struct tx_holding_buffer { + int buffer_size; + unsigned char * buffer; +}; + + +/* + * Device instance data structure + */ + +struct mgsl_struct { + int magic; + struct tty_port port; + int line; + 
int hw_version; + + struct mgsl_icount icount; + + int timeout; + int x_char; /* xon/xoff character */ + u16 read_status_mask; + u16 ignore_status_mask; + unsigned char *xmit_buf; + int xmit_head; + int xmit_tail; + int xmit_cnt; + + wait_queue_head_t status_event_wait_q; + wait_queue_head_t event_wait_q; + struct timer_list tx_timer; /* HDLC transmit timeout timer */ + struct mgsl_struct *next_device; /* device list link */ + + spinlock_t irq_spinlock; /* spinlock for synchronizing with ISR */ + struct work_struct task; /* task structure for scheduling bh */ + + u32 EventMask; /* event trigger mask */ + u32 RecordedEvents; /* pending events */ + + u32 max_frame_size; /* as set by device config */ + + u32 pending_bh; + + bool bh_running; /* Protection from multiple */ + int isr_overflow; + bool bh_requested; + + int dcd_chkcount; /* check counts to prevent */ + int cts_chkcount; /* too many IRQs if a signal */ + int dsr_chkcount; /* is floating */ + int ri_chkcount; + + char *buffer_list; /* virtual address of Rx & Tx buffer lists */ + u32 buffer_list_phys; + dma_addr_t buffer_list_dma_addr; + + unsigned int rx_buffer_count; /* count of total allocated Rx buffers */ + DMABUFFERENTRY *rx_buffer_list; /* list of receive buffer entries */ + unsigned int current_rx_buffer; + + int num_tx_dma_buffers; /* number of tx dma frames required */ + int tx_dma_buffers_used; + unsigned int tx_buffer_count; /* count of total allocated Tx buffers */ + DMABUFFERENTRY *tx_buffer_list; /* list of transmit buffer entries */ + int start_tx_dma_buffer; /* tx dma buffer to start tx dma operation */ + int current_tx_buffer; /* next tx dma buffer to be loaded */ + + unsigned char *intermediate_rxbuffer; + + int num_tx_holding_buffers; /* number of tx holding buffer allocated */ + int get_tx_holding_index; /* next tx holding buffer for adapter to load */ + int put_tx_holding_index; /* next tx holding buffer to store user request */ + int tx_holding_count; /* number of tx holding buffers waiting */ + struct tx_holding_buffer tx_holding_buffers[MAX_TX_HOLDING_BUFFERS]; + + bool rx_enabled; + bool rx_overflow; + bool rx_rcc_underrun; + + bool tx_enabled; + bool tx_active; + u32 idle_mode; + + u16 cmr_value; + u16 tcsr_value; + + char device_name[25]; /* device instance name */ + + unsigned int bus_type; /* expansion bus type (ISA,EISA,PCI) */ + unsigned char bus; /* expansion bus number (zero based) */ + unsigned char function; /* PCI device number */ + + unsigned int io_base; /* base I/O address of adapter */ + unsigned int io_addr_size; /* size of the I/O address range */ + bool io_addr_requested; /* true if I/O address requested */ + + unsigned int irq_level; /* interrupt level */ + unsigned long irq_flags; + bool irq_requested; /* true if IRQ requested */ + + unsigned int dma_level; /* DMA channel */ + bool dma_requested; /* true if dma channel requested */ + + u16 mbre_bit; + u16 loopback_bits; + u16 usc_idle_mode; + + MGSL_PARAMS params; /* communications parameters */ + + unsigned char serial_signals; /* current serial signal states */ + + bool irq_occurred; /* for diagnostics use */ + unsigned int init_error; /* Initialization startup error (DIAGS) */ + int fDiagnosticsmode; /* Driver in Diagnostic mode? 
(DIAGS) */ + + u32 last_mem_alloc; + unsigned char* memory_base; /* shared memory address (PCI only) */ + u32 phys_memory_base; + bool shared_mem_requested; + + unsigned char* lcr_base; /* local config registers (PCI only) */ + u32 phys_lcr_base; + u32 lcr_offset; + bool lcr_mem_requested; + + u32 misc_ctrl_value; + char flag_buf[MAX_ASYNC_BUFFER_SIZE]; + char char_buf[MAX_ASYNC_BUFFER_SIZE]; + bool drop_rts_on_tx_done; + + bool loopmode_insert_requested; + bool loopmode_send_done_requested; + + struct _input_signal_events input_signal_events; + + /* generic HDLC device parts */ + int netcount; + spinlock_t netlock; + +#if SYNCLINK_GENERIC_HDLC + struct net_device *netdev; +#endif +}; + +#define MGSL_MAGIC 0x5401 + +/* + * The size of the serial xmit buffer is 1 page, or 4096 bytes + */ +#ifndef SERIAL_XMIT_SIZE +#define SERIAL_XMIT_SIZE 4096 +#endif + +/* + * These macros define the offsets used in calculating the + * I/O address of the specified USC registers. + */ + + +#define DCPIN 2 /* Bit 1 of I/O address */ +#define SDPIN 4 /* Bit 2 of I/O address */ + +#define DCAR 0 /* DMA command/address register */ +#define CCAR SDPIN /* channel command/address register */ +#define DATAREG DCPIN + SDPIN /* serial data register */ +#define MSBONLY 0x41 +#define LSBONLY 0x40 + +/* + * These macros define the register address (ordinal number) + * used for writing address/value pairs to the USC. + */ + +#define CMR 0x02 /* Channel mode Register */ +#define CCSR 0x04 /* Channel Command/status Register */ +#define CCR 0x06 /* Channel Control Register */ +#define PSR 0x08 /* Port status Register */ +#define PCR 0x0a /* Port Control Register */ +#define TMDR 0x0c /* Test mode Data Register */ +#define TMCR 0x0e /* Test mode Control Register */ +#define CMCR 0x10 /* Clock mode Control Register */ +#define HCR 0x12 /* Hardware Configuration Register */ +#define IVR 0x14 /* Interrupt Vector Register */ +#define IOCR 0x16 /* Input/Output Control Register */ +#define ICR 0x18 /* Interrupt Control Register */ +#define DCCR 0x1a /* Daisy Chain Control Register */ +#define MISR 0x1c /* Misc Interrupt status Register */ +#define SICR 0x1e /* status Interrupt Control Register */ +#define RDR 0x20 /* Receive Data Register */ +#define RMR 0x22 /* Receive mode Register */ +#define RCSR 0x24 /* Receive Command/status Register */ +#define RICR 0x26 /* Receive Interrupt Control Register */ +#define RSR 0x28 /* Receive Sync Register */ +#define RCLR 0x2a /* Receive count Limit Register */ +#define RCCR 0x2c /* Receive Character count Register */ +#define TC0R 0x2e /* Time Constant 0 Register */ +#define TDR 0x30 /* Transmit Data Register */ +#define TMR 0x32 /* Transmit mode Register */ +#define TCSR 0x34 /* Transmit Command/status Register */ +#define TICR 0x36 /* Transmit Interrupt Control Register */ +#define TSR 0x38 /* Transmit Sync Register */ +#define TCLR 0x3a /* Transmit count Limit Register */ +#define TCCR 0x3c /* Transmit Character count Register */ +#define TC1R 0x3e /* Time Constant 1 Register */ + + +/* + * MACRO DEFINITIONS FOR DMA REGISTERS + */ + +#define DCR 0x06 /* DMA Control Register (shared) */ +#define DACR 0x08 /* DMA Array count Register (shared) */ +#define BDCR 0x12 /* Burst/Dwell Control Register (shared) */ +#define DIVR 0x14 /* DMA Interrupt Vector Register (shared) */ +#define DICR 0x18 /* DMA Interrupt Control Register (shared) */ +#define CDIR 0x1a /* Clear DMA Interrupt Register (shared) */ +#define SDIR 0x1c /* Set DMA Interrupt Register (shared) */ + +#define TDMR 0x02 /* Transmit 
DMA mode Register */ +#define TDIAR 0x1e /* Transmit DMA Interrupt Arm Register */ +#define TBCR 0x2a /* Transmit Byte count Register */ +#define TARL 0x2c /* Transmit Address Register (low) */ +#define TARU 0x2e /* Transmit Address Register (high) */ +#define NTBCR 0x3a /* Next Transmit Byte count Register */ +#define NTARL 0x3c /* Next Transmit Address Register (low) */ +#define NTARU 0x3e /* Next Transmit Address Register (high) */ + +#define RDMR 0x82 /* Receive DMA mode Register (non-shared) */ +#define RDIAR 0x9e /* Receive DMA Interrupt Arm Register */ +#define RBCR 0xaa /* Receive Byte count Register */ +#define RARL 0xac /* Receive Address Register (low) */ +#define RARU 0xae /* Receive Address Register (high) */ +#define NRBCR 0xba /* Next Receive Byte count Register */ +#define NRARL 0xbc /* Next Receive Address Register (low) */ +#define NRARU 0xbe /* Next Receive Address Register (high) */ + + +/* + * MACRO DEFINITIONS FOR MODEM STATUS BITS + */ + +#define MODEMSTATUS_DTR 0x80 +#define MODEMSTATUS_DSR 0x40 +#define MODEMSTATUS_RTS 0x20 +#define MODEMSTATUS_CTS 0x10 +#define MODEMSTATUS_RI 0x04 +#define MODEMSTATUS_DCD 0x01 + + +/* + * Channel Command/Address Register (CCAR) Command Codes + */ + +#define RTCmd_Null 0x0000 +#define RTCmd_ResetHighestIus 0x1000 +#define RTCmd_TriggerChannelLoadDma 0x2000 +#define RTCmd_TriggerRxDma 0x2800 +#define RTCmd_TriggerTxDma 0x3000 +#define RTCmd_TriggerRxAndTxDma 0x3800 +#define RTCmd_PurgeRxFifo 0x4800 +#define RTCmd_PurgeTxFifo 0x5000 +#define RTCmd_PurgeRxAndTxFifo 0x5800 +#define RTCmd_LoadRcc 0x6800 +#define RTCmd_LoadTcc 0x7000 +#define RTCmd_LoadRccAndTcc 0x7800 +#define RTCmd_LoadTC0 0x8800 +#define RTCmd_LoadTC1 0x9000 +#define RTCmd_LoadTC0AndTC1 0x9800 +#define RTCmd_SerialDataLSBFirst 0xa000 +#define RTCmd_SerialDataMSBFirst 0xa800 +#define RTCmd_SelectBigEndian 0xb000 +#define RTCmd_SelectLittleEndian 0xb800 + + +/* + * DMA Command/Address Register (DCAR) Command Codes + */ + +#define DmaCmd_Null 0x0000 +#define DmaCmd_ResetTxChannel 0x1000 +#define DmaCmd_ResetRxChannel 0x1200 +#define DmaCmd_StartTxChannel 0x2000 +#define DmaCmd_StartRxChannel 0x2200 +#define DmaCmd_ContinueTxChannel 0x3000 +#define DmaCmd_ContinueRxChannel 0x3200 +#define DmaCmd_PauseTxChannel 0x4000 +#define DmaCmd_PauseRxChannel 0x4200 +#define DmaCmd_AbortTxChannel 0x5000 +#define DmaCmd_AbortRxChannel 0x5200 +#define DmaCmd_InitTxChannel 0x7000 +#define DmaCmd_InitRxChannel 0x7200 +#define DmaCmd_ResetHighestDmaIus 0x8000 +#define DmaCmd_ResetAllChannels 0x9000 +#define DmaCmd_StartAllChannels 0xa000 +#define DmaCmd_ContinueAllChannels 0xb000 +#define DmaCmd_PauseAllChannels 0xc000 +#define DmaCmd_AbortAllChannels 0xd000 +#define DmaCmd_InitAllChannels 0xf000 + +#define TCmd_Null 0x0000 +#define TCmd_ClearTxCRC 0x2000 +#define TCmd_SelectTicrTtsaData 0x4000 +#define TCmd_SelectTicrTxFifostatus 0x5000 +#define TCmd_SelectTicrIntLevel 0x6000 +#define TCmd_SelectTicrdma_level 0x7000 +#define TCmd_SendFrame 0x8000 +#define TCmd_SendAbort 0x9000 +#define TCmd_EnableDleInsertion 0xc000 +#define TCmd_DisableDleInsertion 0xd000 +#define TCmd_ClearEofEom 0xe000 +#define TCmd_SetEofEom 0xf000 + +#define RCmd_Null 0x0000 +#define RCmd_ClearRxCRC 0x2000 +#define RCmd_EnterHuntmode 0x3000 +#define RCmd_SelectRicrRtsaData 0x4000 +#define RCmd_SelectRicrRxFifostatus 0x5000 +#define RCmd_SelectRicrIntLevel 0x6000 +#define RCmd_SelectRicrdma_level 0x7000 + +/* + * Bits for enabling and disabling IRQs in Interrupt Control Register (ICR) + */ + +#define RECEIVE_STATUS 
BIT5 +#define RECEIVE_DATA BIT4 +#define TRANSMIT_STATUS BIT3 +#define TRANSMIT_DATA BIT2 +#define IO_PIN BIT1 +#define MISC BIT0 + + +/* + * Receive status Bits in Receive Command/status Register RCSR + */ + +#define RXSTATUS_SHORT_FRAME BIT8 +#define RXSTATUS_CODE_VIOLATION BIT8 +#define RXSTATUS_EXITED_HUNT BIT7 +#define RXSTATUS_IDLE_RECEIVED BIT6 +#define RXSTATUS_BREAK_RECEIVED BIT5 +#define RXSTATUS_ABORT_RECEIVED BIT5 +#define RXSTATUS_RXBOUND BIT4 +#define RXSTATUS_CRC_ERROR BIT3 +#define RXSTATUS_FRAMING_ERROR BIT3 +#define RXSTATUS_ABORT BIT2 +#define RXSTATUS_PARITY_ERROR BIT2 +#define RXSTATUS_OVERRUN BIT1 +#define RXSTATUS_DATA_AVAILABLE BIT0 +#define RXSTATUS_ALL 0x01f6 +#define usc_UnlatchRxstatusBits(a,b) usc_OutReg( (a), RCSR, (u16)((b) & RXSTATUS_ALL) ) + +/* + * Values for setting transmit idle mode in + * Transmit Control/status Register (TCSR) + */ +#define IDLEMODE_FLAGS 0x0000 +#define IDLEMODE_ALT_ONE_ZERO 0x0100 +#define IDLEMODE_ZERO 0x0200 +#define IDLEMODE_ONE 0x0300 +#define IDLEMODE_ALT_MARK_SPACE 0x0500 +#define IDLEMODE_SPACE 0x0600 +#define IDLEMODE_MARK 0x0700 +#define IDLEMODE_MASK 0x0700 + +/* + * IUSC revision identifiers + */ +#define IUSC_SL1660 0x4d44 +#define IUSC_PRE_SL1660 0x4553 + +/* + * Transmit status Bits in Transmit Command/status Register (TCSR) + */ + +#define TCSR_PRESERVE 0x0F00 + +#define TCSR_UNDERWAIT BIT11 +#define TXSTATUS_PREAMBLE_SENT BIT7 +#define TXSTATUS_IDLE_SENT BIT6 +#define TXSTATUS_ABORT_SENT BIT5 +#define TXSTATUS_EOF_SENT BIT4 +#define TXSTATUS_EOM_SENT BIT4 +#define TXSTATUS_CRC_SENT BIT3 +#define TXSTATUS_ALL_SENT BIT2 +#define TXSTATUS_UNDERRUN BIT1 +#define TXSTATUS_FIFO_EMPTY BIT0 +#define TXSTATUS_ALL 0x00fa +#define usc_UnlatchTxstatusBits(a,b) usc_OutReg( (a), TCSR, (u16)((a)->tcsr_value + ((b) & 0x00FF)) ) + + +#define MISCSTATUS_RXC_LATCHED BIT15 +#define MISCSTATUS_RXC BIT14 +#define MISCSTATUS_TXC_LATCHED BIT13 +#define MISCSTATUS_TXC BIT12 +#define MISCSTATUS_RI_LATCHED BIT11 +#define MISCSTATUS_RI BIT10 +#define MISCSTATUS_DSR_LATCHED BIT9 +#define MISCSTATUS_DSR BIT8 +#define MISCSTATUS_DCD_LATCHED BIT7 +#define MISCSTATUS_DCD BIT6 +#define MISCSTATUS_CTS_LATCHED BIT5 +#define MISCSTATUS_CTS BIT4 +#define MISCSTATUS_RCC_UNDERRUN BIT3 +#define MISCSTATUS_DPLL_NO_SYNC BIT2 +#define MISCSTATUS_BRG1_ZERO BIT1 +#define MISCSTATUS_BRG0_ZERO BIT0 + +#define usc_UnlatchIostatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0xaaa0)) +#define usc_UnlatchMiscstatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0x000f)) + +#define SICR_RXC_ACTIVE BIT15 +#define SICR_RXC_INACTIVE BIT14 +#define SICR_RXC (BIT15+BIT14) +#define SICR_TXC_ACTIVE BIT13 +#define SICR_TXC_INACTIVE BIT12 +#define SICR_TXC (BIT13+BIT12) +#define SICR_RI_ACTIVE BIT11 +#define SICR_RI_INACTIVE BIT10 +#define SICR_RI (BIT11+BIT10) +#define SICR_DSR_ACTIVE BIT9 +#define SICR_DSR_INACTIVE BIT8 +#define SICR_DSR (BIT9+BIT8) +#define SICR_DCD_ACTIVE BIT7 +#define SICR_DCD_INACTIVE BIT6 +#define SICR_DCD (BIT7+BIT6) +#define SICR_CTS_ACTIVE BIT5 +#define SICR_CTS_INACTIVE BIT4 +#define SICR_CTS (BIT5+BIT4) +#define SICR_RCC_UNDERFLOW BIT3 +#define SICR_DPLL_NO_SYNC BIT2 +#define SICR_BRG1_ZERO BIT1 +#define SICR_BRG0_ZERO BIT0 + +void usc_DisableMasterIrqBit( struct mgsl_struct *info ); +void usc_EnableMasterIrqBit( struct mgsl_struct *info ); +void usc_EnableInterrupts( struct mgsl_struct *info, u16 IrqMask ); +void usc_DisableInterrupts( struct mgsl_struct *info, u16 IrqMask ); +void usc_ClearIrqPendingBits( struct mgsl_struct *info, u16 IrqMask ); + +#define 
usc_EnableInterrupts( a, b ) \ + usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0xc0 + (b)) ) + +#define usc_DisableInterrupts( a, b ) \ + usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0x80 + (b)) ) + +#define usc_EnableMasterIrqBit(a) \ + usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0x0f00) + 0xb000) ) + +#define usc_DisableMasterIrqBit(a) \ + usc_OutReg( (a), ICR, (u16)(usc_InReg((a),ICR) & 0x7f00) ) + +#define usc_ClearIrqPendingBits( a, b ) usc_OutReg( (a), DCCR, 0x40 + (b) ) + +/* + * Transmit status Bits in Transmit Control status Register (TCSR) + * and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) + */ + +#define TXSTATUS_PREAMBLE_SENT BIT7 +#define TXSTATUS_IDLE_SENT BIT6 +#define TXSTATUS_ABORT_SENT BIT5 +#define TXSTATUS_EOF BIT4 +#define TXSTATUS_CRC_SENT BIT3 +#define TXSTATUS_ALL_SENT BIT2 +#define TXSTATUS_UNDERRUN BIT1 +#define TXSTATUS_FIFO_EMPTY BIT0 + +#define DICR_MASTER BIT15 +#define DICR_TRANSMIT BIT0 +#define DICR_RECEIVE BIT1 + +#define usc_EnableDmaInterrupts(a,b) \ + usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) | (b)) ) + +#define usc_DisableDmaInterrupts(a,b) \ + usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) & ~(b)) ) + +#define usc_EnableStatusIrqs(a,b) \ + usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) | (b)) ) + +#define usc_DisablestatusIrqs(a,b) \ + usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) & ~(b)) ) + +/* Transmit status Bits in Transmit Control status Register (TCSR) */ +/* and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) */ + + +#define DISABLE_UNCONDITIONAL 0 +#define DISABLE_END_OF_FRAME 1 +#define ENABLE_UNCONDITIONAL 2 +#define ENABLE_AUTO_CTS 3 +#define ENABLE_AUTO_DCD 3 +#define usc_EnableTransmitter(a,b) \ + usc_OutReg( (a), TMR, (u16)((usc_InReg((a),TMR) & 0xfffc) | (b)) ) +#define usc_EnableReceiver(a,b) \ + usc_OutReg( (a), RMR, (u16)((usc_InReg((a),RMR) & 0xfffc) | (b)) ) + +static u16 usc_InDmaReg( struct mgsl_struct *info, u16 Port ); +static void usc_OutDmaReg( struct mgsl_struct *info, u16 Port, u16 Value ); +static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd ); + +static u16 usc_InReg( struct mgsl_struct *info, u16 Port ); +static void usc_OutReg( struct mgsl_struct *info, u16 Port, u16 Value ); +static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd ); +void usc_RCmd( struct mgsl_struct *info, u16 Cmd ); +void usc_TCmd( struct mgsl_struct *info, u16 Cmd ); + +#define usc_TCmd(a,b) usc_OutReg((a), TCSR, (u16)((a)->tcsr_value + (b))) +#define usc_RCmd(a,b) usc_OutReg((a), RCSR, (b)) + +#define usc_SetTransmitSyncChars(a,s0,s1) usc_OutReg((a), TSR, (u16)(((u16)s0<<8)|(u16)s1)) + +static void usc_process_rxoverrun_sync( struct mgsl_struct *info ); +static void usc_start_receiver( struct mgsl_struct *info ); +static void usc_stop_receiver( struct mgsl_struct *info ); + +static void usc_start_transmitter( struct mgsl_struct *info ); +static void usc_stop_transmitter( struct mgsl_struct *info ); +static void usc_set_txidle( struct mgsl_struct *info ); +static void usc_load_txfifo( struct mgsl_struct *info ); + +static void usc_enable_aux_clock( struct mgsl_struct *info, u32 DataRate ); +static void usc_enable_loopback( struct mgsl_struct *info, int enable ); + +static void usc_get_serial_signals( struct mgsl_struct *info ); +static void usc_set_serial_signals( struct mgsl_struct *info ); + +static void usc_reset( struct mgsl_struct *info ); + +static void usc_set_sync_mode( struct mgsl_struct *info ); +static void usc_set_sdlc_mode( struct 
mgsl_struct *info ); +static void usc_set_async_mode( struct mgsl_struct *info ); +static void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate ); + +static void usc_loopback_frame( struct mgsl_struct *info ); + +static void mgsl_tx_timeout(unsigned long context); + + +static void usc_loopmode_cancel_transmit( struct mgsl_struct * info ); +static void usc_loopmode_insert_request( struct mgsl_struct * info ); +static int usc_loopmode_active( struct mgsl_struct * info); +static void usc_loopmode_send_done( struct mgsl_struct * info ); + +static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg); + +#if SYNCLINK_GENERIC_HDLC +#define dev_to_port(D) (dev_to_hdlc(D)->priv) +static void hdlcdev_tx_done(struct mgsl_struct *info); +static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size); +static int hdlcdev_init(struct mgsl_struct *info); +static void hdlcdev_exit(struct mgsl_struct *info); +#endif + +/* + * Defines a BUS descriptor value for the PCI adapter + * local bus address ranges. + */ + +#define BUS_DESCRIPTOR( WrHold, WrDly, RdDly, Nwdd, Nwad, Nxda, Nrdd, Nrad ) \ +(0x00400020 + \ +((WrHold) << 30) + \ +((WrDly) << 28) + \ +((RdDly) << 26) + \ +((Nwdd) << 20) + \ +((Nwad) << 15) + \ +((Nxda) << 13) + \ +((Nrdd) << 11) + \ +((Nrad) << 6) ) + +static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit); + +/* + * Adapter diagnostic routines + */ +static bool mgsl_register_test( struct mgsl_struct *info ); +static bool mgsl_irq_test( struct mgsl_struct *info ); +static bool mgsl_dma_test( struct mgsl_struct *info ); +static bool mgsl_memory_test( struct mgsl_struct *info ); +static int mgsl_adapter_test( struct mgsl_struct *info ); + +/* + * device and resource management routines + */ +static int mgsl_claim_resources(struct mgsl_struct *info); +static void mgsl_release_resources(struct mgsl_struct *info); +static void mgsl_add_device(struct mgsl_struct *info); +static struct mgsl_struct* mgsl_allocate_device(void); + +/* + * DMA buffer manipulation functions. 
+ */ +static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex ); +static bool mgsl_get_rx_frame( struct mgsl_struct *info ); +static bool mgsl_get_raw_rx_frame( struct mgsl_struct *info ); +static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info ); +static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info ); +static int num_free_tx_dma_buffers(struct mgsl_struct *info); +static void mgsl_load_tx_dma_buffer( struct mgsl_struct *info, const char *Buffer, unsigned int BufferSize); +static void mgsl_load_pci_memory(char* TargetPtr, const char* SourcePtr, unsigned short count); + +/* + * DMA and Shared Memory buffer allocation and formatting + */ +static int mgsl_allocate_dma_buffers(struct mgsl_struct *info); +static void mgsl_free_dma_buffers(struct mgsl_struct *info); +static int mgsl_alloc_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount); +static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount); +static int mgsl_alloc_buffer_list_memory(struct mgsl_struct *info); +static void mgsl_free_buffer_list_memory(struct mgsl_struct *info); +static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info); +static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info); +static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info); +static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info); +static bool load_next_tx_holding_buffer(struct mgsl_struct *info); +static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize); + +/* + * Bottom half interrupt handlers + */ +static void mgsl_bh_handler(struct work_struct *work); +static void mgsl_bh_receive(struct mgsl_struct *info); +static void mgsl_bh_transmit(struct mgsl_struct *info); +static void mgsl_bh_status(struct mgsl_struct *info); + +/* + * Interrupt handler routines and dispatch table. 
+ */ +static void mgsl_isr_null( struct mgsl_struct *info ); +static void mgsl_isr_transmit_data( struct mgsl_struct *info ); +static void mgsl_isr_receive_data( struct mgsl_struct *info ); +static void mgsl_isr_receive_status( struct mgsl_struct *info ); +static void mgsl_isr_transmit_status( struct mgsl_struct *info ); +static void mgsl_isr_io_pin( struct mgsl_struct *info ); +static void mgsl_isr_misc( struct mgsl_struct *info ); +static void mgsl_isr_receive_dma( struct mgsl_struct *info ); +static void mgsl_isr_transmit_dma( struct mgsl_struct *info ); + +typedef void (*isr_dispatch_func)(struct mgsl_struct *); + +static isr_dispatch_func UscIsrTable[7] = +{ + mgsl_isr_null, + mgsl_isr_misc, + mgsl_isr_io_pin, + mgsl_isr_transmit_data, + mgsl_isr_transmit_status, + mgsl_isr_receive_data, + mgsl_isr_receive_status +}; + +/* + * ioctl call handlers + */ +static int tiocmget(struct tty_struct *tty); +static int tiocmset(struct tty_struct *tty, + unsigned int set, unsigned int clear); +static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount + __user *user_icount); +static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params); +static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params); +static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode); +static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode); +static int mgsl_txenable(struct mgsl_struct * info, int enable); +static int mgsl_txabort(struct mgsl_struct * info); +static int mgsl_rxenable(struct mgsl_struct * info, int enable); +static int mgsl_wait_event(struct mgsl_struct * info, int __user *mask); +static int mgsl_loopmode_send_done( struct mgsl_struct * info ); + +/* set non-zero on successful registration with PCI subsystem */ +static bool pci_registered; + +/* + * Global linked list of SyncLink devices + */ +static struct mgsl_struct *mgsl_device_list; +static int mgsl_device_count; + +/* + * Set this param to non-zero to load eax with the + * .text section address and breakpoint on module load. + * This is useful for use with gdb and add-symbol-file command. + */ +static int break_on_load; + +/* + * Driver major number, defaults to zero to get auto + * assigned major number. May be forced as module parameter. + */ +static int ttymajor; + +/* + * Array of user specified options for ISA adapters. 
+ */ +static int io[MAX_ISA_DEVICES]; +static int irq[MAX_ISA_DEVICES]; +static int dma[MAX_ISA_DEVICES]; +static int debug_level; +static int maxframe[MAX_TOTAL_DEVICES]; +static int txdmabufs[MAX_TOTAL_DEVICES]; +static int txholdbufs[MAX_TOTAL_DEVICES]; + +module_param(break_on_load, bool, 0); +module_param(ttymajor, int, 0); +module_param_array(io, int, NULL, 0); +module_param_array(irq, int, NULL, 0); +module_param_array(dma, int, NULL, 0); +module_param(debug_level, int, 0); +module_param_array(maxframe, int, NULL, 0); +module_param_array(txdmabufs, int, NULL, 0); +module_param_array(txholdbufs, int, NULL, 0); + +static char *driver_name = "SyncLink serial driver"; +static char *driver_version = "$Revision: 4.38 $"; + +static int synclink_init_one (struct pci_dev *dev, + const struct pci_device_id *ent); +static void synclink_remove_one (struct pci_dev *dev); + +static struct pci_device_id synclink_pci_tbl[] = { + { PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_USC, PCI_ANY_ID, PCI_ANY_ID, }, + { PCI_VENDOR_ID_MICROGATE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, }, + { 0, }, /* terminate list */ +}; +MODULE_DEVICE_TABLE(pci, synclink_pci_tbl); + +MODULE_LICENSE("GPL"); + +static struct pci_driver synclink_pci_driver = { + .name = "synclink", + .id_table = synclink_pci_tbl, + .probe = synclink_init_one, + .remove = __devexit_p(synclink_remove_one), +}; + +static struct tty_driver *serial_driver; + +/* number of characters left in xmit buffer before we ask for more */ +#define WAKEUP_CHARS 256 + + +static void mgsl_change_params(struct mgsl_struct *info); +static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout); + +/* + * 1st function defined in .text section. Calling this function in + * init_module() followed by a breakpoint allows a remote debugger + * (gdb) to get the .text address for the add-symbol-file command. + * This allows remote debugging of dynamically loadable modules. + */ +static void* mgsl_get_text_ptr(void) +{ + return mgsl_get_text_ptr; +} + +static inline int mgsl_paranoia_check(struct mgsl_struct *info, + char *name, const char *routine) +{ +#ifdef MGSL_PARANOIA_CHECK + static const char *badmagic = + "Warning: bad magic number for mgsl struct (%s) in %s\n"; + static const char *badinfo = + "Warning: null mgsl_struct for (%s) in %s\n"; + + if (!info) { + printk(badinfo, name, routine); + return 1; + } + if (info->magic != MGSL_MAGIC) { + printk(badmagic, name, routine); + return 1; + } +#else + if (!info) + return 1; +#endif + return 0; +} + +/** + * line discipline callback wrappers + * + * The wrappers maintain line discipline references + * while calling into the line discipline. 
+ * + * ldisc_receive_buf - pass receive data to line discipline + */ + +static void ldisc_receive_buf(struct tty_struct *tty, + const __u8 *data, char *flags, int count) +{ + struct tty_ldisc *ld; + if (!tty) + return; + ld = tty_ldisc_ref(tty); + if (ld) { + if (ld->ops->receive_buf) + ld->ops->receive_buf(tty, data, flags, count); + tty_ldisc_deref(ld); + } +} + +/* mgsl_stop() throttle (stop) transmitter + * + * Arguments: tty pointer to tty info structure + * Return Value: None + */ +static void mgsl_stop(struct tty_struct *tty) +{ + struct mgsl_struct *info = tty->driver_data; + unsigned long flags; + + if (mgsl_paranoia_check(info, tty->name, "mgsl_stop")) + return; + + if ( debug_level >= DEBUG_LEVEL_INFO ) + printk("mgsl_stop(%s)\n",info->device_name); + + spin_lock_irqsave(&info->irq_spinlock,flags); + if (info->tx_enabled) + usc_stop_transmitter(info); + spin_unlock_irqrestore(&info->irq_spinlock,flags); + +} /* end of mgsl_stop() */ + +/* mgsl_start() release (start) transmitter + * + * Arguments: tty pointer to tty info structure + * Return Value: None + */ +static void mgsl_start(struct tty_struct *tty) +{ + struct mgsl_struct *info = tty->driver_data; + unsigned long flags; + + if (mgsl_paranoia_check(info, tty->name, "mgsl_start")) + return; + + if ( debug_level >= DEBUG_LEVEL_INFO ) + printk("mgsl_start(%s)\n",info->device_name); + + spin_lock_irqsave(&info->irq_spinlock,flags); + if (!info->tx_enabled) + usc_start_transmitter(info); + spin_unlock_irqrestore(&info->irq_spinlock,flags); + +} /* end of mgsl_start() */ + +/* + * Bottom half work queue access functions + */ + +/* mgsl_bh_action() Return next bottom half action to perform. + * Return Value: BH action code or 0 if nothing to do. + */ +static int mgsl_bh_action(struct mgsl_struct *info) +{ + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&info->irq_spinlock,flags); + + if (info->pending_bh & BH_RECEIVE) { + info->pending_bh &= ~BH_RECEIVE; + rc = BH_RECEIVE; + } else if (info->pending_bh & BH_TRANSMIT) { + info->pending_bh &= ~BH_TRANSMIT; + rc = BH_TRANSMIT; + } else if (info->pending_bh & BH_STATUS) { + info->pending_bh &= ~BH_STATUS; + rc = BH_STATUS; + } + + if (!rc) { + /* Mark BH routine as complete */ + info->bh_running = false; + info->bh_requested = false; + } + + spin_unlock_irqrestore(&info->irq_spinlock,flags); + + return rc; +} + +/* + * Perform bottom half processing of work items queued by ISR. + */ +static void mgsl_bh_handler(struct work_struct *work) +{ + struct mgsl_struct *info = + container_of(work, struct mgsl_struct, task); + int action; + + if (!info) + return; + + if ( debug_level >= DEBUG_LEVEL_BH ) + printk( "%s(%d):mgsl_bh_handler(%s) entry\n", + __FILE__,__LINE__,info->device_name); + + info->bh_running = true; + + while((action = mgsl_bh_action(info)) != 0) { + + /* Process work item */ + if ( debug_level >= DEBUG_LEVEL_BH ) + printk( "%s(%d):mgsl_bh_handler() work item action=%d\n", + __FILE__,__LINE__,action); + + switch (action) { + + case BH_RECEIVE: + mgsl_bh_receive(info); + break; + case BH_TRANSMIT: + mgsl_bh_transmit(info); + break; + case BH_STATUS: + mgsl_bh_status(info); + break; + default: + /* unknown work item ID */ + printk("Unknown work item ID=%08X!\n", action); + break; + } + } + + if ( debug_level >= DEBUG_LEVEL_BH ) + printk( "%s(%d):mgsl_bh_handler(%s) exit\n", + __FILE__,__LINE__,info->device_name); +} + +static void mgsl_bh_receive(struct mgsl_struct *info) +{ + bool (*get_rx_frame)(struct mgsl_struct *info) = + (info->params.mode == MGSL_MODE_HDLC ? 
mgsl_get_rx_frame : mgsl_get_raw_rx_frame); + + if ( debug_level >= DEBUG_LEVEL_BH ) + printk( "%s(%d):mgsl_bh_receive(%s)\n", + __FILE__,__LINE__,info->device_name); + + do + { + if (info->rx_rcc_underrun) { + unsigned long flags; + spin_lock_irqsave(&info->irq_spinlock,flags); + usc_start_receiver(info); + spin_unlock_irqrestore(&info->irq_spinlock,flags); + return; + } + } while(get_rx_frame(info)); +} + +static void mgsl_bh_transmit(struct mgsl_struct *info) +{ + struct tty_struct *tty = info->port.tty; + unsigned long flags; + + if ( debug_level >= DEBUG_LEVEL_BH ) + printk( "%s(%d):mgsl_bh_transmit() entry on %s\n", + __FILE__,__LINE__,info->device_name); + + if (tty) + tty_wakeup(tty); + + /* if transmitter idle and loopmode_send_done_requested + * then start echoing RxD to TxD + */ + spin_lock_irqsave(&info->irq_spinlock,flags); + if ( !info->tx_active && info->loopmode_send_done_requested ) + usc_loopmode_send_done( info ); + spin_unlock_irqrestore(&info->irq_spinlock,flags); +} + +static void mgsl_bh_status(struct mgsl_struct *info) +{ + if ( debug_level >= DEBUG_LEVEL_BH ) + printk( "%s(%d):mgsl_bh_status() entry on %s\n", + __FILE__,__LINE__,info->device_name); + + info->ri_chkcount = 0; + info->dsr_chkcount = 0; + info->dcd_chkcount = 0; + info->cts_chkcount = 0; +} + +/* mgsl_isr_receive_status() + * + * Service a receive status interrupt. The type of status + * interrupt is indicated by the state of the RCSR. + * This is only used for HDLC mode. + * + * Arguments: info pointer to device instance data + * Return Value: None + */ +static void mgsl_isr_receive_status( struct mgsl_struct *info ) +{ + u16 status = usc_InReg( info, RCSR ); + + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk("%s(%d):mgsl_isr_receive_status status=%04X\n", + __FILE__,__LINE__,status); + + if ( (status & RXSTATUS_ABORT_RECEIVED) && + info->loopmode_insert_requested && + usc_loopmode_active(info) ) + { + ++info->icount.rxabort; + info->loopmode_insert_requested = false; + + /* clear CMR:13 to start echoing RxD to TxD */ + info->cmr_value &= ~BIT13; + usc_OutReg(info, CMR, info->cmr_value); + + /* disable received abort irq (no longer required) */ + usc_OutReg(info, RICR, + (usc_InReg(info, RICR) & ~RXSTATUS_ABORT_RECEIVED)); + } + + if (status & (RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED)) { + if (status & RXSTATUS_EXITED_HUNT) + info->icount.exithunt++; + if (status & RXSTATUS_IDLE_RECEIVED) + info->icount.rxidle++; + wake_up_interruptible(&info->event_wait_q); + } + + if (status & RXSTATUS_OVERRUN){ + info->icount.rxover++; + usc_process_rxoverrun_sync( info ); + } + + usc_ClearIrqPendingBits( info, RECEIVE_STATUS ); + usc_UnlatchRxstatusBits( info, status ); + +} /* end of mgsl_isr_receive_status() */ + +/* mgsl_isr_transmit_status() + * + * Service a transmit status interrupt + * HDLC mode :end of transmit frame + * Async mode:all data is sent + * transmit status is indicated by bits in the TCSR. + * + * Arguments: info pointer to device instance data + * Return Value: None + */ +static void mgsl_isr_transmit_status( struct mgsl_struct *info ) +{ + u16 status = usc_InReg( info, TCSR ); + + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk("%s(%d):mgsl_isr_transmit_status status=%04X\n", + __FILE__,__LINE__,status); + + usc_ClearIrqPendingBits( info, TRANSMIT_STATUS ); + usc_UnlatchTxstatusBits( info, status ); + + if ( status & (TXSTATUS_UNDERRUN | TXSTATUS_ABORT_SENT) ) + { + /* finished sending HDLC abort. 
This may leave */ + /* the TxFifo with data from the aborted frame */ + /* so purge the TxFifo. Also shutdown the DMA */ + /* channel in case there is data remaining in */ + /* the DMA buffer */ + usc_DmaCmd( info, DmaCmd_ResetTxChannel ); + usc_RTCmd( info, RTCmd_PurgeTxFifo ); + } + + if ( status & TXSTATUS_EOF_SENT ) + info->icount.txok++; + else if ( status & TXSTATUS_UNDERRUN ) + info->icount.txunder++; + else if ( status & TXSTATUS_ABORT_SENT ) + info->icount.txabort++; + else + info->icount.txunder++; + + info->tx_active = false; + info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; + del_timer(&info->tx_timer); + + if ( info->drop_rts_on_tx_done ) { + usc_get_serial_signals( info ); + if ( info->serial_signals & SerialSignal_RTS ) { + info->serial_signals &= ~SerialSignal_RTS; + usc_set_serial_signals( info ); + } + info->drop_rts_on_tx_done = false; + } + +#if SYNCLINK_GENERIC_HDLC + if (info->netcount) + hdlcdev_tx_done(info); + else +#endif + { + if (info->port.tty->stopped || info->port.tty->hw_stopped) { + usc_stop_transmitter(info); + return; + } + info->pending_bh |= BH_TRANSMIT; + } + +} /* end of mgsl_isr_transmit_status() */ + +/* mgsl_isr_io_pin() + * + * Service an Input/Output pin interrupt. The type of + * interrupt is indicated by bits in the MISR + * + * Arguments: info pointer to device instance data + * Return Value: None + */ +static void mgsl_isr_io_pin( struct mgsl_struct *info ) +{ + struct mgsl_icount *icount; + u16 status = usc_InReg( info, MISR ); + + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk("%s(%d):mgsl_isr_io_pin status=%04X\n", + __FILE__,__LINE__,status); + + usc_ClearIrqPendingBits( info, IO_PIN ); + usc_UnlatchIostatusBits( info, status ); + + if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED | + MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) { + icount = &info->icount; + /* update input line counters */ + if (status & MISCSTATUS_RI_LATCHED) { + if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) + usc_DisablestatusIrqs(info,SICR_RI); + icount->rng++; + if ( status & MISCSTATUS_RI ) + info->input_signal_events.ri_up++; + else + info->input_signal_events.ri_down++; + } + if (status & MISCSTATUS_DSR_LATCHED) { + if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) + usc_DisablestatusIrqs(info,SICR_DSR); + icount->dsr++; + if ( status & MISCSTATUS_DSR ) + info->input_signal_events.dsr_up++; + else + info->input_signal_events.dsr_down++; + } + if (status & MISCSTATUS_DCD_LATCHED) { + if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) + usc_DisablestatusIrqs(info,SICR_DCD); + icount->dcd++; + if (status & MISCSTATUS_DCD) { + info->input_signal_events.dcd_up++; + } else + info->input_signal_events.dcd_down++; +#if SYNCLINK_GENERIC_HDLC + if (info->netcount) { + if (status & MISCSTATUS_DCD) + netif_carrier_on(info->netdev); + else + netif_carrier_off(info->netdev); + } +#endif + } + if (status & MISCSTATUS_CTS_LATCHED) + { + if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) + usc_DisablestatusIrqs(info,SICR_CTS); + icount->cts++; + if ( status & MISCSTATUS_CTS ) + info->input_signal_events.cts_up++; + else + info->input_signal_events.cts_down++; + } + wake_up_interruptible(&info->status_event_wait_q); + wake_up_interruptible(&info->event_wait_q); + + if ( (info->port.flags & ASYNC_CHECK_CD) && + (status & MISCSTATUS_DCD_LATCHED) ) { + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk("%s CD now %s...", info->device_name, + (status & MISCSTATUS_DCD) ? 
"on" : "off"); + if (status & MISCSTATUS_DCD) + wake_up_interruptible(&info->port.open_wait); + else { + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk("doing serial hangup..."); + if (info->port.tty) + tty_hangup(info->port.tty); + } + } + + if ( (info->port.flags & ASYNC_CTS_FLOW) && + (status & MISCSTATUS_CTS_LATCHED) ) { + if (info->port.tty->hw_stopped) { + if (status & MISCSTATUS_CTS) { + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk("CTS tx start..."); + if (info->port.tty) + info->port.tty->hw_stopped = 0; + usc_start_transmitter(info); + info->pending_bh |= BH_TRANSMIT; + return; + } + } else { + if (!(status & MISCSTATUS_CTS)) { + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk("CTS tx stop..."); + if (info->port.tty) + info->port.tty->hw_stopped = 1; + usc_stop_transmitter(info); + } + } + } + } + + info->pending_bh |= BH_STATUS; + + /* for diagnostics set IRQ flag */ + if ( status & MISCSTATUS_TXC_LATCHED ){ + usc_OutReg( info, SICR, + (unsigned short)(usc_InReg(info,SICR) & ~(SICR_TXC_ACTIVE+SICR_TXC_INACTIVE)) ); + usc_UnlatchIostatusBits( info, MISCSTATUS_TXC_LATCHED ); + info->irq_occurred = true; + } + +} /* end of mgsl_isr_io_pin() */ + +/* mgsl_isr_transmit_data() + * + * Service a transmit data interrupt (async mode only). + * + * Arguments: info pointer to device instance data + * Return Value: None + */ +static void mgsl_isr_transmit_data( struct mgsl_struct *info ) +{ + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk("%s(%d):mgsl_isr_transmit_data xmit_cnt=%d\n", + __FILE__,__LINE__,info->xmit_cnt); + + usc_ClearIrqPendingBits( info, TRANSMIT_DATA ); + + if (info->port.tty->stopped || info->port.tty->hw_stopped) { + usc_stop_transmitter(info); + return; + } + + if ( info->xmit_cnt ) + usc_load_txfifo( info ); + else + info->tx_active = false; + + if (info->xmit_cnt < WAKEUP_CHARS) + info->pending_bh |= BH_TRANSMIT; + +} /* end of mgsl_isr_transmit_data() */ + +/* mgsl_isr_receive_data() + * + * Service a receive data interrupt. This occurs + * when operating in asynchronous interrupt transfer mode. + * The receive data FIFO is flushed to the receive data buffers. 
+ * + * Arguments: info pointer to device instance data + * Return Value: None + */ +static void mgsl_isr_receive_data( struct mgsl_struct *info ) +{ + int Fifocount; + u16 status; + int work = 0; + unsigned char DataByte; + struct tty_struct *tty = info->port.tty; + struct mgsl_icount *icount = &info->icount; + + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk("%s(%d):mgsl_isr_receive_data\n", + __FILE__,__LINE__); + + usc_ClearIrqPendingBits( info, RECEIVE_DATA ); + + /* select FIFO status for RICR readback */ + usc_RCmd( info, RCmd_SelectRicrRxFifostatus ); + + /* clear the Wordstatus bit so that status readback */ + /* only reflects the status of this byte */ + usc_OutReg( info, RICR+LSBONLY, (u16)(usc_InReg(info, RICR+LSBONLY) & ~BIT3 )); + + /* flush the receive FIFO */ + + while( (Fifocount = (usc_InReg(info,RICR) >> 8)) ) { + int flag; + + /* read one byte from RxFIFO */ + outw( (inw(info->io_base + CCAR) & 0x0780) | (RDR+LSBONLY), + info->io_base + CCAR ); + DataByte = inb( info->io_base + CCAR ); + + /* get the status of the received byte */ + status = usc_InReg(info, RCSR); + if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR + + RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) ) + usc_UnlatchRxstatusBits(info,RXSTATUS_ALL); + + icount->rx++; + + flag = 0; + if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR + + RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) ) { + printk("rxerr=%04X\n",status); + /* update error statistics */ + if ( status & RXSTATUS_BREAK_RECEIVED ) { + status &= ~(RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR); + icount->brk++; + } else if (status & RXSTATUS_PARITY_ERROR) + icount->parity++; + else if (status & RXSTATUS_FRAMING_ERROR) + icount->frame++; + else if (status & RXSTATUS_OVERRUN) { + /* must issue purge fifo cmd before */ + /* 16C32 accepts more receive chars */ + usc_RTCmd(info,RTCmd_PurgeRxFifo); + icount->overrun++; + } + + /* discard char if tty control flags say so */ + if (status & info->ignore_status_mask) + continue; + + status &= info->read_status_mask; + + if (status & RXSTATUS_BREAK_RECEIVED) { + flag = TTY_BREAK; + if (info->port.flags & ASYNC_SAK) + do_SAK(tty); + } else if (status & RXSTATUS_PARITY_ERROR) + flag = TTY_PARITY; + else if (status & RXSTATUS_FRAMING_ERROR) + flag = TTY_FRAME; + } /* end of if (error) */ + tty_insert_flip_char(tty, DataByte, flag); + if (status & RXSTATUS_OVERRUN) { + /* Overrun is special, since it's + * reported immediately, and doesn't + * affect the current character + */ + work += tty_insert_flip_char(tty, 0, TTY_OVERRUN); + } + } + + if ( debug_level >= DEBUG_LEVEL_ISR ) { + printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n", + __FILE__,__LINE__,icount->rx,icount->brk, + icount->parity,icount->frame,icount->overrun); + } + + if(work) + tty_flip_buffer_push(tty); +} + +/* mgsl_isr_misc() + * + * Service a miscellaneous interrupt source. 
+ * + * Arguments: info pointer to device extension (instance data) + * Return Value: None + */ +static void mgsl_isr_misc( struct mgsl_struct *info ) +{ + u16 status = usc_InReg( info, MISR ); + + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk("%s(%d):mgsl_isr_misc status=%04X\n", + __FILE__,__LINE__,status); + + if ((status & MISCSTATUS_RCC_UNDERRUN) && + (info->params.mode == MGSL_MODE_HDLC)) { + + /* turn off receiver and rx DMA */ + usc_EnableReceiver(info,DISABLE_UNCONDITIONAL); + usc_DmaCmd(info, DmaCmd_ResetRxChannel); + usc_UnlatchRxstatusBits(info, RXSTATUS_ALL); + usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS); + usc_DisableInterrupts(info, RECEIVE_DATA + RECEIVE_STATUS); + + /* schedule BH handler to restart receiver */ + info->pending_bh |= BH_RECEIVE; + info->rx_rcc_underrun = true; + } + + usc_ClearIrqPendingBits( info, MISC ); + usc_UnlatchMiscstatusBits( info, status ); + +} /* end of mgsl_isr_misc() */ + +/* mgsl_isr_null() + * + * Services undefined interrupt vectors from the + * USC. (hence this function SHOULD never be called) + * + * Arguments: info pointer to device extension (instance data) + * Return Value: None + */ +static void mgsl_isr_null( struct mgsl_struct *info ) +{ + +} /* end of mgsl_isr_null() */ + +/* mgsl_isr_receive_dma() + * + * Service a receive DMA channel interrupt. + * For this driver there are two sources of receive DMA interrupts + * as identified in the Receive DMA mode Register (RDMR): + * + * BIT3 EOA/EOL End of List, all receive buffers in receive + * buffer list have been filled (no more free buffers + * available). The DMA controller has shut down. + * + * BIT2 EOB End of Buffer. This interrupt occurs when a receive + * DMA buffer is terminated in response to completion + * of a good frame or a frame with errors. The status + * of the frame is stored in the buffer entry in the + * list of receive buffer entries. + * + * Arguments: info pointer to device instance data + * Return Value: None + */ +static void mgsl_isr_receive_dma( struct mgsl_struct *info ) +{ + u16 status; + + /* clear interrupt pending and IUS bit for Rx DMA IRQ */ + usc_OutDmaReg( info, CDIR, BIT9+BIT1 ); + + /* Read the receive DMA status to identify interrupt type. */ + /* This also clears the status bits. */ + status = usc_InDmaReg( info, RDMR ); + + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk("%s(%d):mgsl_isr_receive_dma(%s) status=%04X\n", + __FILE__,__LINE__,info->device_name,status); + + info->pending_bh |= BH_RECEIVE; + + if ( status & BIT3 ) { + info->rx_overflow = true; + info->icount.buf_overrun++; + } + +} /* end of mgsl_isr_receive_dma() */ + +/* mgsl_isr_transmit_dma() + * + * This function services a transmit DMA channel interrupt. + * + * For this driver there is one source of transmit DMA interrupts + * as identified in the Transmit DMA Mode Register (TDMR): + * + * BIT2 EOB End of Buffer. This interrupt occurs when a + * transmit DMA buffer has been emptied. + * + * The driver maintains enough transmit DMA buffers to hold at least + * one max frame size transmit frame. When operating in a buffered + * transmit mode, there may be enough transmit DMA buffers to hold at + * least two or more max frame size frames. On an EOB condition, + * determine if there are any queued transmit buffers and copy into + * transmit DMA buffers if we have room. 
+ * + * Arguments: info pointer to device instance data + * Return Value: None + */ +static void mgsl_isr_transmit_dma( struct mgsl_struct *info ) +{ + u16 status; + + /* clear interrupt pending and IUS bit for Tx DMA IRQ */ + usc_OutDmaReg(info, CDIR, BIT8+BIT0 ); + + /* Read the transmit DMA status to identify interrupt type. */ + /* This also clears the status bits. */ + + status = usc_InDmaReg( info, TDMR ); + + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk("%s(%d):mgsl_isr_transmit_dma(%s) status=%04X\n", + __FILE__,__LINE__,info->device_name,status); + + if ( status & BIT2 ) { + --info->tx_dma_buffers_used; + + /* if there are transmit frames queued, + * try to load the next one + */ + if ( load_next_tx_holding_buffer(info) ) { + /* if call returns non-zero value, we have + * at least one free tx holding buffer + */ + info->pending_bh |= BH_TRANSMIT; + } + } + +} /* end of mgsl_isr_transmit_dma() */ + +/* mgsl_interrupt() + * + * Interrupt service routine entry point. + * + * Arguments: + * + * irq interrupt number that caused interrupt + * dev_id device ID supplied during interrupt registration + * + * Return Value: None + */ +static irqreturn_t mgsl_interrupt(int dummy, void *dev_id) +{ + struct mgsl_struct *info = dev_id; + u16 UscVector; + u16 DmaVector; + + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)entry.\n", + __FILE__, __LINE__, info->irq_level); + + spin_lock(&info->irq_spinlock); + + for(;;) { + /* Read the interrupt vectors from hardware. */ + UscVector = usc_InReg(info, IVR) >> 9; + DmaVector = usc_InDmaReg(info, DIVR); + + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk("%s(%d):%s UscVector=%08X DmaVector=%08X\n", + __FILE__,__LINE__,info->device_name,UscVector,DmaVector); + + if ( !UscVector && !DmaVector ) + break; + + /* Dispatch interrupt vector */ + if ( UscVector ) + (*UscIsrTable[UscVector])(info); + else if ( (DmaVector&(BIT10|BIT9)) == BIT10) + mgsl_isr_transmit_dma(info); + else + mgsl_isr_receive_dma(info); + + if ( info->isr_overflow ) { + printk(KERN_ERR "%s(%d):%s isr overflow irq=%d\n", + __FILE__, __LINE__, info->device_name, info->irq_level); + usc_DisableMasterIrqBit(info); + usc_DisableDmaInterrupts(info,DICR_MASTER); + break; + } + } + + /* Request bottom half processing if there's something + * for it to do and the bh is not already running + */ + + if ( info->pending_bh && !info->bh_running && !info->bh_requested ) { + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk("%s(%d):%s queueing bh task.\n", + __FILE__,__LINE__,info->device_name); + schedule_work(&info->task); + info->bh_requested = true; + } + + spin_unlock(&info->irq_spinlock); + + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)exit.\n", + __FILE__, __LINE__, info->irq_level); + + return IRQ_HANDLED; +} /* end of mgsl_interrupt() */ + +/* startup() + * + * Initialize and start device. 
+ * + * Arguments: info pointer to device instance data + * Return Value: 0 if success, otherwise error code + */ +static int startup(struct mgsl_struct * info) +{ + int retval = 0; + + if ( debug_level >= DEBUG_LEVEL_INFO ) + printk("%s(%d):mgsl_startup(%s)\n",__FILE__,__LINE__,info->device_name); + + if (info->port.flags & ASYNC_INITIALIZED) + return 0; + + if (!info->xmit_buf) { + /* allocate a page of memory for a transmit buffer */ + info->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL); + if (!info->xmit_buf) { + printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n", + __FILE__,__LINE__,info->device_name); + return -ENOMEM; + } + } + + info->pending_bh = 0; + + memset(&info->icount, 0, sizeof(info->icount)); + + setup_timer(&info->tx_timer, mgsl_tx_timeout, (unsigned long)info); + + /* Allocate and claim adapter resources */ + retval = mgsl_claim_resources(info); + + /* perform existence check and diagnostics */ + if ( !retval ) + retval = mgsl_adapter_test(info); + + if ( retval ) { + if (capable(CAP_SYS_ADMIN) && info->port.tty) + set_bit(TTY_IO_ERROR, &info->port.tty->flags); + mgsl_release_resources(info); + return retval; + } + + /* program hardware for current parameters */ + mgsl_change_params(info); + + if (info->port.tty) + clear_bit(TTY_IO_ERROR, &info->port.tty->flags); + + info->port.flags |= ASYNC_INITIALIZED; + + return 0; + +} /* end of startup() */ + +/* shutdown() + * + * Called by mgsl_close() and mgsl_hangup() to shutdown hardware + * + * Arguments: info pointer to device instance data + * Return Value: None + */ +static void shutdown(struct mgsl_struct * info) +{ + unsigned long flags; + + if (!(info->port.flags & ASYNC_INITIALIZED)) + return; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgsl_shutdown(%s)\n", + __FILE__,__LINE__, info->device_name ); + + /* clear status wait queue because status changes */ + /* can't happen after shutting down the hardware */ + wake_up_interruptible(&info->status_event_wait_q); + wake_up_interruptible(&info->event_wait_q); + + del_timer_sync(&info->tx_timer); + + if (info->xmit_buf) { + free_page((unsigned long) info->xmit_buf); + info->xmit_buf = NULL; + } + + spin_lock_irqsave(&info->irq_spinlock,flags); + usc_DisableMasterIrqBit(info); + usc_stop_receiver(info); + usc_stop_transmitter(info); + usc_DisableInterrupts(info,RECEIVE_DATA + RECEIVE_STATUS + + TRANSMIT_DATA + TRANSMIT_STATUS + IO_PIN + MISC ); + usc_DisableDmaInterrupts(info,DICR_MASTER + DICR_TRANSMIT + DICR_RECEIVE); + + /* Disable DMAEN (Port 7, Bit 14) */ + /* This disconnects the DMA request signal from the ISA bus */ + /* on the ISA adapter. This has no effect for the PCI adapter */ + usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) | BIT14)); + + /* Disable INTEN (Port 6, Bit12) */ + /* This disconnects the IRQ request signal to the ISA bus */ + /* on the ISA adapter. 
This has no effect for the PCI adapter */ + usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) | BIT12)); + + if (!info->port.tty || info->port.tty->termios->c_cflag & HUPCL) { + info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS); + usc_set_serial_signals(info); + } + + spin_unlock_irqrestore(&info->irq_spinlock,flags); + + mgsl_release_resources(info); + + if (info->port.tty) + set_bit(TTY_IO_ERROR, &info->port.tty->flags); + + info->port.flags &= ~ASYNC_INITIALIZED; + +} /* end of shutdown() */ + +static void mgsl_program_hw(struct mgsl_struct *info) +{ + unsigned long flags; + + spin_lock_irqsave(&info->irq_spinlock,flags); + + usc_stop_receiver(info); + usc_stop_transmitter(info); + info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; + + if (info->params.mode == MGSL_MODE_HDLC || + info->params.mode == MGSL_MODE_RAW || + info->netcount) + usc_set_sync_mode(info); + else + usc_set_async_mode(info); + + usc_set_serial_signals(info); + + info->dcd_chkcount = 0; + info->cts_chkcount = 0; + info->ri_chkcount = 0; + info->dsr_chkcount = 0; + + usc_EnableStatusIrqs(info,SICR_CTS+SICR_DSR+SICR_DCD+SICR_RI); + usc_EnableInterrupts(info, IO_PIN); + usc_get_serial_signals(info); + + if (info->netcount || info->port.tty->termios->c_cflag & CREAD) + usc_start_receiver(info); + + spin_unlock_irqrestore(&info->irq_spinlock,flags); +} + +/* Reconfigure adapter based on new parameters + */ +static void mgsl_change_params(struct mgsl_struct *info) +{ + unsigned cflag; + int bits_per_char; + + if (!info->port.tty || !info->port.tty->termios) + return; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgsl_change_params(%s)\n", + __FILE__,__LINE__, info->device_name ); + + cflag = info->port.tty->termios->c_cflag; + + /* if B0 rate (hangup) specified then negate DTR and RTS */ + /* otherwise assert DTR and RTS */ + if (cflag & CBAUD) + info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR; + else + info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR); + + /* byte size and parity */ + + switch (cflag & CSIZE) { + case CS5: info->params.data_bits = 5; break; + case CS6: info->params.data_bits = 6; break; + case CS7: info->params.data_bits = 7; break; + case CS8: info->params.data_bits = 8; break; + /* Never happens, but GCC is too dumb to figure it out */ + default: info->params.data_bits = 7; break; + } + + if (cflag & CSTOPB) + info->params.stop_bits = 2; + else + info->params.stop_bits = 1; + + info->params.parity = ASYNC_PARITY_NONE; + if (cflag & PARENB) { + if (cflag & PARODD) + info->params.parity = ASYNC_PARITY_ODD; + else + info->params.parity = ASYNC_PARITY_EVEN; +#ifdef CMSPAR + if (cflag & CMSPAR) + info->params.parity = ASYNC_PARITY_SPACE; +#endif + } + + /* calculate number of jiffies to transmit a full + * FIFO (32 bytes) at specified data rate + */ + bits_per_char = info->params.data_bits + + info->params.stop_bits + 1; + + /* if port data rate is set to 460800 or less then + * allow tty settings to override, otherwise keep the + * current data rate. 
+ */ + if (info->params.data_rate <= 460800) + info->params.data_rate = tty_get_baud_rate(info->port.tty); + + if ( info->params.data_rate ) { + info->timeout = (32*HZ*bits_per_char) / + info->params.data_rate; + } + info->timeout += HZ/50; /* Add .02 seconds of slop */ + + if (cflag & CRTSCTS) + info->port.flags |= ASYNC_CTS_FLOW; + else + info->port.flags &= ~ASYNC_CTS_FLOW; + + if (cflag & CLOCAL) + info->port.flags &= ~ASYNC_CHECK_CD; + else + info->port.flags |= ASYNC_CHECK_CD; + + /* process tty input control flags */ + + info->read_status_mask = RXSTATUS_OVERRUN; + if (I_INPCK(info->port.tty)) + info->read_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR; + if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty)) + info->read_status_mask |= RXSTATUS_BREAK_RECEIVED; + + if (I_IGNPAR(info->port.tty)) + info->ignore_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR; + if (I_IGNBRK(info->port.tty)) { + info->ignore_status_mask |= RXSTATUS_BREAK_RECEIVED; + /* If ignoring parity and break indicators, ignore + * overruns too. (For real raw support). + */ + if (I_IGNPAR(info->port.tty)) + info->ignore_status_mask |= RXSTATUS_OVERRUN; + } + + mgsl_program_hw(info); + +} /* end of mgsl_change_params() */ + +/* mgsl_put_char() + * + * Add a character to the transmit buffer. + * + * Arguments: tty pointer to tty information structure + * ch character to add to transmit buffer + * + * Return Value: None + */ +static int mgsl_put_char(struct tty_struct *tty, unsigned char ch) +{ + struct mgsl_struct *info = tty->driver_data; + unsigned long flags; + int ret = 0; + + if (debug_level >= DEBUG_LEVEL_INFO) { + printk(KERN_DEBUG "%s(%d):mgsl_put_char(%d) on %s\n", + __FILE__, __LINE__, ch, info->device_name); + } + + if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char")) + return 0; + + if (!info->xmit_buf) + return 0; + + spin_lock_irqsave(&info->irq_spinlock, flags); + + if ((info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active) { + if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) { + info->xmit_buf[info->xmit_head++] = ch; + info->xmit_head &= SERIAL_XMIT_SIZE-1; + info->xmit_cnt++; + ret = 1; + } + } + spin_unlock_irqrestore(&info->irq_spinlock, flags); + return ret; + +} /* end of mgsl_put_char() */ + +/* mgsl_flush_chars() + * + * Enable transmitter so remaining characters in the + * transmit buffer are sent. + * + * Arguments: tty pointer to tty information structure + * Return Value: None + */ +static void mgsl_flush_chars(struct tty_struct *tty) +{ + struct mgsl_struct *info = tty->driver_data; + unsigned long flags; + + if ( debug_level >= DEBUG_LEVEL_INFO ) + printk( "%s(%d):mgsl_flush_chars() entry on %s xmit_cnt=%d\n", + __FILE__,__LINE__,info->device_name,info->xmit_cnt); + + if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_chars")) + return; + + if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped || + !info->xmit_buf) + return; + + if ( debug_level >= DEBUG_LEVEL_INFO ) + printk( "%s(%d):mgsl_flush_chars() entry on %s starting transmitter\n", + __FILE__,__LINE__,info->device_name ); + + spin_lock_irqsave(&info->irq_spinlock,flags); + + if (!info->tx_active) { + if ( (info->params.mode == MGSL_MODE_HDLC || + info->params.mode == MGSL_MODE_RAW) && info->xmit_cnt ) { + /* operating in synchronous (frame oriented) mode */ + /* copy data from circular xmit_buf to */ + /* transmit DMA buffer. 
*/ + mgsl_load_tx_dma_buffer(info, + info->xmit_buf,info->xmit_cnt); + } + usc_start_transmitter(info); + } + + spin_unlock_irqrestore(&info->irq_spinlock,flags); + +} /* end of mgsl_flush_chars() */ + +/* mgsl_write() + * + * Send a block of data + * + * Arguments: + * + * tty pointer to tty information structure + * buf pointer to buffer containing send data + * count size of send data in bytes + * + * Return Value: number of characters written + */ +static int mgsl_write(struct tty_struct * tty, + const unsigned char *buf, int count) +{ + int c, ret = 0; + struct mgsl_struct *info = tty->driver_data; + unsigned long flags; + + if ( debug_level >= DEBUG_LEVEL_INFO ) + printk( "%s(%d):mgsl_write(%s) count=%d\n", + __FILE__,__LINE__,info->device_name,count); + + if (mgsl_paranoia_check(info, tty->name, "mgsl_write")) + goto cleanup; + + if (!info->xmit_buf) + goto cleanup; + + if ( info->params.mode == MGSL_MODE_HDLC || + info->params.mode == MGSL_MODE_RAW ) { + /* operating in synchronous (frame oriented) mode */ + /* operating in synchronous (frame oriented) mode */ + if (info->tx_active) { + + if ( info->params.mode == MGSL_MODE_HDLC ) { + ret = 0; + goto cleanup; + } + /* transmitter is actively sending data - + * if we have multiple transmit dma and + * holding buffers, attempt to queue this + * frame for transmission at a later time. + */ + if (info->tx_holding_count >= info->num_tx_holding_buffers ) { + /* no tx holding buffers available */ + ret = 0; + goto cleanup; + } + + /* queue transmit frame request */ + ret = count; + save_tx_buffer_request(info,buf,count); + + /* if we have sufficient tx dma buffers, + * load the next buffered tx request + */ + spin_lock_irqsave(&info->irq_spinlock,flags); + load_next_tx_holding_buffer(info); + spin_unlock_irqrestore(&info->irq_spinlock,flags); + goto cleanup; + } + + /* if operating in HDLC LoopMode and the adapter */ + /* has yet to be inserted into the loop, we can't */ + /* transmit */ + + if ( (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) && + !usc_loopmode_active(info) ) + { + ret = 0; + goto cleanup; + } + + if ( info->xmit_cnt ) { + /* Send accumulated from send_char() calls */ + /* as frame and wait before accepting more data. */ + ret = 0; + + /* copy data from circular xmit_buf to */ + /* transmit DMA buffer. 
*/ + mgsl_load_tx_dma_buffer(info, + info->xmit_buf,info->xmit_cnt); + if ( debug_level >= DEBUG_LEVEL_INFO ) + printk( "%s(%d):mgsl_write(%s) sync xmit_cnt flushing\n", + __FILE__,__LINE__,info->device_name); + } else { + if ( debug_level >= DEBUG_LEVEL_INFO ) + printk( "%s(%d):mgsl_write(%s) sync transmit accepted\n", + __FILE__,__LINE__,info->device_name); + ret = count; + info->xmit_cnt = count; + mgsl_load_tx_dma_buffer(info,buf,count); + } + } else { + while (1) { + spin_lock_irqsave(&info->irq_spinlock,flags); + c = min_t(int, count, + min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1, + SERIAL_XMIT_SIZE - info->xmit_head)); + if (c <= 0) { + spin_unlock_irqrestore(&info->irq_spinlock,flags); + break; + } + memcpy(info->xmit_buf + info->xmit_head, buf, c); + info->xmit_head = ((info->xmit_head + c) & + (SERIAL_XMIT_SIZE-1)); + info->xmit_cnt += c; + spin_unlock_irqrestore(&info->irq_spinlock,flags); + buf += c; + count -= c; + ret += c; + } + } + + if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) { + spin_lock_irqsave(&info->irq_spinlock,flags); + if (!info->tx_active) + usc_start_transmitter(info); + spin_unlock_irqrestore(&info->irq_spinlock,flags); + } +cleanup: + if ( debug_level >= DEBUG_LEVEL_INFO ) + printk( "%s(%d):mgsl_write(%s) returning=%d\n", + __FILE__,__LINE__,info->device_name,ret); + + return ret; + +} /* end of mgsl_write() */ + +/* mgsl_write_room() + * + * Return the count of free bytes in transmit buffer + * + * Arguments: tty pointer to tty info structure + * Return Value: None + */ +static int mgsl_write_room(struct tty_struct *tty) +{ + struct mgsl_struct *info = tty->driver_data; + int ret; + + if (mgsl_paranoia_check(info, tty->name, "mgsl_write_room")) + return 0; + ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1; + if (ret < 0) + ret = 0; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgsl_write_room(%s)=%d\n", + __FILE__,__LINE__, info->device_name,ret ); + + if ( info->params.mode == MGSL_MODE_HDLC || + info->params.mode == MGSL_MODE_RAW ) { + /* operating in synchronous (frame oriented) mode */ + if ( info->tx_active ) + return 0; + else + return HDLC_MAX_FRAME_SIZE; + } + + return ret; + +} /* end of mgsl_write_room() */ + +/* mgsl_chars_in_buffer() + * + * Return the count of bytes in transmit buffer + * + * Arguments: tty pointer to tty info structure + * Return Value: None + */ +static int mgsl_chars_in_buffer(struct tty_struct *tty) +{ + struct mgsl_struct *info = tty->driver_data; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgsl_chars_in_buffer(%s)\n", + __FILE__,__LINE__, info->device_name ); + + if (mgsl_paranoia_check(info, tty->name, "mgsl_chars_in_buffer")) + return 0; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgsl_chars_in_buffer(%s)=%d\n", + __FILE__,__LINE__, info->device_name,info->xmit_cnt ); + + if ( info->params.mode == MGSL_MODE_HDLC || + info->params.mode == MGSL_MODE_RAW ) { + /* operating in synchronous (frame oriented) mode */ + if ( info->tx_active ) + return info->max_frame_size; + else + return 0; + } + + return info->xmit_cnt; +} /* end of mgsl_chars_in_buffer() */ + +/* mgsl_flush_buffer() + * + * Discard all data in the send buffer + * + * Arguments: tty pointer to tty info structure + * Return Value: None + */ +static void mgsl_flush_buffer(struct tty_struct *tty) +{ + struct mgsl_struct *info = tty->driver_data; + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgsl_flush_buffer(%s) entry\n", + __FILE__,__LINE__, info->device_name ); + + if 
(mgsl_paranoia_check(info, tty->name, "mgsl_flush_buffer")) + return; + + spin_lock_irqsave(&info->irq_spinlock,flags); + info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; + del_timer(&info->tx_timer); + spin_unlock_irqrestore(&info->irq_spinlock,flags); + + tty_wakeup(tty); +} + +/* mgsl_send_xchar() + * + * Send a high-priority XON/XOFF character + * + * Arguments: tty pointer to tty info structure + * ch character to send + * Return Value: None + */ +static void mgsl_send_xchar(struct tty_struct *tty, char ch) +{ + struct mgsl_struct *info = tty->driver_data; + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgsl_send_xchar(%s,%d)\n", + __FILE__,__LINE__, info->device_name, ch ); + + if (mgsl_paranoia_check(info, tty->name, "mgsl_send_xchar")) + return; + + info->x_char = ch; + if (ch) { + /* Make sure transmit interrupts are on */ + spin_lock_irqsave(&info->irq_spinlock,flags); + if (!info->tx_enabled) + usc_start_transmitter(info); + spin_unlock_irqrestore(&info->irq_spinlock,flags); + } +} /* end of mgsl_send_xchar() */ + +/* mgsl_throttle() + * + * Signal remote device to throttle send data (our receive data) + * + * Arguments: tty pointer to tty info structure + * Return Value: None + */ +static void mgsl_throttle(struct tty_struct * tty) +{ + struct mgsl_struct *info = tty->driver_data; + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgsl_throttle(%s) entry\n", + __FILE__,__LINE__, info->device_name ); + + if (mgsl_paranoia_check(info, tty->name, "mgsl_throttle")) + return; + + if (I_IXOFF(tty)) + mgsl_send_xchar(tty, STOP_CHAR(tty)); + + if (tty->termios->c_cflag & CRTSCTS) { + spin_lock_irqsave(&info->irq_spinlock,flags); + info->serial_signals &= ~SerialSignal_RTS; + usc_set_serial_signals(info); + spin_unlock_irqrestore(&info->irq_spinlock,flags); + } +} /* end of mgsl_throttle() */ + +/* mgsl_unthrottle() + * + * Signal remote device to stop throttling send data (our receive data) + * + * Arguments: tty pointer to tty info structure + * Return Value: None + */ +static void mgsl_unthrottle(struct tty_struct * tty) +{ + struct mgsl_struct *info = tty->driver_data; + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgsl_unthrottle(%s) entry\n", + __FILE__,__LINE__, info->device_name ); + + if (mgsl_paranoia_check(info, tty->name, "mgsl_unthrottle")) + return; + + if (I_IXOFF(tty)) { + if (info->x_char) + info->x_char = 0; + else + mgsl_send_xchar(tty, START_CHAR(tty)); + } + + if (tty->termios->c_cflag & CRTSCTS) { + spin_lock_irqsave(&info->irq_spinlock,flags); + info->serial_signals |= SerialSignal_RTS; + usc_set_serial_signals(info); + spin_unlock_irqrestore(&info->irq_spinlock,flags); + } + +} /* end of mgsl_unthrottle() */ + +/* mgsl_get_stats() + * + * get the current serial statistics information + * + * Arguments: info pointer to device instance data + * user_icount pointer to buffer to hold returned stats + * + * Return Value: 0 if success, otherwise error code + */ +static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount __user *user_icount) +{ + int err; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgsl_get_stats(%s)\n", + __FILE__,__LINE__, info->device_name); + + if (!user_icount) { + memset(&info->icount, 0, sizeof(info->icount)); + } else { + mutex_lock(&info->port.mutex); + COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount)); + mutex_unlock(&info->port.mutex); + if (err) + return -EFAULT; + } + + return 0; + +} /* 
end of mgsl_get_stats() */ + +/* mgsl_get_params() + * + * get the current serial parameters information + * + * Arguments: info pointer to device instance data + * user_params pointer to buffer to hold returned params + * + * Return Value: 0 if success, otherwise error code + */ +static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params) +{ + int err; + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgsl_get_params(%s)\n", + __FILE__,__LINE__, info->device_name); + + mutex_lock(&info->port.mutex); + COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS)); + mutex_unlock(&info->port.mutex); + if (err) { + if ( debug_level >= DEBUG_LEVEL_INFO ) + printk( "%s(%d):mgsl_get_params(%s) user buffer copy failed\n", + __FILE__,__LINE__,info->device_name); + return -EFAULT; + } + + return 0; + +} /* end of mgsl_get_params() */ + +/* mgsl_set_params() + * + * set the serial parameters + * + * Arguments: + * + * info pointer to device instance data + * new_params user buffer containing new serial params + * + * Return Value: 0 if success, otherwise error code + */ +static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params) +{ + unsigned long flags; + MGSL_PARAMS tmp_params; + int err; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgsl_set_params %s\n", __FILE__,__LINE__, + info->device_name ); + COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS)); + if (err) { + if ( debug_level >= DEBUG_LEVEL_INFO ) + printk( "%s(%d):mgsl_set_params(%s) user buffer copy failed\n", + __FILE__,__LINE__,info->device_name); + return -EFAULT; + } + + mutex_lock(&info->port.mutex); + spin_lock_irqsave(&info->irq_spinlock,flags); + memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS)); + spin_unlock_irqrestore(&info->irq_spinlock,flags); + + mgsl_change_params(info); + mutex_unlock(&info->port.mutex); + + return 0; + +} /* end of mgsl_set_params() */ + +/* mgsl_get_txidle() + * + * get the current transmit idle mode + * + * Arguments: info pointer to device instance data + * idle_mode pointer to buffer to hold returned idle mode + * + * Return Value: 0 if success, otherwise error code + */ +static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode) +{ + int err; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgsl_get_txidle(%s)=%d\n", + __FILE__,__LINE__, info->device_name, info->idle_mode); + + COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int)); + if (err) { + if ( debug_level >= DEBUG_LEVEL_INFO ) + printk( "%s(%d):mgsl_get_txidle(%s) user buffer copy failed\n", + __FILE__,__LINE__,info->device_name); + return -EFAULT; + } + + return 0; + +} /* end of mgsl_get_txidle() */ + +/* mgsl_set_txidle() service ioctl to set transmit idle mode + * + * Arguments: info pointer to device instance data + * idle_mode new idle mode + * + * Return Value: 0 if success, otherwise error code + */ +static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode) +{ + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgsl_set_txidle(%s,%d)\n", __FILE__,__LINE__, + info->device_name, idle_mode ); + + spin_lock_irqsave(&info->irq_spinlock,flags); + info->idle_mode = idle_mode; + usc_set_txidle( info ); + spin_unlock_irqrestore(&info->irq_spinlock,flags); + return 0; + +} /* end of mgsl_set_txidle() */ + +/* mgsl_txenable() + * + * enable or disable the transmitter + * + * Arguments: + * + * info pointer to device instance data + * enable 1 = enable, 0 = disable + * + * Return 
Value: 0 if success, otherwise error code + */ +static int mgsl_txenable(struct mgsl_struct * info, int enable) +{ + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgsl_txenable(%s,%d)\n", __FILE__,__LINE__, + info->device_name, enable); + + spin_lock_irqsave(&info->irq_spinlock,flags); + if ( enable ) { + if ( !info->tx_enabled ) { + + usc_start_transmitter(info); + /*-------------------------------------------------- + * if HDLC/SDLC Loop mode, attempt to insert the + * station in the 'loop' by setting CMR:13. Upon + * receipt of the next GoAhead (RxAbort) sequence, + * the OnLoop indicator (CCSR:7) should go active + * to indicate that we are on the loop + *--------------------------------------------------*/ + if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE ) + usc_loopmode_insert_request( info ); + } + } else { + if ( info->tx_enabled ) + usc_stop_transmitter(info); + } + spin_unlock_irqrestore(&info->irq_spinlock,flags); + return 0; + +} /* end of mgsl_txenable() */ + +/* mgsl_txabort() abort send HDLC frame + * + * Arguments: info pointer to device instance data + * Return Value: 0 if success, otherwise error code + */ +static int mgsl_txabort(struct mgsl_struct * info) +{ + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgsl_txabort(%s)\n", __FILE__,__LINE__, + info->device_name); + + spin_lock_irqsave(&info->irq_spinlock,flags); + if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC ) + { + if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE ) + usc_loopmode_cancel_transmit( info ); + else + usc_TCmd(info,TCmd_SendAbort); + } + spin_unlock_irqrestore(&info->irq_spinlock,flags); + return 0; + +} /* end of mgsl_txabort() */ + +/* mgsl_rxenable() enable or disable the receiver + * + * Arguments: info pointer to device instance data + * enable 1 = enable, 0 = disable + * Return Value: 0 if success, otherwise error code + */ +static int mgsl_rxenable(struct mgsl_struct * info, int enable) +{ + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgsl_rxenable(%s,%d)\n", __FILE__,__LINE__, + info->device_name, enable); + + spin_lock_irqsave(&info->irq_spinlock,flags); + if ( enable ) { + if ( !info->rx_enabled ) + usc_start_receiver(info); + } else { + if ( info->rx_enabled ) + usc_stop_receiver(info); + } + spin_unlock_irqrestore(&info->irq_spinlock,flags); + return 0; + +} /* end of mgsl_rxenable() */ + +/* mgsl_wait_event() wait for specified event to occur + * + * Arguments: info pointer to device instance data + * mask pointer to bitmask of events to wait for + * Return Value: 0 if successful and bit mask updated with + * of events triggerred, + * otherwise error code + */ +static int mgsl_wait_event(struct mgsl_struct * info, int __user * mask_ptr) +{ + unsigned long flags; + int s; + int rc=0; + struct mgsl_icount cprev, cnow; + int events; + int mask; + struct _input_signal_events oldsigs, newsigs; + DECLARE_WAITQUEUE(wait, current); + + COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int)); + if (rc) { + return -EFAULT; + } + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgsl_wait_event(%s,%d)\n", __FILE__,__LINE__, + info->device_name, mask); + + spin_lock_irqsave(&info->irq_spinlock,flags); + + /* return immediately if state matches requested events */ + usc_get_serial_signals(info); + s = info->serial_signals; + events = mask & + ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) + + ((s & SerialSignal_DCD) ? 
MgslEvent_DcdActive:MgslEvent_DcdInactive) + + ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) + + ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) ); + if (events) { + spin_unlock_irqrestore(&info->irq_spinlock,flags); + goto exit; + } + + /* save current irq counts */ + cprev = info->icount; + oldsigs = info->input_signal_events; + + /* enable hunt and idle irqs if needed */ + if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) { + u16 oldreg = usc_InReg(info,RICR); + u16 newreg = oldreg + + (mask & MgslEvent_ExitHuntMode ? RXSTATUS_EXITED_HUNT:0) + + (mask & MgslEvent_IdleReceived ? RXSTATUS_IDLE_RECEIVED:0); + if (oldreg != newreg) + usc_OutReg(info, RICR, newreg); + } + + set_current_state(TASK_INTERRUPTIBLE); + add_wait_queue(&info->event_wait_q, &wait); + + spin_unlock_irqrestore(&info->irq_spinlock,flags); + + + for(;;) { + schedule(); + if (signal_pending(current)) { + rc = -ERESTARTSYS; + break; + } + + /* get current irq counts */ + spin_lock_irqsave(&info->irq_spinlock,flags); + cnow = info->icount; + newsigs = info->input_signal_events; + set_current_state(TASK_INTERRUPTIBLE); + spin_unlock_irqrestore(&info->irq_spinlock,flags); + + /* if no change, wait aborted for some reason */ + if (newsigs.dsr_up == oldsigs.dsr_up && + newsigs.dsr_down == oldsigs.dsr_down && + newsigs.dcd_up == oldsigs.dcd_up && + newsigs.dcd_down == oldsigs.dcd_down && + newsigs.cts_up == oldsigs.cts_up && + newsigs.cts_down == oldsigs.cts_down && + newsigs.ri_up == oldsigs.ri_up && + newsigs.ri_down == oldsigs.ri_down && + cnow.exithunt == cprev.exithunt && + cnow.rxidle == cprev.rxidle) { + rc = -EIO; + break; + } + + events = mask & + ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) + + (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) + + (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) + + (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) + + (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) + + (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) + + (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) + + (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) + + (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) + + (cnow.rxidle != cprev.rxidle ? 
MgslEvent_IdleReceived:0) ); + if (events) + break; + + cprev = cnow; + oldsigs = newsigs; + } + + remove_wait_queue(&info->event_wait_q, &wait); + set_current_state(TASK_RUNNING); + + if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) { + spin_lock_irqsave(&info->irq_spinlock,flags); + if (!waitqueue_active(&info->event_wait_q)) { + /* disable enable exit hunt mode/idle rcvd IRQs */ + usc_OutReg(info, RICR, usc_InReg(info,RICR) & + ~(RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED)); + } + spin_unlock_irqrestore(&info->irq_spinlock,flags); + } +exit: + if ( rc == 0 ) + PUT_USER(rc, events, mask_ptr); + + return rc; + +} /* end of mgsl_wait_event() */ + +static int modem_input_wait(struct mgsl_struct *info,int arg) +{ + unsigned long flags; + int rc; + struct mgsl_icount cprev, cnow; + DECLARE_WAITQUEUE(wait, current); + + /* save current irq counts */ + spin_lock_irqsave(&info->irq_spinlock,flags); + cprev = info->icount; + add_wait_queue(&info->status_event_wait_q, &wait); + set_current_state(TASK_INTERRUPTIBLE); + spin_unlock_irqrestore(&info->irq_spinlock,flags); + + for(;;) { + schedule(); + if (signal_pending(current)) { + rc = -ERESTARTSYS; + break; + } + + /* get new irq counts */ + spin_lock_irqsave(&info->irq_spinlock,flags); + cnow = info->icount; + set_current_state(TASK_INTERRUPTIBLE); + spin_unlock_irqrestore(&info->irq_spinlock,flags); + + /* if no change, wait aborted for some reason */ + if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && + cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) { + rc = -EIO; + break; + } + + /* check for change in caller specified modem input */ + if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) || + (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) || + (arg & TIOCM_CD && cnow.dcd != cprev.dcd) || + (arg & TIOCM_CTS && cnow.cts != cprev.cts)) { + rc = 0; + break; + } + + cprev = cnow; + } + remove_wait_queue(&info->status_event_wait_q, &wait); + set_current_state(TASK_RUNNING); + return rc; +} + +/* return the state of the serial control and status signals + */ +static int tiocmget(struct tty_struct *tty) +{ + struct mgsl_struct *info = tty->driver_data; + unsigned int result; + unsigned long flags; + + spin_lock_irqsave(&info->irq_spinlock,flags); + usc_get_serial_signals(info); + spin_unlock_irqrestore(&info->irq_spinlock,flags); + + result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) + + ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) + + ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) + + ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) + + ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) + + ((info->serial_signals & SerialSignal_CTS) ? 
TIOCM_CTS:0); + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s tiocmget() value=%08X\n", + __FILE__,__LINE__, info->device_name, result ); + return result; +} + +/* set modem control signals (DTR/RTS) + */ +static int tiocmset(struct tty_struct *tty, + unsigned int set, unsigned int clear) +{ + struct mgsl_struct *info = tty->driver_data; + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s tiocmset(%x,%x)\n", + __FILE__,__LINE__,info->device_name, set, clear); + + if (set & TIOCM_RTS) + info->serial_signals |= SerialSignal_RTS; + if (set & TIOCM_DTR) + info->serial_signals |= SerialSignal_DTR; + if (clear & TIOCM_RTS) + info->serial_signals &= ~SerialSignal_RTS; + if (clear & TIOCM_DTR) + info->serial_signals &= ~SerialSignal_DTR; + + spin_lock_irqsave(&info->irq_spinlock,flags); + usc_set_serial_signals(info); + spin_unlock_irqrestore(&info->irq_spinlock,flags); + + return 0; +} + +/* mgsl_break() Set or clear transmit break condition + * + * Arguments: tty pointer to tty instance data + * break_state -1=set break condition, 0=clear + * Return Value: error code + */ +static int mgsl_break(struct tty_struct *tty, int break_state) +{ + struct mgsl_struct * info = tty->driver_data; + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgsl_break(%s,%d)\n", + __FILE__,__LINE__, info->device_name, break_state); + + if (mgsl_paranoia_check(info, tty->name, "mgsl_break")) + return -EINVAL; + + spin_lock_irqsave(&info->irq_spinlock,flags); + if (break_state == -1) + usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) | BIT7)); + else + usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) & ~BIT7)); + spin_unlock_irqrestore(&info->irq_spinlock,flags); + return 0; + +} /* end of mgsl_break() */ + +/* + * Get counter of input serial line interrupts (DCD,RI,DSR,CTS) + * Return: write counters to the user passed counter struct + * NB: both 1->0 and 0->1 transitions are counted except for + * RI where only 0->1 is counted. 
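+ *
+ * From user space these counters are normally read with the
+ * TIOCGICOUNT ioctl, which the tty core routes to the get_icount()
+ * handler below; an illustrative (not driver-specific) call looks
+ * roughly like:
+ *
+ *	struct serial_icounter_struct ic;
+ *	if (ioctl(fd, TIOCGICOUNT, &ic) == 0)
+ *		printf("dcd=%d cts=%d dsr=%d rng=%d\n",
+ *		       ic.dcd, ic.cts, ic.dsr, ic.rng);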
+ */ +static int msgl_get_icount(struct tty_struct *tty, + struct serial_icounter_struct *icount) + +{ + struct mgsl_struct * info = tty->driver_data; + struct mgsl_icount cnow; /* kernel counter temps */ + unsigned long flags; + + spin_lock_irqsave(&info->irq_spinlock,flags); + cnow = info->icount; + spin_unlock_irqrestore(&info->irq_spinlock,flags); + + icount->cts = cnow.cts; + icount->dsr = cnow.dsr; + icount->rng = cnow.rng; + icount->dcd = cnow.dcd; + icount->rx = cnow.rx; + icount->tx = cnow.tx; + icount->frame = cnow.frame; + icount->overrun = cnow.overrun; + icount->parity = cnow.parity; + icount->brk = cnow.brk; + icount->buf_overrun = cnow.buf_overrun; + return 0; +} + +/* mgsl_ioctl() Service an IOCTL request + * + * Arguments: + * + * tty pointer to tty instance data + * cmd IOCTL command code + * arg command argument/context + * + * Return Value: 0 if success, otherwise error code + */ +static int mgsl_ioctl(struct tty_struct *tty, + unsigned int cmd, unsigned long arg) +{ + struct mgsl_struct * info = tty->driver_data; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__, + info->device_name, cmd ); + + if (mgsl_paranoia_check(info, tty->name, "mgsl_ioctl")) + return -ENODEV; + + if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) && + (cmd != TIOCMIWAIT)) { + if (tty->flags & (1 << TTY_IO_ERROR)) + return -EIO; + } + + return mgsl_ioctl_common(info, cmd, arg); +} + +static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg) +{ + void __user *argp = (void __user *)arg; + + switch (cmd) { + case MGSL_IOCGPARAMS: + return mgsl_get_params(info, argp); + case MGSL_IOCSPARAMS: + return mgsl_set_params(info, argp); + case MGSL_IOCGTXIDLE: + return mgsl_get_txidle(info, argp); + case MGSL_IOCSTXIDLE: + return mgsl_set_txidle(info,(int)arg); + case MGSL_IOCTXENABLE: + return mgsl_txenable(info,(int)arg); + case MGSL_IOCRXENABLE: + return mgsl_rxenable(info,(int)arg); + case MGSL_IOCTXABORT: + return mgsl_txabort(info); + case MGSL_IOCGSTATS: + return mgsl_get_stats(info, argp); + case MGSL_IOCWAITEVENT: + return mgsl_wait_event(info, argp); + case MGSL_IOCLOOPTXDONE: + return mgsl_loopmode_send_done(info); + /* Wait for modem input (DCD,RI,DSR,CTS) change + * as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS) + */ + case TIOCMIWAIT: + return modem_input_wait(info,(int)arg); + + default: + return -ENOIOCTLCMD; + } + return 0; +} + +/* mgsl_set_termios() + * + * Set new termios settings + * + * Arguments: + * + * tty pointer to tty structure + * termios pointer to buffer to hold returned old termios + * + * Return Value: None + */ +static void mgsl_set_termios(struct tty_struct *tty, struct ktermios *old_termios) +{ + struct mgsl_struct *info = tty->driver_data; + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgsl_set_termios %s\n", __FILE__,__LINE__, + tty->driver->name ); + + mgsl_change_params(info); + + /* Handle transition to B0 status */ + if (old_termios->c_cflag & CBAUD && + !(tty->termios->c_cflag & CBAUD)) { + info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR); + spin_lock_irqsave(&info->irq_spinlock,flags); + usc_set_serial_signals(info); + spin_unlock_irqrestore(&info->irq_spinlock,flags); + } + + /* Handle transition away from B0 status */ + if (!(old_termios->c_cflag & CBAUD) && + tty->termios->c_cflag & CBAUD) { + info->serial_signals |= SerialSignal_DTR; + if (!(tty->termios->c_cflag & CRTSCTS) || + !test_bit(TTY_THROTTLED, &tty->flags)) { + 
info->serial_signals |= SerialSignal_RTS; + } + spin_lock_irqsave(&info->irq_spinlock,flags); + usc_set_serial_signals(info); + spin_unlock_irqrestore(&info->irq_spinlock,flags); + } + + /* Handle turning off CRTSCTS */ + if (old_termios->c_cflag & CRTSCTS && + !(tty->termios->c_cflag & CRTSCTS)) { + tty->hw_stopped = 0; + mgsl_start(tty); + } + +} /* end of mgsl_set_termios() */ + +/* mgsl_close() + * + * Called when port is closed. Wait for remaining data to be + * sent. Disable port and free resources. + * + * Arguments: + * + * tty pointer to open tty structure + * filp pointer to open file object + * + * Return Value: None + */ +static void mgsl_close(struct tty_struct *tty, struct file * filp) +{ + struct mgsl_struct * info = tty->driver_data; + + if (mgsl_paranoia_check(info, tty->name, "mgsl_close")) + return; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgsl_close(%s) entry, count=%d\n", + __FILE__,__LINE__, info->device_name, info->port.count); + + if (tty_port_close_start(&info->port, tty, filp) == 0) + goto cleanup; + + mutex_lock(&info->port.mutex); + if (info->port.flags & ASYNC_INITIALIZED) + mgsl_wait_until_sent(tty, info->timeout); + mgsl_flush_buffer(tty); + tty_ldisc_flush(tty); + shutdown(info); + mutex_unlock(&info->port.mutex); + + tty_port_close_end(&info->port, tty); + info->port.tty = NULL; +cleanup: + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__, + tty->driver->name, info->port.count); + +} /* end of mgsl_close() */ + +/* mgsl_wait_until_sent() + * + * Wait until the transmitter is empty. + * + * Arguments: + * + * tty pointer to tty info structure + * timeout time to wait for send completion + * + * Return Value: None + */ +static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout) +{ + struct mgsl_struct * info = tty->driver_data; + unsigned long orig_jiffies, char_time; + + if (!info ) + return; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgsl_wait_until_sent(%s) entry\n", + __FILE__,__LINE__, info->device_name ); + + if (mgsl_paranoia_check(info, tty->name, "mgsl_wait_until_sent")) + return; + + if (!(info->port.flags & ASYNC_INITIALIZED)) + goto exit; + + orig_jiffies = jiffies; + + /* Set check interval to 1/5 of estimated time to + * send a character, and make it at least 1. The check + * interval should also be less than the timeout. + * Note: use tight timings here to satisfy the NIST-PCTS. + */ + + if ( info->params.data_rate ) { + char_time = info->timeout/(32 * 5); + if (!char_time) + char_time++; + } else + char_time = 1; + + if (timeout) + char_time = min_t(unsigned long, char_time, timeout); + + if ( info->params.mode == MGSL_MODE_HDLC || + info->params.mode == MGSL_MODE_RAW ) { + while (info->tx_active) { + msleep_interruptible(jiffies_to_msecs(char_time)); + if (signal_pending(current)) + break; + if (timeout && time_after(jiffies, orig_jiffies + timeout)) + break; + } + } else { + while (!(usc_InReg(info,TCSR) & TXSTATUS_ALL_SENT) && + info->tx_enabled) { + msleep_interruptible(jiffies_to_msecs(char_time)); + if (signal_pending(current)) + break; + if (timeout && time_after(jiffies, orig_jiffies + timeout)) + break; + } + } + +exit: + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgsl_wait_until_sent(%s) exit\n", + __FILE__,__LINE__, info->device_name ); + +} /* end of mgsl_wait_until_sent() */ + +/* mgsl_hangup() + * + * Called by tty_hangup() when a hangup is signaled. + * This is the same as to closing all open files for the port. 
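+ * The port is torn down unconditionally: the open count and
+ * ASYNC_NORMAL_ACTIVE flag are cleared and any sleeper in
+ * block_til_ready() is woken, so the next open starts from a
+ * clean state.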
+ * + * Arguments: tty pointer to associated tty object + * Return Value: None + */ +static void mgsl_hangup(struct tty_struct *tty) +{ + struct mgsl_struct * info = tty->driver_data; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgsl_hangup(%s)\n", + __FILE__,__LINE__, info->device_name ); + + if (mgsl_paranoia_check(info, tty->name, "mgsl_hangup")) + return; + + mgsl_flush_buffer(tty); + shutdown(info); + + info->port.count = 0; + info->port.flags &= ~ASYNC_NORMAL_ACTIVE; + info->port.tty = NULL; + + wake_up_interruptible(&info->port.open_wait); + +} /* end of mgsl_hangup() */ + +/* + * carrier_raised() + * + * Return true if carrier is raised + */ + +static int carrier_raised(struct tty_port *port) +{ + unsigned long flags; + struct mgsl_struct *info = container_of(port, struct mgsl_struct, port); + + spin_lock_irqsave(&info->irq_spinlock, flags); + usc_get_serial_signals(info); + spin_unlock_irqrestore(&info->irq_spinlock, flags); + return (info->serial_signals & SerialSignal_DCD) ? 1 : 0; +} + +static void dtr_rts(struct tty_port *port, int on) +{ + struct mgsl_struct *info = container_of(port, struct mgsl_struct, port); + unsigned long flags; + + spin_lock_irqsave(&info->irq_spinlock,flags); + if (on) + info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR; + else + info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR); + usc_set_serial_signals(info); + spin_unlock_irqrestore(&info->irq_spinlock,flags); +} + + +/* block_til_ready() + * + * Block the current process until the specified port + * is ready to be opened. + * + * Arguments: + * + * tty pointer to tty info structure + * filp pointer to open file object + * info pointer to device instance data + * + * Return Value: 0 if success, otherwise error code + */ +static int block_til_ready(struct tty_struct *tty, struct file * filp, + struct mgsl_struct *info) +{ + DECLARE_WAITQUEUE(wait, current); + int retval; + bool do_clocal = false; + bool extra_count = false; + unsigned long flags; + int dcd; + struct tty_port *port = &info->port; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):block_til_ready on %s\n", + __FILE__,__LINE__, tty->driver->name ); + + if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){ + /* nonblock mode is set or port is not enabled */ + port->flags |= ASYNC_NORMAL_ACTIVE; + return 0; + } + + if (tty->termios->c_cflag & CLOCAL) + do_clocal = true; + + /* Wait for carrier detect and the line to become + * free (i.e., not in use by the callout). While we are in + * this loop, port->count is dropped by one, so that + * mgsl_close() knows when to free things. We restore it upon + * exit, either normal or abnormal. + */ + + retval = 0; + add_wait_queue(&port->open_wait, &wait); + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):block_til_ready before block on %s count=%d\n", + __FILE__,__LINE__, tty->driver->name, port->count ); + + spin_lock_irqsave(&info->irq_spinlock, flags); + if (!tty_hung_up_p(filp)) { + extra_count = true; + port->count--; + } + spin_unlock_irqrestore(&info->irq_spinlock, flags); + port->blocked_open++; + + while (1) { + if (tty->termios->c_cflag & CBAUD) + tty_port_raise_dtr_rts(port); + + set_current_state(TASK_INTERRUPTIBLE); + + if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)){ + retval = (port->flags & ASYNC_HUP_NOTIFY) ? 
+ -EAGAIN : -ERESTARTSYS; + break; + } + + dcd = tty_port_carrier_raised(&info->port); + + if (!(port->flags & ASYNC_CLOSING) && (do_clocal || dcd)) + break; + + if (signal_pending(current)) { + retval = -ERESTARTSYS; + break; + } + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):block_til_ready blocking on %s count=%d\n", + __FILE__,__LINE__, tty->driver->name, port->count ); + + tty_unlock(); + schedule(); + tty_lock(); + } + + set_current_state(TASK_RUNNING); + remove_wait_queue(&port->open_wait, &wait); + + /* FIXME: Racy on hangup during close wait */ + if (extra_count) + port->count++; + port->blocked_open--; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):block_til_ready after blocking on %s count=%d\n", + __FILE__,__LINE__, tty->driver->name, port->count ); + + if (!retval) + port->flags |= ASYNC_NORMAL_ACTIVE; + + return retval; + +} /* end of block_til_ready() */ + +/* mgsl_open() + * + * Called when a port is opened. Init and enable port. + * Perform serial-specific initialization for the tty structure. + * + * Arguments: tty pointer to tty info structure + * filp associated file pointer + * + * Return Value: 0 if success, otherwise error code + */ +static int mgsl_open(struct tty_struct *tty, struct file * filp) +{ + struct mgsl_struct *info; + int retval, line; + unsigned long flags; + + /* verify range of specified line number */ + line = tty->index; + if ((line < 0) || (line >= mgsl_device_count)) { + printk("%s(%d):mgsl_open with invalid line #%d.\n", + __FILE__,__LINE__,line); + return -ENODEV; + } + + /* find the info structure for the specified line */ + info = mgsl_device_list; + while(info && info->line != line) + info = info->next_device; + if (mgsl_paranoia_check(info, tty->name, "mgsl_open")) + return -ENODEV; + + tty->driver_data = info; + info->port.tty = tty; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgsl_open(%s), old ref count = %d\n", + __FILE__,__LINE__,tty->driver->name, info->port.count); + + /* If port is closing, signal caller to try again */ + if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){ + if (info->port.flags & ASYNC_CLOSING) + interruptible_sleep_on(&info->port.close_wait); + retval = ((info->port.flags & ASYNC_HUP_NOTIFY) ? + -EAGAIN : -ERESTARTSYS); + goto cleanup; + } + + info->port.tty->low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0; + + spin_lock_irqsave(&info->netlock, flags); + if (info->netcount) { + retval = -EBUSY; + spin_unlock_irqrestore(&info->netlock, flags); + goto cleanup; + } + info->port.count++; + spin_unlock_irqrestore(&info->netlock, flags); + + if (info->port.count == 1) { + /* 1st open on this device, init hardware */ + retval = startup(info); + if (retval < 0) + goto cleanup; + } + + retval = block_til_ready(tty, filp, info); + if (retval) { + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):block_til_ready(%s) returned %d\n", + __FILE__,__LINE__, info->device_name, retval); + goto cleanup; + } + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgsl_open(%s) success\n", + __FILE__,__LINE__, info->device_name); + retval = 0; + +cleanup: + if (retval) { + if (tty->count == 1) + info->port.tty = NULL; /* tty layer will release tty struct */ + if(info->port.count) + info->port.count--; + } + + return retval; + +} /* end of mgsl_open() */ + +/* + * /proc fs routines.... 
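+ *
+ * line_info() emits one summary line per adapter (bus resources,
+ * modem signal state, HDLC or async error counters) followed by a
+ * dump of the USC registers; the tty core exposes this through the
+ * driver's proc_fops entry, typically as /proc/tty/driver/synclink.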
+ */ + +static inline void line_info(struct seq_file *m, struct mgsl_struct *info) +{ + char stat_buf[30]; + unsigned long flags; + + if (info->bus_type == MGSL_BUS_TYPE_PCI) { + seq_printf(m, "%s:PCI io:%04X irq:%d mem:%08X lcr:%08X", + info->device_name, info->io_base, info->irq_level, + info->phys_memory_base, info->phys_lcr_base); + } else { + seq_printf(m, "%s:(E)ISA io:%04X irq:%d dma:%d", + info->device_name, info->io_base, + info->irq_level, info->dma_level); + } + + /* output current serial signal states */ + spin_lock_irqsave(&info->irq_spinlock,flags); + usc_get_serial_signals(info); + spin_unlock_irqrestore(&info->irq_spinlock,flags); + + stat_buf[0] = 0; + stat_buf[1] = 0; + if (info->serial_signals & SerialSignal_RTS) + strcat(stat_buf, "|RTS"); + if (info->serial_signals & SerialSignal_CTS) + strcat(stat_buf, "|CTS"); + if (info->serial_signals & SerialSignal_DTR) + strcat(stat_buf, "|DTR"); + if (info->serial_signals & SerialSignal_DSR) + strcat(stat_buf, "|DSR"); + if (info->serial_signals & SerialSignal_DCD) + strcat(stat_buf, "|CD"); + if (info->serial_signals & SerialSignal_RI) + strcat(stat_buf, "|RI"); + + if (info->params.mode == MGSL_MODE_HDLC || + info->params.mode == MGSL_MODE_RAW ) { + seq_printf(m, " HDLC txok:%d rxok:%d", + info->icount.txok, info->icount.rxok); + if (info->icount.txunder) + seq_printf(m, " txunder:%d", info->icount.txunder); + if (info->icount.txabort) + seq_printf(m, " txabort:%d", info->icount.txabort); + if (info->icount.rxshort) + seq_printf(m, " rxshort:%d", info->icount.rxshort); + if (info->icount.rxlong) + seq_printf(m, " rxlong:%d", info->icount.rxlong); + if (info->icount.rxover) + seq_printf(m, " rxover:%d", info->icount.rxover); + if (info->icount.rxcrc) + seq_printf(m, " rxcrc:%d", info->icount.rxcrc); + } else { + seq_printf(m, " ASYNC tx:%d rx:%d", + info->icount.tx, info->icount.rx); + if (info->icount.frame) + seq_printf(m, " fe:%d", info->icount.frame); + if (info->icount.parity) + seq_printf(m, " pe:%d", info->icount.parity); + if (info->icount.brk) + seq_printf(m, " brk:%d", info->icount.brk); + if (info->icount.overrun) + seq_printf(m, " oe:%d", info->icount.overrun); + } + + /* Append serial signal status to end */ + seq_printf(m, " %s\n", stat_buf+1); + + seq_printf(m, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n", + info->tx_active,info->bh_requested,info->bh_running, + info->pending_bh); + + spin_lock_irqsave(&info->irq_spinlock,flags); + { + u16 Tcsr = usc_InReg( info, TCSR ); + u16 Tdmr = usc_InDmaReg( info, TDMR ); + u16 Ticr = usc_InReg( info, TICR ); + u16 Rscr = usc_InReg( info, RCSR ); + u16 Rdmr = usc_InDmaReg( info, RDMR ); + u16 Ricr = usc_InReg( info, RICR ); + u16 Icr = usc_InReg( info, ICR ); + u16 Dccr = usc_InReg( info, DCCR ); + u16 Tmr = usc_InReg( info, TMR ); + u16 Tccr = usc_InReg( info, TCCR ); + u16 Ccar = inw( info->io_base + CCAR ); + seq_printf(m, "tcsr=%04X tdmr=%04X ticr=%04X rcsr=%04X rdmr=%04X\n" + "ricr=%04X icr =%04X dccr=%04X tmr=%04X tccr=%04X ccar=%04X\n", + Tcsr,Tdmr,Ticr,Rscr,Rdmr,Ricr,Icr,Dccr,Tmr,Tccr,Ccar ); + } + spin_unlock_irqrestore(&info->irq_spinlock,flags); +} + +/* Called to print information about devices */ +static int mgsl_proc_show(struct seq_file *m, void *v) +{ + struct mgsl_struct *info; + + seq_printf(m, "synclink driver:%s\n", driver_version); + + info = mgsl_device_list; + while( info ) { + line_info(m, info); + info = info->next_device; + } + return 0; +} + +static int mgsl_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, 
mgsl_proc_show, NULL); +} + +static const struct file_operations mgsl_proc_fops = { + .owner = THIS_MODULE, + .open = mgsl_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/* mgsl_allocate_dma_buffers() + * + * Allocate and format DMA buffers (ISA adapter) + * or format shared memory buffers (PCI adapter). + * + * Arguments: info pointer to device instance data + * Return Value: 0 if success, otherwise error + */ +static int mgsl_allocate_dma_buffers(struct mgsl_struct *info) +{ + unsigned short BuffersPerFrame; + + info->last_mem_alloc = 0; + + /* Calculate the number of DMA buffers necessary to hold the */ + /* largest allowable frame size. Note: If the max frame size is */ + /* not an even multiple of the DMA buffer size then we need to */ + /* round the buffer count per frame up one. */ + + BuffersPerFrame = (unsigned short)(info->max_frame_size/DMABUFFERSIZE); + if ( info->max_frame_size % DMABUFFERSIZE ) + BuffersPerFrame++; + + if ( info->bus_type == MGSL_BUS_TYPE_PCI ) { + /* + * The PCI adapter has 256KBytes of shared memory to use. + * This is 64 PAGE_SIZE buffers. + * + * The first page is used for padding at this time so the + * buffer list does not begin at offset 0 of the PCI + * adapter's shared memory. + * + * The 2nd page is used for the buffer list. A 4K buffer + * list can hold 128 DMA_BUFFER structures at 32 bytes + * each. + * + * This leaves 62 4K pages. + * + * The next N pages are used for transmit frame(s). We + * reserve enough 4K page blocks to hold the required + * number of transmit dma buffers (num_tx_dma_buffers), + * each of MaxFrameSize size. + * + * Of the remaining pages (62-N), determine how many can + * be used to receive full MaxFrameSize inbound frames + */ + info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame; + info->rx_buffer_count = 62 - info->tx_buffer_count; + } else { + /* Calculate the number of PAGE_SIZE buffers needed for */ + /* receive and transmit DMA buffers. */ + + + /* Calculate the number of DMA buffers necessary to */ + /* hold 7 max size receive frames and one max size transmit frame. */ + /* The receive buffer count is bumped by one so we avoid an */ + /* End of List condition if all receive buffers are used when */ + /* using linked list DMA buffers. */ + + info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame; + info->rx_buffer_count = (BuffersPerFrame * MAXRXFRAMES) + 6; + + /* + * limit total TxBuffers & RxBuffers to 62 4K total + * (ala PCI Allocation) + */ + + if ( (info->tx_buffer_count + info->rx_buffer_count) > 62 ) + info->rx_buffer_count = 62 - info->tx_buffer_count; + + } + + if ( debug_level >= DEBUG_LEVEL_INFO ) + printk("%s(%d):Allocating %d TX and %d RX DMA buffers.\n", + __FILE__,__LINE__, info->tx_buffer_count,info->rx_buffer_count); + + if ( mgsl_alloc_buffer_list_memory( info ) < 0 || + mgsl_alloc_frame_memory(info, info->rx_buffer_list, info->rx_buffer_count) < 0 || + mgsl_alloc_frame_memory(info, info->tx_buffer_list, info->tx_buffer_count) < 0 || + mgsl_alloc_intermediate_rxbuffer_memory(info) < 0 || + mgsl_alloc_intermediate_txbuffer_memory(info) < 0 ) { + printk("%s(%d):Can't allocate DMA buffer memory\n",__FILE__,__LINE__); + return -ENOMEM; + } + + mgsl_reset_rx_dma_buffers( info ); + mgsl_reset_tx_dma_buffers( info ); + + return 0; + +} /* end of mgsl_allocate_dma_buffers() */ + +/* + * mgsl_alloc_buffer_list_memory() + * + * Allocate a common DMA buffer for use as the + * receive and transmit buffer lists. 
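+ * (Both lists are carved from a single BUFFERLISTSIZE allocation:
+ * the receive entries come first, the transmit entries immediately
+ * after them.)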
+ * + * A buffer list is a set of buffer entries where each entry contains + * a pointer to an actual buffer and a pointer to the next buffer entry + * (plus some other info about the buffer). + * + * The buffer entries for a list are built to form a circular list so + * that when the entire list has been traversed you start back at the + * beginning. + * + * This function allocates memory for just the buffer entries. + * The links (pointer to next entry) are filled in with the physical + * address of the next entry so the adapter can navigate the list + * using bus master DMA. The pointers to the actual buffers are filled + * out later when the actual buffers are allocated. + * + * Arguments: info pointer to device instance data + * Return Value: 0 if success, otherwise error + */ +static int mgsl_alloc_buffer_list_memory( struct mgsl_struct *info ) +{ + unsigned int i; + + if ( info->bus_type == MGSL_BUS_TYPE_PCI ) { + /* PCI adapter uses shared memory. */ + info->buffer_list = info->memory_base + info->last_mem_alloc; + info->buffer_list_phys = info->last_mem_alloc; + info->last_mem_alloc += BUFFERLISTSIZE; + } else { + /* ISA adapter uses system memory. */ + /* The buffer lists are allocated as a common buffer that both */ + /* the processor and adapter can access. This allows the driver to */ + /* inspect portions of the buffer while other portions are being */ + /* updated by the adapter using Bus Master DMA. */ + + info->buffer_list = dma_alloc_coherent(NULL, BUFFERLISTSIZE, &info->buffer_list_dma_addr, GFP_KERNEL); + if (info->buffer_list == NULL) + return -ENOMEM; + info->buffer_list_phys = (u32)(info->buffer_list_dma_addr); + } + + /* We got the memory for the buffer entry lists. */ + /* Initialize the memory block to all zeros. */ + memset( info->buffer_list, 0, BUFFERLISTSIZE ); + + /* Save virtual address pointers to the receive and */ + /* transmit buffer lists. (Receive 1st). These pointers will */ + /* be used by the processor to access the lists. */ + info->rx_buffer_list = (DMABUFFERENTRY *)info->buffer_list; + info->tx_buffer_list = (DMABUFFERENTRY *)info->buffer_list; + info->tx_buffer_list += info->rx_buffer_count; + + /* + * Build the links for the buffer entry lists such that + * two circular lists are built. (Transmit and Receive). + * + * Note: the links are physical addresses + * which are read by the adapter to determine the next + * buffer entry to use. 
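+ *
+ * For example, with rx_buffer_count == 3 and (say) 32 byte entries,
+ * the receive links work out to (offsets from buffer_list_phys):
+ *
+ *	entry 0 at +0  -> link = +32  (entry 1)
+ *	entry 1 at +32 -> link = +64  (entry 2)
+ *	entry 2 at +64 -> link = +0   (back to entry 0)
+ *
+ * The transmit list is built the same way, starting at
+ * rx_buffer_count entries into the block.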
+ */ + + for ( i = 0; i < info->rx_buffer_count; i++ ) { + /* calculate and store physical address of this buffer entry */ + info->rx_buffer_list[i].phys_entry = + info->buffer_list_phys + (i * sizeof(DMABUFFERENTRY)); + + /* calculate and store physical address of */ + /* next entry in cirular list of entries */ + + info->rx_buffer_list[i].link = info->buffer_list_phys; + + if ( i < info->rx_buffer_count - 1 ) + info->rx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY); + } + + for ( i = 0; i < info->tx_buffer_count; i++ ) { + /* calculate and store physical address of this buffer entry */ + info->tx_buffer_list[i].phys_entry = info->buffer_list_phys + + ((info->rx_buffer_count + i) * sizeof(DMABUFFERENTRY)); + + /* calculate and store physical address of */ + /* next entry in cirular list of entries */ + + info->tx_buffer_list[i].link = info->buffer_list_phys + + info->rx_buffer_count * sizeof(DMABUFFERENTRY); + + if ( i < info->tx_buffer_count - 1 ) + info->tx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY); + } + + return 0; + +} /* end of mgsl_alloc_buffer_list_memory() */ + +/* Free DMA buffers allocated for use as the + * receive and transmit buffer lists. + * Warning: + * + * The data transfer buffers associated with the buffer list + * MUST be freed before freeing the buffer list itself because + * the buffer list contains the information necessary to free + * the individual buffers! + */ +static void mgsl_free_buffer_list_memory( struct mgsl_struct *info ) +{ + if (info->buffer_list && info->bus_type != MGSL_BUS_TYPE_PCI) + dma_free_coherent(NULL, BUFFERLISTSIZE, info->buffer_list, info->buffer_list_dma_addr); + + info->buffer_list = NULL; + info->rx_buffer_list = NULL; + info->tx_buffer_list = NULL; + +} /* end of mgsl_free_buffer_list_memory() */ + +/* + * mgsl_alloc_frame_memory() + * + * Allocate the frame DMA buffers used by the specified buffer list. + * Each DMA buffer will be one memory page in size. This is necessary + * because memory can fragment enough that it may be impossible + * contiguous pages. + * + * Arguments: + * + * info pointer to device instance data + * BufferList pointer to list of buffer entries + * Buffercount count of buffer entries in buffer list + * + * Return Value: 0 if success, otherwise -ENOMEM + */ +static int mgsl_alloc_frame_memory(struct mgsl_struct *info,DMABUFFERENTRY *BufferList,int Buffercount) +{ + int i; + u32 phys_addr; + + /* Allocate page sized buffers for the receive buffer list */ + + for ( i = 0; i < Buffercount; i++ ) { + if ( info->bus_type == MGSL_BUS_TYPE_PCI ) { + /* PCI adapter uses shared memory buffers. */ + BufferList[i].virt_addr = info->memory_base + info->last_mem_alloc; + phys_addr = info->last_mem_alloc; + info->last_mem_alloc += DMABUFFERSIZE; + } else { + /* ISA adapter uses system memory. */ + BufferList[i].virt_addr = dma_alloc_coherent(NULL, DMABUFFERSIZE, &BufferList[i].dma_addr, GFP_KERNEL); + if (BufferList[i].virt_addr == NULL) + return -ENOMEM; + phys_addr = (u32)(BufferList[i].dma_addr); + } + BufferList[i].phys_addr = phys_addr; + } + + return 0; + +} /* end of mgsl_alloc_frame_memory() */ + +/* + * mgsl_free_frame_memory() + * + * Free the buffers associated with + * each buffer entry of a buffer list. 
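+ * (Only ISA buffers obtained from dma_alloc_coherent() are actually
+ * returned to the system here; entries that point into the PCI
+ * adapter's shared memory window are simply cleared, since that
+ * mapping is released separately in mgsl_release_resources().)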
+ * + * Arguments: + * + * info pointer to device instance data + * BufferList pointer to list of buffer entries + * Buffercount count of buffer entries in buffer list + * + * Return Value: None + */ +static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList, int Buffercount) +{ + int i; + + if ( BufferList ) { + for ( i = 0 ; i < Buffercount ; i++ ) { + if ( BufferList[i].virt_addr ) { + if ( info->bus_type != MGSL_BUS_TYPE_PCI ) + dma_free_coherent(NULL, DMABUFFERSIZE, BufferList[i].virt_addr, BufferList[i].dma_addr); + BufferList[i].virt_addr = NULL; + } + } + } + +} /* end of mgsl_free_frame_memory() */ + +/* mgsl_free_dma_buffers() + * + * Free DMA buffers + * + * Arguments: info pointer to device instance data + * Return Value: None + */ +static void mgsl_free_dma_buffers( struct mgsl_struct *info ) +{ + mgsl_free_frame_memory( info, info->rx_buffer_list, info->rx_buffer_count ); + mgsl_free_frame_memory( info, info->tx_buffer_list, info->tx_buffer_count ); + mgsl_free_buffer_list_memory( info ); + +} /* end of mgsl_free_dma_buffers() */ + + +/* + * mgsl_alloc_intermediate_rxbuffer_memory() + * + * Allocate a buffer large enough to hold max_frame_size. This buffer + * is used to pass an assembled frame to the line discipline. + * + * Arguments: + * + * info pointer to device instance data + * + * Return Value: 0 if success, otherwise -ENOMEM + */ +static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info) +{ + info->intermediate_rxbuffer = kmalloc(info->max_frame_size, GFP_KERNEL | GFP_DMA); + if ( info->intermediate_rxbuffer == NULL ) + return -ENOMEM; + + return 0; + +} /* end of mgsl_alloc_intermediate_rxbuffer_memory() */ + +/* + * mgsl_free_intermediate_rxbuffer_memory() + * + * + * Arguments: + * + * info pointer to device instance data + * + * Return Value: None + */ +static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info) +{ + kfree(info->intermediate_rxbuffer); + info->intermediate_rxbuffer = NULL; + +} /* end of mgsl_free_intermediate_rxbuffer_memory() */ + +/* + * mgsl_alloc_intermediate_txbuffer_memory() + * + * Allocate intermdiate transmit buffer(s) large enough to hold max_frame_size. + * This buffer is used to load transmit frames into the adapter's dma transfer + * buffers when there is sufficient space. 
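+ * The number of holding buffers is num_tx_holding_buffers, which
+ * defaults to 0 (no buffering) and, when overridden in
+ * mgsl_add_device(), is limited to MAX_TX_HOLDING_BUFFERS. See
+ * load_next_tx_holding_buffer() and save_tx_buffer_request() below
+ * for how queued frames are drained into the DMA buffers.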
+ * + * Arguments: + * + * info pointer to device instance data + * + * Return Value: 0 if success, otherwise -ENOMEM + */ +static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info) +{ + int i; + + if ( debug_level >= DEBUG_LEVEL_INFO ) + printk("%s %s(%d) allocating %d tx holding buffers\n", + info->device_name, __FILE__,__LINE__,info->num_tx_holding_buffers); + + memset(info->tx_holding_buffers,0,sizeof(info->tx_holding_buffers)); + + for ( i=0; i<info->num_tx_holding_buffers; ++i) { + info->tx_holding_buffers[i].buffer = + kmalloc(info->max_frame_size, GFP_KERNEL); + if (info->tx_holding_buffers[i].buffer == NULL) { + for (--i; i >= 0; i--) { + kfree(info->tx_holding_buffers[i].buffer); + info->tx_holding_buffers[i].buffer = NULL; + } + return -ENOMEM; + } + } + + return 0; + +} /* end of mgsl_alloc_intermediate_txbuffer_memory() */ + +/* + * mgsl_free_intermediate_txbuffer_memory() + * + * + * Arguments: + * + * info pointer to device instance data + * + * Return Value: None + */ +static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info) +{ + int i; + + for ( i=0; i<info->num_tx_holding_buffers; ++i ) { + kfree(info->tx_holding_buffers[i].buffer); + info->tx_holding_buffers[i].buffer = NULL; + } + + info->get_tx_holding_index = 0; + info->put_tx_holding_index = 0; + info->tx_holding_count = 0; + +} /* end of mgsl_free_intermediate_txbuffer_memory() */ + + +/* + * load_next_tx_holding_buffer() + * + * attempts to load the next buffered tx request into the + * tx dma buffers + * + * Arguments: + * + * info pointer to device instance data + * + * Return Value: true if next buffered tx request loaded + * into adapter's tx dma buffer, + * false otherwise + */ +static bool load_next_tx_holding_buffer(struct mgsl_struct *info) +{ + bool ret = false; + + if ( info->tx_holding_count ) { + /* determine if we have enough tx dma buffers + * to accommodate the next tx frame + */ + struct tx_holding_buffer *ptx = + &info->tx_holding_buffers[info->get_tx_holding_index]; + int num_free = num_free_tx_dma_buffers(info); + int num_needed = ptx->buffer_size / DMABUFFERSIZE; + if ( ptx->buffer_size % DMABUFFERSIZE ) + ++num_needed; + + if (num_needed <= num_free) { + info->xmit_cnt = ptx->buffer_size; + mgsl_load_tx_dma_buffer(info,ptx->buffer,ptx->buffer_size); + + --info->tx_holding_count; + if ( ++info->get_tx_holding_index >= info->num_tx_holding_buffers) + info->get_tx_holding_index=0; + + /* restart transmit timer */ + mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000)); + + ret = true; + } + } + + return ret; +} + +/* + * save_tx_buffer_request() + * + * attempt to store transmit frame request for later transmission + * + * Arguments: + * + * info pointer to device instance data + * Buffer pointer to buffer containing frame to load + * BufferSize size in bytes of frame in Buffer + * + * Return Value: 1 if able to store, 0 otherwise + */ +static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize) +{ + struct tx_holding_buffer *ptx; + + if ( info->tx_holding_count >= info->num_tx_holding_buffers ) { + return 0; /* all buffers in use */ + } + + ptx = &info->tx_holding_buffers[info->put_tx_holding_index]; + ptx->buffer_size = BufferSize; + memcpy( ptx->buffer, Buffer, BufferSize); + + ++info->tx_holding_count; + if ( ++info->put_tx_holding_index >= info->num_tx_holding_buffers) + info->put_tx_holding_index=0; + + return 1; +} + +static int mgsl_claim_resources(struct mgsl_struct *info) +{ + if 
(request_region(info->io_base,info->io_addr_size,"synclink") == NULL) { + printk( "%s(%d):I/O address conflict on device %s Addr=%08X\n", + __FILE__,__LINE__,info->device_name, info->io_base); + return -ENODEV; + } + info->io_addr_requested = true; + + if ( request_irq(info->irq_level,mgsl_interrupt,info->irq_flags, + info->device_name, info ) < 0 ) { + printk( "%s(%d):Cant request interrupt on device %s IRQ=%d\n", + __FILE__,__LINE__,info->device_name, info->irq_level ); + goto errout; + } + info->irq_requested = true; + + if ( info->bus_type == MGSL_BUS_TYPE_PCI ) { + if (request_mem_region(info->phys_memory_base,0x40000,"synclink") == NULL) { + printk( "%s(%d):mem addr conflict device %s Addr=%08X\n", + __FILE__,__LINE__,info->device_name, info->phys_memory_base); + goto errout; + } + info->shared_mem_requested = true; + if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclink") == NULL) { + printk( "%s(%d):lcr mem addr conflict device %s Addr=%08X\n", + __FILE__,__LINE__,info->device_name, info->phys_lcr_base + info->lcr_offset); + goto errout; + } + info->lcr_mem_requested = true; + + info->memory_base = ioremap_nocache(info->phys_memory_base, + 0x40000); + if (!info->memory_base) { + printk( "%s(%d):Cant map shared memory on device %s MemAddr=%08X\n", + __FILE__,__LINE__,info->device_name, info->phys_memory_base ); + goto errout; + } + + if ( !mgsl_memory_test(info) ) { + printk( "%s(%d):Failed shared memory test %s MemAddr=%08X\n", + __FILE__,__LINE__,info->device_name, info->phys_memory_base ); + goto errout; + } + + info->lcr_base = ioremap_nocache(info->phys_lcr_base, + PAGE_SIZE); + if (!info->lcr_base) { + printk( "%s(%d):Cant map LCR memory on device %s MemAddr=%08X\n", + __FILE__,__LINE__,info->device_name, info->phys_lcr_base ); + goto errout; + } + info->lcr_base += info->lcr_offset; + + } else { + /* claim DMA channel */ + + if (request_dma(info->dma_level,info->device_name) < 0){ + printk( "%s(%d):Cant request DMA channel on device %s DMA=%d\n", + __FILE__,__LINE__,info->device_name, info->dma_level ); + mgsl_release_resources( info ); + return -ENODEV; + } + info->dma_requested = true; + + /* ISA adapter uses bus master DMA */ + set_dma_mode(info->dma_level,DMA_MODE_CASCADE); + enable_dma(info->dma_level); + } + + if ( mgsl_allocate_dma_buffers(info) < 0 ) { + printk( "%s(%d):Cant allocate DMA buffers on device %s DMA=%d\n", + __FILE__,__LINE__,info->device_name, info->dma_level ); + goto errout; + } + + return 0; +errout: + mgsl_release_resources(info); + return -ENODEV; + +} /* end of mgsl_claim_resources() */ + +static void mgsl_release_resources(struct mgsl_struct *info) +{ + if ( debug_level >= DEBUG_LEVEL_INFO ) + printk( "%s(%d):mgsl_release_resources(%s) entry\n", + __FILE__,__LINE__,info->device_name ); + + if ( info->irq_requested ) { + free_irq(info->irq_level, info); + info->irq_requested = false; + } + if ( info->dma_requested ) { + disable_dma(info->dma_level); + free_dma(info->dma_level); + info->dma_requested = false; + } + mgsl_free_dma_buffers(info); + mgsl_free_intermediate_rxbuffer_memory(info); + mgsl_free_intermediate_txbuffer_memory(info); + + if ( info->io_addr_requested ) { + release_region(info->io_base,info->io_addr_size); + info->io_addr_requested = false; + } + if ( info->shared_mem_requested ) { + release_mem_region(info->phys_memory_base,0x40000); + info->shared_mem_requested = false; + } + if ( info->lcr_mem_requested ) { + release_mem_region(info->phys_lcr_base + info->lcr_offset,128); + info->lcr_mem_requested = false; 
+ } + if (info->memory_base){ + iounmap(info->memory_base); + info->memory_base = NULL; + } + if (info->lcr_base){ + iounmap(info->lcr_base - info->lcr_offset); + info->lcr_base = NULL; + } + + if ( debug_level >= DEBUG_LEVEL_INFO ) + printk( "%s(%d):mgsl_release_resources(%s) exit\n", + __FILE__,__LINE__,info->device_name ); + +} /* end of mgsl_release_resources() */ + +/* mgsl_add_device() + * + * Add the specified device instance data structure to the + * global linked list of devices and increment the device count. + * + * Arguments: info pointer to device instance data + * Return Value: None + */ +static void mgsl_add_device( struct mgsl_struct *info ) +{ + info->next_device = NULL; + info->line = mgsl_device_count; + sprintf(info->device_name,"ttySL%d",info->line); + + if (info->line < MAX_TOTAL_DEVICES) { + if (maxframe[info->line]) + info->max_frame_size = maxframe[info->line]; + + if (txdmabufs[info->line]) { + info->num_tx_dma_buffers = txdmabufs[info->line]; + if (info->num_tx_dma_buffers < 1) + info->num_tx_dma_buffers = 1; + } + + if (txholdbufs[info->line]) { + info->num_tx_holding_buffers = txholdbufs[info->line]; + if (info->num_tx_holding_buffers < 1) + info->num_tx_holding_buffers = 1; + else if (info->num_tx_holding_buffers > MAX_TX_HOLDING_BUFFERS) + info->num_tx_holding_buffers = MAX_TX_HOLDING_BUFFERS; + } + } + + mgsl_device_count++; + + if ( !mgsl_device_list ) + mgsl_device_list = info; + else { + struct mgsl_struct *current_dev = mgsl_device_list; + while( current_dev->next_device ) + current_dev = current_dev->next_device; + current_dev->next_device = info; + } + + if ( info->max_frame_size < 4096 ) + info->max_frame_size = 4096; + else if ( info->max_frame_size > 65535 ) + info->max_frame_size = 65535; + + if ( info->bus_type == MGSL_BUS_TYPE_PCI ) { + printk( "SyncLink PCI v%d %s: IO=%04X IRQ=%d Mem=%08X,%08X MaxFrameSize=%u\n", + info->hw_version + 1, info->device_name, info->io_base, info->irq_level, + info->phys_memory_base, info->phys_lcr_base, + info->max_frame_size ); + } else { + printk( "SyncLink ISA %s: IO=%04X IRQ=%d DMA=%d MaxFrameSize=%u\n", + info->device_name, info->io_base, info->irq_level, info->dma_level, + info->max_frame_size ); + } + +#if SYNCLINK_GENERIC_HDLC + hdlcdev_init(info); +#endif + +} /* end of mgsl_add_device() */ + +static const struct tty_port_operations mgsl_port_ops = { + .carrier_raised = carrier_raised, + .dtr_rts = dtr_rts, +}; + + +/* mgsl_allocate_device() + * + * Allocate and initialize a device instance structure + * + * Arguments: none + * Return Value: pointer to mgsl_struct if success, otherwise NULL + */ +static struct mgsl_struct* mgsl_allocate_device(void) +{ + struct mgsl_struct *info; + + info = kzalloc(sizeof(struct mgsl_struct), + GFP_KERNEL); + + if (!info) { + printk("Error can't allocate device instance data\n"); + } else { + tty_port_init(&info->port); + info->port.ops = &mgsl_port_ops; + info->magic = MGSL_MAGIC; + INIT_WORK(&info->task, mgsl_bh_handler); + info->max_frame_size = 4096; + info->port.close_delay = 5*HZ/10; + info->port.closing_wait = 30*HZ; + init_waitqueue_head(&info->status_event_wait_q); + init_waitqueue_head(&info->event_wait_q); + spin_lock_init(&info->irq_spinlock); + spin_lock_init(&info->netlock); + memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS)); + info->idle_mode = HDLC_TXIDLE_FLAGS; + info->num_tx_dma_buffers = 1; + info->num_tx_holding_buffers = 0; + } + + return info; + +} /* end of mgsl_allocate_device()*/ + +static const struct tty_operations mgsl_ops = { + .open = 
mgsl_open, + .close = mgsl_close, + .write = mgsl_write, + .put_char = mgsl_put_char, + .flush_chars = mgsl_flush_chars, + .write_room = mgsl_write_room, + .chars_in_buffer = mgsl_chars_in_buffer, + .flush_buffer = mgsl_flush_buffer, + .ioctl = mgsl_ioctl, + .throttle = mgsl_throttle, + .unthrottle = mgsl_unthrottle, + .send_xchar = mgsl_send_xchar, + .break_ctl = mgsl_break, + .wait_until_sent = mgsl_wait_until_sent, + .set_termios = mgsl_set_termios, + .stop = mgsl_stop, + .start = mgsl_start, + .hangup = mgsl_hangup, + .tiocmget = tiocmget, + .tiocmset = tiocmset, + .get_icount = msgl_get_icount, + .proc_fops = &mgsl_proc_fops, +}; + +/* + * perform tty device initialization + */ +static int mgsl_init_tty(void) +{ + int rc; + + serial_driver = alloc_tty_driver(128); + if (!serial_driver) + return -ENOMEM; + + serial_driver->owner = THIS_MODULE; + serial_driver->driver_name = "synclink"; + serial_driver->name = "ttySL"; + serial_driver->major = ttymajor; + serial_driver->minor_start = 64; + serial_driver->type = TTY_DRIVER_TYPE_SERIAL; + serial_driver->subtype = SERIAL_TYPE_NORMAL; + serial_driver->init_termios = tty_std_termios; + serial_driver->init_termios.c_cflag = + B9600 | CS8 | CREAD | HUPCL | CLOCAL; + serial_driver->init_termios.c_ispeed = 9600; + serial_driver->init_termios.c_ospeed = 9600; + serial_driver->flags = TTY_DRIVER_REAL_RAW; + tty_set_operations(serial_driver, &mgsl_ops); + if ((rc = tty_register_driver(serial_driver)) < 0) { + printk("%s(%d):Couldn't register serial driver\n", + __FILE__,__LINE__); + put_tty_driver(serial_driver); + serial_driver = NULL; + return rc; + } + + printk("%s %s, tty major#%d\n", + driver_name, driver_version, + serial_driver->major); + return 0; +} + +/* enumerate user specified ISA adapters + */ +static void mgsl_enum_isa_devices(void) +{ + struct mgsl_struct *info; + int i; + + /* Check for user specified ISA devices */ + + for (i=0 ;(i < MAX_ISA_DEVICES) && io[i] && irq[i]; i++){ + if ( debug_level >= DEBUG_LEVEL_INFO ) + printk("ISA device specified io=%04X,irq=%d,dma=%d\n", + io[i], irq[i], dma[i] ); + + info = mgsl_allocate_device(); + if ( !info ) { + /* error allocating device instance data */ + if ( debug_level >= DEBUG_LEVEL_ERROR ) + printk( "can't allocate device instance data.\n"); + continue; + } + + /* Copy user configuration info to device instance data */ + info->io_base = (unsigned int)io[i]; + info->irq_level = (unsigned int)irq[i]; + info->irq_level = irq_canonicalize(info->irq_level); + info->dma_level = (unsigned int)dma[i]; + info->bus_type = MGSL_BUS_TYPE_ISA; + info->io_addr_size = 16; + info->irq_flags = 0; + + mgsl_add_device( info ); + } +} + +static void synclink_cleanup(void) +{ + int rc; + struct mgsl_struct *info; + struct mgsl_struct *tmp; + + printk("Unloading %s: %s\n", driver_name, driver_version); + + if (serial_driver) { + if ((rc = tty_unregister_driver(serial_driver))) + printk("%s(%d) failed to unregister tty driver err=%d\n", + __FILE__,__LINE__,rc); + put_tty_driver(serial_driver); + } + + info = mgsl_device_list; + while(info) { +#if SYNCLINK_GENERIC_HDLC + hdlcdev_exit(info); +#endif + mgsl_release_resources(info); + tmp = info; + info = info->next_device; + kfree(tmp); + } + + if (pci_registered) + pci_unregister_driver(&synclink_pci_driver); +} + +static int __init synclink_init(void) +{ + int rc; + + if (break_on_load) { + mgsl_get_text_ptr(); + BREAKPOINT(); + } + + printk("%s %s\n", driver_name, driver_version); + + mgsl_enum_isa_devices(); + if ((rc = 
pci_register_driver(&synclink_pci_driver)) < 0) + printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc); + else + pci_registered = true; + + if ((rc = mgsl_init_tty()) < 0) + goto error; + + return 0; + +error: + synclink_cleanup(); + return rc; +} + +static void __exit synclink_exit(void) +{ + synclink_cleanup(); +} + +module_init(synclink_init); +module_exit(synclink_exit); + +/* + * usc_RTCmd() + * + * Issue a USC Receive/Transmit command to the + * Channel Command/Address Register (CCAR). + * + * Notes: + * + * The command is encoded in the most significant 5 bits <15..11> + * of the CCAR value. Bits <10..7> of the CCAR must be preserved + * and Bits <6..0> must be written as zeros. + * + * Arguments: + * + * info pointer to device information structure + * Cmd command mask (use symbolic macros) + * + * Return Value: + * + * None + */ +static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd ) +{ + /* output command to CCAR in bits <15..11> */ + /* preserve bits <10..7>, bits <6..0> must be zero */ + + outw( Cmd + info->loopback_bits, info->io_base + CCAR ); + + /* Read to flush write to CCAR */ + if ( info->bus_type == MGSL_BUS_TYPE_PCI ) + inw( info->io_base + CCAR ); + +} /* end of usc_RTCmd() */ + +/* + * usc_DmaCmd() + * + * Issue a DMA command to the DMA Command/Address Register (DCAR). + * + * Arguments: + * + * info pointer to device information structure + * Cmd DMA command mask (usc_DmaCmd_XX Macros) + * + * Return Value: + * + * None + */ +static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd ) +{ + /* write command mask to DCAR */ + outw( Cmd + info->mbre_bit, info->io_base ); + + /* Read to flush write to DCAR */ + if ( info->bus_type == MGSL_BUS_TYPE_PCI ) + inw( info->io_base ); + +} /* end of usc_DmaCmd() */ + +/* + * usc_OutDmaReg() + * + * Write a 16-bit value to a USC DMA register + * + * Arguments: + * + * info pointer to device info structure + * RegAddr register address (number) for write + * RegValue 16-bit value to write to register + * + * Return Value: + * + * None + * + */ +static void usc_OutDmaReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue ) +{ + /* Note: The DCAR is located at the adapter base address */ + /* Note: must preserve state of BIT8 in DCAR */ + + outw( RegAddr + info->mbre_bit, info->io_base ); + outw( RegValue, info->io_base ); + + /* Read to flush write to DCAR */ + if ( info->bus_type == MGSL_BUS_TYPE_PCI ) + inw( info->io_base ); + +} /* end of usc_OutDmaReg() */ + +/* + * usc_InDmaReg() + * + * Read a 16-bit value from a DMA register + * + * Arguments: + * + * info pointer to device info structure + * RegAddr register address (number) to read from + * + * Return Value: + * + * The 16-bit value read from register + * + */ +static u16 usc_InDmaReg( struct mgsl_struct *info, u16 RegAddr ) +{ + /* Note: The DCAR is located at the adapter base address */ + /* Note: must preserve state of BIT8 in DCAR */ + + outw( RegAddr + info->mbre_bit, info->io_base ); + return inw( info->io_base ); + +} /* end of usc_InDmaReg() */ + +/* + * + * usc_OutReg() + * + * Write a 16-bit value to a USC serial channel register + * + * Arguments: + * + * info pointer to device info structure + * RegAddr register address (number) to write to + * RegValue 16-bit value to write to register + * + * Return Value: + * + * None + * + */ +static void usc_OutReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue ) +{ + outw( RegAddr + info->loopback_bits, info->io_base + CCAR ); + outw( RegValue, info->io_base + CCAR ); + + /* Read to flush write 
to CCAR */ + if ( info->bus_type == MGSL_BUS_TYPE_PCI ) + inw( info->io_base + CCAR ); + +} /* end of usc_OutReg() */ + +/* + * usc_InReg() + * + * Reads a 16-bit value from a USC serial channel register + * + * Arguments: + * + * info pointer to device extension + * RegAddr register address (number) to read from + * + * Return Value: + * + * 16-bit value read from register + */ +static u16 usc_InReg( struct mgsl_struct *info, u16 RegAddr ) +{ + outw( RegAddr + info->loopback_bits, info->io_base + CCAR ); + return inw( info->io_base + CCAR ); + +} /* end of usc_InReg() */ + +/* usc_set_sdlc_mode() + * + * Set up the adapter for SDLC DMA communications. + * + * Arguments: info pointer to device instance data + * Return Value: NONE + */ +static void usc_set_sdlc_mode( struct mgsl_struct *info ) +{ + u16 RegValue; + bool PreSL1660; + + /* + * determine if the IUSC on the adapter is pre-SL1660. If + * not, take advantage of the UnderWait feature of more + * modern chips. If an underrun occurs and this bit is set, + * the transmitter will idle the programmed idle pattern + * until the driver has time to service the underrun. Otherwise, + * the dma controller may get the cycles previously requested + * and begin transmitting queued tx data. + */ + usc_OutReg(info,TMCR,0x1f); + RegValue=usc_InReg(info,TMDR); + PreSL1660 = (RegValue == IUSC_PRE_SL1660); + + if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE ) + { + /* + ** Channel Mode Register (CMR) + ** + ** <15..14> 10 Tx Sub Modes, Send Flag on Underrun + ** <13> 0 0 = Transmit Disabled (initially) + ** <12> 0 1 = Consecutive Idles share common 0 + ** <11..8> 1110 Transmitter Mode = HDLC/SDLC Loop + ** <7..4> 0000 Rx Sub Modes, addr/ctrl field handling + ** <3..0> 0110 Receiver Mode = HDLC/SDLC + ** + ** 1000 1110 0000 0110 = 0x8e06 + */ + RegValue = 0x8e06; + + /*-------------------------------------------------- + * ignore user options for UnderRun Actions and + * preambles + *--------------------------------------------------*/ + } + else + { + /* Channel mode Register (CMR) + * + * <15..14> 00 Tx Sub modes, Underrun Action + * <13> 0 1 = Send Preamble before opening flag + * <12> 0 1 = Consecutive Idles share common 0 + * <11..8> 0110 Transmitter mode = HDLC/SDLC + * <7..4> 0000 Rx Sub modes, addr/ctrl field handling + * <3..0> 0110 Receiver mode = HDLC/SDLC + * + * 0000 0110 0000 0110 = 0x0606 + */ + if (info->params.mode == MGSL_MODE_RAW) { + RegValue = 0x0001; /* Set Receive mode = external sync */ + + usc_OutReg( info, IOCR, /* Set IOCR DCD is RxSync Detect Input */ + (unsigned short)((usc_InReg(info, IOCR) & ~(BIT13|BIT12)) | BIT12)); + + /* + * TxSubMode: + * CMR <15> 0 Don't send CRC on Tx Underrun + * CMR <14> x undefined + * CMR <13> 0 Send preamble before openning sync + * CMR <12> 0 Send 8-bit syncs, 1=send Syncs per TxLength + * + * TxMode: + * CMR <11-8) 0100 MonoSync + * + * 0x00 0100 xxxx xxxx 04xx + */ + RegValue |= 0x0400; + } + else { + + RegValue = 0x0606; + + if ( info->params.flags & HDLC_FLAG_UNDERRUN_ABORT15 ) + RegValue |= BIT14; + else if ( info->params.flags & HDLC_FLAG_UNDERRUN_FLAG ) + RegValue |= BIT15; + else if ( info->params.flags & HDLC_FLAG_UNDERRUN_CRC ) + RegValue |= BIT15 + BIT14; + } + + if ( info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE ) + RegValue |= BIT13; + } + + if ( info->params.mode == MGSL_MODE_HDLC && + (info->params.flags & HDLC_FLAG_SHARE_ZERO) ) + RegValue |= BIT12; + + if ( info->params.addr_filter != 0xff ) + { + /* set up receive address filtering */ + usc_OutReg( info, RSR, 
info->params.addr_filter ); + RegValue |= BIT4; + } + + usc_OutReg( info, CMR, RegValue ); + info->cmr_value = RegValue; + + /* Receiver mode Register (RMR) + * + * <15..13> 000 encoding + * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1) + * <10> 1 1 = Set CRC to all 1s (use for SDLC/HDLC) + * <9> 0 1 = Include Receive chars in CRC + * <8> 1 1 = Use Abort/PE bit as abort indicator + * <7..6> 00 Even parity + * <5> 0 parity disabled + * <4..2> 000 Receive Char Length = 8 bits + * <1..0> 00 Disable Receiver + * + * 0000 0101 0000 0000 = 0x0500 + */ + + RegValue = 0x0500; + + switch ( info->params.encoding ) { + case HDLC_ENCODING_NRZB: RegValue |= BIT13; break; + case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break; + case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break; + case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break; + case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break; + case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break; + case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break; + } + + if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT ) + RegValue |= BIT9; + else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT ) + RegValue |= ( BIT12 | BIT10 | BIT9 ); + + usc_OutReg( info, RMR, RegValue ); + + /* Set the Receive count Limit Register (RCLR) to 0xffff. */ + /* When an opening flag of an SDLC frame is recognized the */ + /* Receive Character count (RCC) is loaded with the value in */ + /* RCLR. The RCC is decremented for each received byte. The */ + /* value of RCC is stored after the closing flag of the frame */ + /* allowing the frame size to be computed. */ + + usc_OutReg( info, RCLR, RCLRVALUE ); + + usc_RCmd( info, RCmd_SelectRicrdma_level ); + + /* Receive Interrupt Control Register (RICR) + * + * <15..8> ? RxFIFO DMA Request Level + * <7> 0 Exited Hunt IA (Interrupt Arm) + * <6> 0 Idle Received IA + * <5> 0 Break/Abort IA + * <4> 0 Rx Bound IA + * <3> 1 Queued status reflects oldest 2 bytes in FIFO + * <2> 0 Abort/PE IA + * <1> 1 Rx Overrun IA + * <0> 0 Select TC0 value for readback + * + * 0000 0000 0000 1000 = 0x000a + */ + + /* Carry over the Exit Hunt and Idle Received bits */ + /* in case they have been armed by usc_ArmEvents. 
*/ + + RegValue = usc_InReg( info, RICR ) & 0xc0; + + if ( info->bus_type == MGSL_BUS_TYPE_PCI ) + usc_OutReg( info, RICR, (u16)(0x030a | RegValue) ); + else + usc_OutReg( info, RICR, (u16)(0x140a | RegValue) ); + + /* Unlatch all Rx status bits and clear Rx status IRQ Pending */ + + usc_UnlatchRxstatusBits( info, RXSTATUS_ALL ); + usc_ClearIrqPendingBits( info, RECEIVE_STATUS ); + + /* Transmit mode Register (TMR) + * + * <15..13> 000 encoding + * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1) + * <10> 1 1 = Start CRC as all 1s (use for SDLC/HDLC) + * <9> 0 1 = Tx CRC Enabled + * <8> 0 1 = Append CRC to end of transmit frame + * <7..6> 00 Transmit parity Even + * <5> 0 Transmit parity Disabled + * <4..2> 000 Tx Char Length = 8 bits + * <1..0> 00 Disable Transmitter + * + * 0000 0100 0000 0000 = 0x0400 + */ + + RegValue = 0x0400; + + switch ( info->params.encoding ) { + case HDLC_ENCODING_NRZB: RegValue |= BIT13; break; + case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break; + case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break; + case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break; + case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break; + case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break; + case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break; + } + + if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT ) + RegValue |= BIT9 + BIT8; + else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT ) + RegValue |= ( BIT12 | BIT10 | BIT9 | BIT8); + + usc_OutReg( info, TMR, RegValue ); + + usc_set_txidle( info ); + + + usc_TCmd( info, TCmd_SelectTicrdma_level ); + + /* Transmit Interrupt Control Register (TICR) + * + * <15..8> ? Transmit FIFO DMA Level + * <7> 0 Present IA (Interrupt Arm) + * <6> 0 Idle Sent IA + * <5> 1 Abort Sent IA + * <4> 1 EOF/EOM Sent IA + * <3> 0 CRC Sent IA + * <2> 1 1 = Wait for SW Trigger to Start Frame + * <1> 1 Tx Underrun IA + * <0> 0 TC0 constant on read back + * + * 0000 0000 0011 0110 = 0x0036 + */ + + if ( info->bus_type == MGSL_BUS_TYPE_PCI ) + usc_OutReg( info, TICR, 0x0736 ); + else + usc_OutReg( info, TICR, 0x1436 ); + + usc_UnlatchTxstatusBits( info, TXSTATUS_ALL ); + usc_ClearIrqPendingBits( info, TRANSMIT_STATUS ); + + /* + ** Transmit Command/Status Register (TCSR) + ** + ** <15..12> 0000 TCmd + ** <11> 0/1 UnderWait + ** <10..08> 000 TxIdle + ** <7> x PreSent + ** <6> x IdleSent + ** <5> x AbortSent + ** <4> x EOF/EOM Sent + ** <3> x CRC Sent + ** <2> x All Sent + ** <1> x TxUnder + ** <0> x TxEmpty + ** + ** 0000 0000 0000 0000 = 0x0000 + */ + info->tcsr_value = 0; + + if ( !PreSL1660 ) + info->tcsr_value |= TCSR_UNDERWAIT; + + usc_OutReg( info, TCSR, info->tcsr_value ); + + /* Clock mode Control Register (CMCR) + * + * <15..14> 00 counter 1 Source = Disabled + * <13..12> 00 counter 0 Source = Disabled + * <11..10> 11 BRG1 Input is TxC Pin + * <9..8> 11 BRG0 Input is TxC Pin + * <7..6> 01 DPLL Input is BRG1 Output + * <5..3> XXX TxCLK comes from Port 0 + * <2..0> XXX RxCLK comes from Port 1 + * + * 0000 1111 0111 0111 = 0x0f77 + */ + + RegValue = 0x0f40; + + if ( info->params.flags & HDLC_FLAG_RXC_DPLL ) + RegValue |= 0x0003; /* RxCLK from DPLL */ + else if ( info->params.flags & HDLC_FLAG_RXC_BRG ) + RegValue |= 0x0004; /* RxCLK from BRG0 */ + else if ( info->params.flags & HDLC_FLAG_RXC_TXCPIN) + RegValue |= 0x0006; /* RxCLK from TXC Input */ + else + RegValue |= 0x0007; /* RxCLK from Port1 */ + + if ( info->params.flags & HDLC_FLAG_TXC_DPLL ) 
+ RegValue |= 0x0018; /* TxCLK from DPLL */
+ else if ( info->params.flags & HDLC_FLAG_TXC_BRG )
+ RegValue |= 0x0020; /* TxCLK from BRG0 */
+ else if ( info->params.flags & HDLC_FLAG_TXC_RXCPIN)
+ RegValue |= 0x0038; /* TxCLK from RxC Input */
+ else
+ RegValue |= 0x0030; /* TxCLK from Port0 */
+
+ usc_OutReg( info, CMCR, RegValue );
+
+
+ /* Hardware Configuration Register (HCR)
+ *
+ * <15..14> 00 CTR0 Divisor:00=32,01=16,10=8,11=4
+ * <13> 0 CTR1DSel:0=CTR0Div determines CTR1Div
+ * <12> 0 CVOK:0=report code violation in biphase
+ * <11..10> 00 DPLL Divisor:00=32,01=16,10=8,11=4
+ * <9..8> XX DPLL mode:00=disable,01=NRZ,10=Biphase,11=Biphase Level
+ * <7..6> 00 reserved
+ * <5> 0 BRG1 mode:0=continuous,1=single cycle
+ * <4> X BRG1 Enable
+ * <3..2> 00 reserved
+ * <1> 0 BRG0 mode:0=continuous,1=single cycle
+ * <0> 0 BRG0 Enable
+ */
+
+ RegValue = 0x0000;
+
+ if ( info->params.flags & (HDLC_FLAG_RXC_DPLL + HDLC_FLAG_TXC_DPLL) ) {
+ u32 XtalSpeed;
+ u32 DpllDivisor;
+ u16 Tc;
+
+ /* DPLL is enabled. Use BRG1 to provide continuous reference clock */
+ /* for DPLL. DPLL mode in HCR is dependent on the encoding used. */
+
+ if ( info->bus_type == MGSL_BUS_TYPE_PCI )
+ XtalSpeed = 11059200;
+ else
+ XtalSpeed = 14745600;
+
+ if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) {
+ DpllDivisor = 16;
+ RegValue |= BIT10;
+ }
+ else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) {
+ DpllDivisor = 8;
+ RegValue |= BIT11;
+ }
+ else
+ DpllDivisor = 32;
+
+ /* Tc = (Xtal/Speed) - 1 */
+ /* If twice the remainder of (Xtal/Speed) is greater than Speed */
+ /* then rounding up gives a more precise time constant. Instead */
+ /* of rounding up and then subtracting 1 we just don't subtract */
+ /* the one in this case. */
+
+ /*--------------------------------------------------
+ * ejz: for DPLL mode, application should use the
+ * same clock speed as the partner system, even
+ * though clocking is derived from the input RxData.
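+ * As a worked example using the values above: with the ISA
+ * xtal (14745600) and the default DPLL divisor of 32, a
+ * 9600bps partner clock gives Tc = (14745600/32)/9600 - 1 = 47.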
+ * In case the user uses a 0 for the clock speed, + * default to 0xffffffff and don't try to divide by + * zero + *--------------------------------------------------*/ + if ( info->params.clock_speed ) + { + Tc = (u16)((XtalSpeed/DpllDivisor)/info->params.clock_speed); + if ( !((((XtalSpeed/DpllDivisor) % info->params.clock_speed) * 2) + / info->params.clock_speed) ) + Tc--; + } + else + Tc = -1; + + + /* Write 16-bit Time Constant for BRG1 */ + usc_OutReg( info, TC1R, Tc ); + + RegValue |= BIT4; /* enable BRG1 */ + + switch ( info->params.encoding ) { + case HDLC_ENCODING_NRZ: + case HDLC_ENCODING_NRZB: + case HDLC_ENCODING_NRZI_MARK: + case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT8; break; + case HDLC_ENCODING_BIPHASE_MARK: + case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT9; break; + case HDLC_ENCODING_BIPHASE_LEVEL: + case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT9 + BIT8; break; + } + } + + usc_OutReg( info, HCR, RegValue ); + + + /* Channel Control/status Register (CCSR) + * + * <15> X RCC FIFO Overflow status (RO) + * <14> X RCC FIFO Not Empty status (RO) + * <13> 0 1 = Clear RCC FIFO (WO) + * <12> X DPLL Sync (RW) + * <11> X DPLL 2 Missed Clocks status (RO) + * <10> X DPLL 1 Missed Clock status (RO) + * <9..8> 00 DPLL Resync on rising and falling edges (RW) + * <7> X SDLC Loop On status (RO) + * <6> X SDLC Loop Send status (RO) + * <5> 1 Bypass counters for TxClk and RxClk (RW) + * <4..2> 000 Last Char of SDLC frame has 8 bits (RW) + * <1..0> 00 reserved + * + * 0000 0000 0010 0000 = 0x0020 + */ + + usc_OutReg( info, CCSR, 0x1020 ); + + + if ( info->params.flags & HDLC_FLAG_AUTO_CTS ) { + usc_OutReg( info, SICR, + (u16)(usc_InReg(info,SICR) | SICR_CTS_INACTIVE) ); + } + + + /* enable Master Interrupt Enable bit (MIE) */ + usc_EnableMasterIrqBit( info ); + + usc_ClearIrqPendingBits( info, RECEIVE_STATUS + RECEIVE_DATA + + TRANSMIT_STATUS + TRANSMIT_DATA + MISC); + + /* arm RCC underflow interrupt */ + usc_OutReg(info, SICR, (u16)(usc_InReg(info,SICR) | BIT3)); + usc_EnableInterrupts(info, MISC); + + info->mbre_bit = 0; + outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */ + usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */ + info->mbre_bit = BIT8; + outw( BIT8, info->io_base ); /* set Master Bus Enable (DCAR) */ + + if (info->bus_type == MGSL_BUS_TYPE_ISA) { + /* Enable DMAEN (Port 7, Bit 14) */ + /* This connects the DMA request signal to the ISA bus */ + usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) & ~BIT14)); + } + + /* DMA Control Register (DCR) + * + * <15..14> 10 Priority mode = Alternating Tx/Rx + * 01 Rx has priority + * 00 Tx has priority + * + * <13> 1 Enable Priority Preempt per DCR<15..14> + * (WARNING DCR<11..10> must be 00 when this is 1) + * 0 Choose activate channel per DCR<11..10> + * + * <12> 0 Little Endian for Array/List + * <11..10> 00 Both Channels can use each bus grant + * <9..6> 0000 reserved + * <5> 0 7 CLK - Minimum Bus Re-request Interval + * <4> 0 1 = drive D/C and S/D pins + * <3> 1 1 = Add one wait state to all DMA cycles. + * <2> 0 1 = Strobe /UAS on every transfer. 
+ * <1..0> 11 Addr incrementing only affects LS24 bits + * + * 0110 0000 0000 1011 = 0x600b + */ + + if ( info->bus_type == MGSL_BUS_TYPE_PCI ) { + /* PCI adapter does not need DMA wait state */ + usc_OutDmaReg( info, DCR, 0xa00b ); + } + else + usc_OutDmaReg( info, DCR, 0x800b ); + + + /* Receive DMA mode Register (RDMR) + * + * <15..14> 11 DMA mode = Linked List Buffer mode + * <13> 1 RSBinA/L = store Rx status Block in Arrary/List entry + * <12> 1 Clear count of List Entry after fetching + * <11..10> 00 Address mode = Increment + * <9> 1 Terminate Buffer on RxBound + * <8> 0 Bus Width = 16bits + * <7..0> ? status Bits (write as 0s) + * + * 1111 0010 0000 0000 = 0xf200 + */ + + usc_OutDmaReg( info, RDMR, 0xf200 ); + + + /* Transmit DMA mode Register (TDMR) + * + * <15..14> 11 DMA mode = Linked List Buffer mode + * <13> 1 TCBinA/L = fetch Tx Control Block from List entry + * <12> 1 Clear count of List Entry after fetching + * <11..10> 00 Address mode = Increment + * <9> 1 Terminate Buffer on end of frame + * <8> 0 Bus Width = 16bits + * <7..0> ? status Bits (Read Only so write as 0) + * + * 1111 0010 0000 0000 = 0xf200 + */ + + usc_OutDmaReg( info, TDMR, 0xf200 ); + + + /* DMA Interrupt Control Register (DICR) + * + * <15> 1 DMA Interrupt Enable + * <14> 0 1 = Disable IEO from USC + * <13> 0 1 = Don't provide vector during IntAck + * <12> 1 1 = Include status in Vector + * <10..2> 0 reserved, Must be 0s + * <1> 0 1 = Rx DMA Interrupt Enabled + * <0> 0 1 = Tx DMA Interrupt Enabled + * + * 1001 0000 0000 0000 = 0x9000 + */ + + usc_OutDmaReg( info, DICR, 0x9000 ); + + usc_InDmaReg( info, RDMR ); /* clear pending receive DMA IRQ bits */ + usc_InDmaReg( info, TDMR ); /* clear pending transmit DMA IRQ bits */ + usc_OutDmaReg( info, CDIR, 0x0303 ); /* clear IUS and Pending for Tx and Rx */ + + /* Channel Control Register (CCR) + * + * <15..14> 10 Use 32-bit Tx Control Blocks (TCBs) + * <13> 0 Trigger Tx on SW Command Disabled + * <12> 0 Flag Preamble Disabled + * <11..10> 00 Preamble Length + * <9..8> 00 Preamble Pattern + * <7..6> 10 Use 32-bit Rx status Blocks (RSBs) + * <5> 0 Trigger Rx on SW Command Disabled + * <4..0> 0 reserved + * + * 1000 0000 1000 0000 = 0x8080 + */ + + RegValue = 0x8080; + + switch ( info->params.preamble_length ) { + case HDLC_PREAMBLE_LENGTH_16BITS: RegValue |= BIT10; break; + case HDLC_PREAMBLE_LENGTH_32BITS: RegValue |= BIT11; break; + case HDLC_PREAMBLE_LENGTH_64BITS: RegValue |= BIT11 + BIT10; break; + } + + switch ( info->params.preamble ) { + case HDLC_PREAMBLE_PATTERN_FLAGS: RegValue |= BIT8 + BIT12; break; + case HDLC_PREAMBLE_PATTERN_ONES: RegValue |= BIT8; break; + case HDLC_PREAMBLE_PATTERN_10: RegValue |= BIT9; break; + case HDLC_PREAMBLE_PATTERN_01: RegValue |= BIT9 + BIT8; break; + } + + usc_OutReg( info, CCR, RegValue ); + + + /* + * Burst/Dwell Control Register + * + * <15..8> 0x20 Maximum number of transfers per bus grant + * <7..0> 0x00 Maximum number of clock cycles per bus grant + */ + + if ( info->bus_type == MGSL_BUS_TYPE_PCI ) { + /* don't limit bus occupancy on PCI adapter */ + usc_OutDmaReg( info, BDCR, 0x0000 ); + } + else + usc_OutDmaReg( info, BDCR, 0x2000 ); + + usc_stop_transmitter(info); + usc_stop_receiver(info); + +} /* end of usc_set_sdlc_mode() */ + +/* usc_enable_loopback() + * + * Set the 16C32 for internal loopback mode. + * The TxCLK and RxCLK signals are generated from the BRG0 and + * the TxD is looped back to the RxD internally. 
+ * + * Arguments: info pointer to device instance data + * enable 1 = enable loopback, 0 = disable + * Return Value: None + */ +static void usc_enable_loopback(struct mgsl_struct *info, int enable) +{ + if (enable) { + /* blank external TXD output */ + usc_OutReg(info,IOCR,usc_InReg(info,IOCR) | (BIT7+BIT6)); + + /* Clock mode Control Register (CMCR) + * + * <15..14> 00 counter 1 Disabled + * <13..12> 00 counter 0 Disabled + * <11..10> 11 BRG1 Input is TxC Pin + * <9..8> 11 BRG0 Input is TxC Pin + * <7..6> 01 DPLL Input is BRG1 Output + * <5..3> 100 TxCLK comes from BRG0 + * <2..0> 100 RxCLK comes from BRG0 + * + * 0000 1111 0110 0100 = 0x0f64 + */ + + usc_OutReg( info, CMCR, 0x0f64 ); + + /* Write 16-bit Time Constant for BRG0 */ + /* use clock speed if available, otherwise use 8 for diagnostics */ + if (info->params.clock_speed) { + if (info->bus_type == MGSL_BUS_TYPE_PCI) + usc_OutReg(info, TC0R, (u16)((11059200/info->params.clock_speed)-1)); + else + usc_OutReg(info, TC0R, (u16)((14745600/info->params.clock_speed)-1)); + } else + usc_OutReg(info, TC0R, (u16)8); + + /* Hardware Configuration Register (HCR) Clear Bit 1, BRG0 + mode = Continuous Set Bit 0 to enable BRG0. */ + usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) ); + + /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */ + usc_OutReg(info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004)); + + /* set Internal Data loopback mode */ + info->loopback_bits = 0x300; + outw( 0x0300, info->io_base + CCAR ); + } else { + /* enable external TXD output */ + usc_OutReg(info,IOCR,usc_InReg(info,IOCR) & ~(BIT7+BIT6)); + + /* clear Internal Data loopback mode */ + info->loopback_bits = 0; + outw( 0,info->io_base + CCAR ); + } + +} /* end of usc_enable_loopback() */ + +/* usc_enable_aux_clock() + * + * Enabled the AUX clock output at the specified frequency. + * + * Arguments: + * + * info pointer to device extension + * data_rate data rate of clock in bits per second + * A data rate of 0 disables the AUX clock. + * + * Return Value: None + */ +static void usc_enable_aux_clock( struct mgsl_struct *info, u32 data_rate ) +{ + u32 XtalSpeed; + u16 Tc; + + if ( data_rate ) { + if ( info->bus_type == MGSL_BUS_TYPE_PCI ) + XtalSpeed = 11059200; + else + XtalSpeed = 14745600; + + + /* Tc = (Xtal/Speed) - 1 */ + /* If twice the remainder of (Xtal/Speed) is greater than Speed */ + /* then rounding up gives a more precise time constant. Instead */ + /* of rounding up and then subtracting 1 we just don't subtract */ + /* the one in this case. */ + + + Tc = (u16)(XtalSpeed/data_rate); + if ( !(((XtalSpeed % data_rate) * 2) / data_rate) ) + Tc--; + + /* Write 16-bit Time Constant for BRG0 */ + usc_OutReg( info, TC0R, Tc ); + + /* + * Hardware Configuration Register (HCR) + * Clear Bit 1, BRG0 mode = Continuous + * Set Bit 0 to enable BRG0. + */ + + usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) ); + + /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */ + usc_OutReg( info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) ); + } else { + /* data rate == 0 so turn off BRG0 */ + usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) ); + } + +} /* end of usc_enable_aux_clock() */ + +/* + * + * usc_process_rxoverrun_sync() + * + * This function processes a receive overrun by resetting the + * receive DMA buffers and issuing a Purge Rx FIFO command + * to allow the receiver to continue receiving. 
+ * + * Arguments: + * + * info pointer to device extension + * + * Return Value: None + */ +static void usc_process_rxoverrun_sync( struct mgsl_struct *info ) +{ + int start_index; + int end_index; + int frame_start_index; + bool start_of_frame_found = false; + bool end_of_frame_found = false; + bool reprogram_dma = false; + + DMABUFFERENTRY *buffer_list = info->rx_buffer_list; + u32 phys_addr; + + usc_DmaCmd( info, DmaCmd_PauseRxChannel ); + usc_RCmd( info, RCmd_EnterHuntmode ); + usc_RTCmd( info, RTCmd_PurgeRxFifo ); + + /* CurrentRxBuffer points to the 1st buffer of the next */ + /* possibly available receive frame. */ + + frame_start_index = start_index = end_index = info->current_rx_buffer; + + /* Search for an unfinished string of buffers. This means */ + /* that a receive frame started (at least one buffer with */ + /* count set to zero) but there is no terminiting buffer */ + /* (status set to non-zero). */ + + while( !buffer_list[end_index].count ) + { + /* Count field has been reset to zero by 16C32. */ + /* This buffer is currently in use. */ + + if ( !start_of_frame_found ) + { + start_of_frame_found = true; + frame_start_index = end_index; + end_of_frame_found = false; + } + + if ( buffer_list[end_index].status ) + { + /* Status field has been set by 16C32. */ + /* This is the last buffer of a received frame. */ + + /* We want to leave the buffers for this frame intact. */ + /* Move on to next possible frame. */ + + start_of_frame_found = false; + end_of_frame_found = true; + } + + /* advance to next buffer entry in linked list */ + end_index++; + if ( end_index == info->rx_buffer_count ) + end_index = 0; + + if ( start_index == end_index ) + { + /* The entire list has been searched with all Counts == 0 and */ + /* all Status == 0. The receive buffers are */ + /* completely screwed, reset all receive buffers! */ + mgsl_reset_rx_dma_buffers( info ); + frame_start_index = 0; + start_of_frame_found = false; + reprogram_dma = true; + break; + } + } + + if ( start_of_frame_found && !end_of_frame_found ) + { + /* There is an unfinished string of receive DMA buffers */ + /* as a result of the receiver overrun. */ + + /* Reset the buffers for the unfinished frame */ + /* and reprogram the receive DMA controller to start */ + /* at the 1st buffer of unfinished frame. */ + + start_index = frame_start_index; + + do + { + *((unsigned long *)&(info->rx_buffer_list[start_index++].count)) = DMABUFFERSIZE; + + /* Adjust index for wrap around. */ + if ( start_index == info->rx_buffer_count ) + start_index = 0; + + } while( start_index != end_index ); + + reprogram_dma = true; + } + + if ( reprogram_dma ) + { + usc_UnlatchRxstatusBits(info,RXSTATUS_ALL); + usc_ClearIrqPendingBits(info, RECEIVE_DATA|RECEIVE_STATUS); + usc_UnlatchRxstatusBits(info, RECEIVE_DATA|RECEIVE_STATUS); + + usc_EnableReceiver(info,DISABLE_UNCONDITIONAL); + + /* This empties the receive FIFO and loads the RCC with RCLR */ + usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) ); + + /* program 16C32 with physical address of 1st DMA buffer entry */ + phys_addr = info->rx_buffer_list[frame_start_index].phys_entry; + usc_OutDmaReg( info, NRARL, (u16)phys_addr ); + usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) ); + + usc_UnlatchRxstatusBits( info, RXSTATUS_ALL ); + usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS ); + usc_EnableInterrupts( info, RECEIVE_STATUS ); + + /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */ + /* 2. 
Enable Receive DMA Interrupts (BIT1 of DICR) */ + + usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 ); + usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) ); + usc_DmaCmd( info, DmaCmd_InitRxChannel ); + if ( info->params.flags & HDLC_FLAG_AUTO_DCD ) + usc_EnableReceiver(info,ENABLE_AUTO_DCD); + else + usc_EnableReceiver(info,ENABLE_UNCONDITIONAL); + } + else + { + /* This empties the receive FIFO and loads the RCC with RCLR */ + usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) ); + usc_RTCmd( info, RTCmd_PurgeRxFifo ); + } + +} /* end of usc_process_rxoverrun_sync() */ + +/* usc_stop_receiver() + * + * Disable USC receiver + * + * Arguments: info pointer to device instance data + * Return Value: None + */ +static void usc_stop_receiver( struct mgsl_struct *info ) +{ + if (debug_level >= DEBUG_LEVEL_ISR) + printk("%s(%d):usc_stop_receiver(%s)\n", + __FILE__,__LINE__, info->device_name ); + + /* Disable receive DMA channel. */ + /* This also disables receive DMA channel interrupts */ + usc_DmaCmd( info, DmaCmd_ResetRxChannel ); + + usc_UnlatchRxstatusBits( info, RXSTATUS_ALL ); + usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS ); + usc_DisableInterrupts( info, RECEIVE_DATA + RECEIVE_STATUS ); + + usc_EnableReceiver(info,DISABLE_UNCONDITIONAL); + + /* This empties the receive FIFO and loads the RCC with RCLR */ + usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) ); + usc_RTCmd( info, RTCmd_PurgeRxFifo ); + + info->rx_enabled = false; + info->rx_overflow = false; + info->rx_rcc_underrun = false; + +} /* end of stop_receiver() */ + +/* usc_start_receiver() + * + * Enable the USC receiver + * + * Arguments: info pointer to device instance data + * Return Value: None + */ +static void usc_start_receiver( struct mgsl_struct *info ) +{ + u32 phys_addr; + + if (debug_level >= DEBUG_LEVEL_ISR) + printk("%s(%d):usc_start_receiver(%s)\n", + __FILE__,__LINE__, info->device_name ); + + mgsl_reset_rx_dma_buffers( info ); + usc_stop_receiver( info ); + + usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) ); + usc_RTCmd( info, RTCmd_PurgeRxFifo ); + + if ( info->params.mode == MGSL_MODE_HDLC || + info->params.mode == MGSL_MODE_RAW ) { + /* DMA mode Transfers */ + /* Program the DMA controller. */ + /* Enable the DMA controller end of buffer interrupt. */ + + /* program 16C32 with physical address of 1st DMA buffer entry */ + phys_addr = info->rx_buffer_list[0].phys_entry; + usc_OutDmaReg( info, NRARL, (u16)phys_addr ); + usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) ); + + usc_UnlatchRxstatusBits( info, RXSTATUS_ALL ); + usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS ); + usc_EnableInterrupts( info, RECEIVE_STATUS ); + + /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */ + /* 2. 
Enable Receive DMA Interrupts (BIT1 of DICR) */ + + usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 ); + usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) ); + usc_DmaCmd( info, DmaCmd_InitRxChannel ); + if ( info->params.flags & HDLC_FLAG_AUTO_DCD ) + usc_EnableReceiver(info,ENABLE_AUTO_DCD); + else + usc_EnableReceiver(info,ENABLE_UNCONDITIONAL); + } else { + usc_UnlatchRxstatusBits(info, RXSTATUS_ALL); + usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS); + usc_EnableInterrupts(info, RECEIVE_DATA); + + usc_RTCmd( info, RTCmd_PurgeRxFifo ); + usc_RCmd( info, RCmd_EnterHuntmode ); + + usc_EnableReceiver(info,ENABLE_UNCONDITIONAL); + } + + usc_OutReg( info, CCSR, 0x1020 ); + + info->rx_enabled = true; + +} /* end of usc_start_receiver() */ + +/* usc_start_transmitter() + * + * Enable the USC transmitter and send a transmit frame if + * one is loaded in the DMA buffers. + * + * Arguments: info pointer to device instance data + * Return Value: None + */ +static void usc_start_transmitter( struct mgsl_struct *info ) +{ + u32 phys_addr; + unsigned int FrameSize; + + if (debug_level >= DEBUG_LEVEL_ISR) + printk("%s(%d):usc_start_transmitter(%s)\n", + __FILE__,__LINE__, info->device_name ); + + if ( info->xmit_cnt ) { + + /* If auto RTS enabled and RTS is inactive, then assert */ + /* RTS and set a flag indicating that the driver should */ + /* negate RTS when the transmission completes. */ + + info->drop_rts_on_tx_done = false; + + if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) { + usc_get_serial_signals( info ); + if ( !(info->serial_signals & SerialSignal_RTS) ) { + info->serial_signals |= SerialSignal_RTS; + usc_set_serial_signals( info ); + info->drop_rts_on_tx_done = true; + } + } + + + if ( info->params.mode == MGSL_MODE_ASYNC ) { + if ( !info->tx_active ) { + usc_UnlatchTxstatusBits(info, TXSTATUS_ALL); + usc_ClearIrqPendingBits(info, TRANSMIT_STATUS + TRANSMIT_DATA); + usc_EnableInterrupts(info, TRANSMIT_DATA); + usc_load_txfifo(info); + } + } else { + /* Disable transmit DMA controller while programming. */ + usc_DmaCmd( info, DmaCmd_ResetTxChannel ); + + /* Transmit DMA buffer is loaded, so program USC */ + /* to send the frame contained in the buffers. */ + + FrameSize = info->tx_buffer_list[info->start_tx_dma_buffer].rcc; + + /* if operating in Raw sync mode, reset the rcc component + * of the tx dma buffer entry, otherwise, the serial controller + * will send a closing sync char after this count. + */ + if ( info->params.mode == MGSL_MODE_RAW ) + info->tx_buffer_list[info->start_tx_dma_buffer].rcc = 0; + + /* Program the Transmit Character Length Register (TCLR) */ + /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */ + usc_OutReg( info, TCLR, (u16)FrameSize ); + + usc_RTCmd( info, RTCmd_PurgeTxFifo ); + + /* Program the address of the 1st DMA Buffer Entry in linked list */ + phys_addr = info->tx_buffer_list[info->start_tx_dma_buffer].phys_entry; + usc_OutDmaReg( info, NTARL, (u16)phys_addr ); + usc_OutDmaReg( info, NTARU, (u16)(phys_addr >> 16) ); + + usc_UnlatchTxstatusBits( info, TXSTATUS_ALL ); + usc_ClearIrqPendingBits( info, TRANSMIT_STATUS ); + usc_EnableInterrupts( info, TRANSMIT_STATUS ); + + if ( info->params.mode == MGSL_MODE_RAW && + info->num_tx_dma_buffers > 1 ) { + /* When running external sync mode, attempt to 'stream' transmit */ + /* by filling tx dma buffers as they become available. To do this */ + /* we need to enable Tx DMA EOB Status interrupts : */ + /* */ + /* 1. 
Arm End of Buffer (EOB) Transmit DMA Interrupt (BIT2 of TDIAR) */ + /* 2. Enable Transmit DMA Interrupts (BIT0 of DICR) */ + + usc_OutDmaReg( info, TDIAR, BIT2|BIT3 ); + usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT0) ); + } + + /* Initialize Transmit DMA Channel */ + usc_DmaCmd( info, DmaCmd_InitTxChannel ); + + usc_TCmd( info, TCmd_SendFrame ); + + mod_timer(&info->tx_timer, jiffies + + msecs_to_jiffies(5000)); + } + info->tx_active = true; + } + + if ( !info->tx_enabled ) { + info->tx_enabled = true; + if ( info->params.flags & HDLC_FLAG_AUTO_CTS ) + usc_EnableTransmitter(info,ENABLE_AUTO_CTS); + else + usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL); + } + +} /* end of usc_start_transmitter() */ + +/* usc_stop_transmitter() + * + * Stops the transmitter and DMA + * + * Arguments: info pointer to device isntance data + * Return Value: None + */ +static void usc_stop_transmitter( struct mgsl_struct *info ) +{ + if (debug_level >= DEBUG_LEVEL_ISR) + printk("%s(%d):usc_stop_transmitter(%s)\n", + __FILE__,__LINE__, info->device_name ); + + del_timer(&info->tx_timer); + + usc_UnlatchTxstatusBits( info, TXSTATUS_ALL ); + usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA ); + usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA ); + + usc_EnableTransmitter(info,DISABLE_UNCONDITIONAL); + usc_DmaCmd( info, DmaCmd_ResetTxChannel ); + usc_RTCmd( info, RTCmd_PurgeTxFifo ); + + info->tx_enabled = false; + info->tx_active = false; + +} /* end of usc_stop_transmitter() */ + +/* usc_load_txfifo() + * + * Fill the transmit FIFO until the FIFO is full or + * there is no more data to load. + * + * Arguments: info pointer to device extension (instance data) + * Return Value: None + */ +static void usc_load_txfifo( struct mgsl_struct *info ) +{ + int Fifocount; + u8 TwoBytes[2]; + + if ( !info->xmit_cnt && !info->x_char ) + return; + + /* Select transmit FIFO status readback in TICR */ + usc_TCmd( info, TCmd_SelectTicrTxFifostatus ); + + /* load the Transmit FIFO until FIFOs full or all data sent */ + + while( (Fifocount = usc_InReg(info, TICR) >> 8) && info->xmit_cnt ) { + /* there is more space in the transmit FIFO and */ + /* there is more data in transmit buffer */ + + if ( (info->xmit_cnt > 1) && (Fifocount > 1) && !info->x_char ) { + /* write a 16-bit word from transmit buffer to 16C32 */ + + TwoBytes[0] = info->xmit_buf[info->xmit_tail++]; + info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1); + TwoBytes[1] = info->xmit_buf[info->xmit_tail++]; + info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1); + + outw( *((u16 *)TwoBytes), info->io_base + DATAREG); + + info->xmit_cnt -= 2; + info->icount.tx += 2; + } else { + /* only 1 byte left to transmit or 1 FIFO slot left */ + + outw( (inw( info->io_base + CCAR) & 0x0780) | (TDR+LSBONLY), + info->io_base + CCAR ); + + if (info->x_char) { + /* transmit pending high priority char */ + outw( info->x_char,info->io_base + CCAR ); + info->x_char = 0; + } else { + outw( info->xmit_buf[info->xmit_tail++],info->io_base + CCAR ); + info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1); + info->xmit_cnt--; + } + info->icount.tx++; + } + } + +} /* end of usc_load_txfifo() */ + +/* usc_reset() + * + * Reset the adapter to a known state and prepare it for further use. 
+ * + * Arguments: info pointer to device instance data + * Return Value: None + */ +static void usc_reset( struct mgsl_struct *info ) +{ + if ( info->bus_type == MGSL_BUS_TYPE_PCI ) { + int i; + u32 readval; + + /* Set BIT30 of Misc Control Register */ + /* (Local Control Register 0x50) to force reset of USC. */ + + volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50); + u32 *LCR0BRDR = (u32 *)(info->lcr_base + 0x28); + + info->misc_ctrl_value |= BIT30; + *MiscCtrl = info->misc_ctrl_value; + + /* + * Force at least 170ns delay before clearing + * reset bit. Each read from LCR takes at least + * 30ns so 10 times for 300ns to be safe. + */ + for(i=0;i<10;i++) + readval = *MiscCtrl; + + info->misc_ctrl_value &= ~BIT30; + *MiscCtrl = info->misc_ctrl_value; + + *LCR0BRDR = BUS_DESCRIPTOR( + 1, // Write Strobe Hold (0-3) + 2, // Write Strobe Delay (0-3) + 2, // Read Strobe Delay (0-3) + 0, // NWDD (Write data-data) (0-3) + 4, // NWAD (Write Addr-data) (0-31) + 0, // NXDA (Read/Write Data-Addr) (0-3) + 0, // NRDD (Read Data-Data) (0-3) + 5 // NRAD (Read Addr-Data) (0-31) + ); + } else { + /* do HW reset */ + outb( 0,info->io_base + 8 ); + } + + info->mbre_bit = 0; + info->loopback_bits = 0; + info->usc_idle_mode = 0; + + /* + * Program the Bus Configuration Register (BCR) + * + * <15> 0 Don't use separate address + * <14..6> 0 reserved + * <5..4> 00 IAckmode = Default, don't care + * <3> 1 Bus Request Totem Pole output + * <2> 1 Use 16 Bit data bus + * <1> 0 IRQ Totem Pole output + * <0> 0 Don't Shift Right Addr + * + * 0000 0000 0000 1100 = 0x000c + * + * By writing to io_base + SDPIN the Wait/Ack pin is + * programmed to work as a Wait pin. + */ + + outw( 0x000c,info->io_base + SDPIN ); + + + outw( 0,info->io_base ); + outw( 0,info->io_base + CCAR ); + + /* select little endian byte ordering */ + usc_RTCmd( info, RTCmd_SelectLittleEndian ); + + + /* Port Control Register (PCR) + * + * <15..14> 11 Port 7 is Output (~DMAEN, Bit 14 : 0 = Enabled) + * <13..12> 11 Port 6 is Output (~INTEN, Bit 12 : 0 = Enabled) + * <11..10> 00 Port 5 is Input (No Connect, Don't Care) + * <9..8> 00 Port 4 is Input (No Connect, Don't Care) + * <7..6> 11 Port 3 is Output (~RTS, Bit 6 : 0 = Enabled ) + * <5..4> 11 Port 2 is Output (~DTR, Bit 4 : 0 = Enabled ) + * <3..2> 01 Port 1 is Input (Dedicated RxC) + * <1..0> 01 Port 0 is Input (Dedicated TxC) + * + * 1111 0000 1111 0101 = 0xf0f5 + */ + + usc_OutReg( info, PCR, 0xf0f5 ); + + + /* + * Input/Output Control Register + * + * <15..14> 00 CTS is active low input + * <13..12> 00 DCD is active low input + * <11..10> 00 TxREQ pin is input (DSR) + * <9..8> 00 RxREQ pin is input (RI) + * <7..6> 00 TxD is output (Transmit Data) + * <5..3> 000 TxC Pin in Input (14.7456MHz Clock) + * <2..0> 100 RxC is Output (drive with BRG0) + * + * 0000 0000 0000 0100 = 0x0004 + */ + + usc_OutReg( info, IOCR, 0x0004 ); + +} /* end of usc_reset() */ + +/* usc_set_async_mode() + * + * Program adapter for asynchronous communications. 
+ * + * Arguments: info pointer to device instance data + * Return Value: None + */ +static void usc_set_async_mode( struct mgsl_struct *info ) +{ + u16 RegValue; + + /* disable interrupts while programming USC */ + usc_DisableMasterIrqBit( info ); + + outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */ + usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */ + + usc_loopback_frame( info ); + + /* Channel mode Register (CMR) + * + * <15..14> 00 Tx Sub modes, 00 = 1 Stop Bit + * <13..12> 00 00 = 16X Clock + * <11..8> 0000 Transmitter mode = Asynchronous + * <7..6> 00 reserved? + * <5..4> 00 Rx Sub modes, 00 = 16X Clock + * <3..0> 0000 Receiver mode = Asynchronous + * + * 0000 0000 0000 0000 = 0x0 + */ + + RegValue = 0; + if ( info->params.stop_bits != 1 ) + RegValue |= BIT14; + usc_OutReg( info, CMR, RegValue ); + + + /* Receiver mode Register (RMR) + * + * <15..13> 000 encoding = None + * <12..08> 00000 reserved (Sync Only) + * <7..6> 00 Even parity + * <5> 0 parity disabled + * <4..2> 000 Receive Char Length = 8 bits + * <1..0> 00 Disable Receiver + * + * 0000 0000 0000 0000 = 0x0 + */ + + RegValue = 0; + + if ( info->params.data_bits != 8 ) + RegValue |= BIT4+BIT3+BIT2; + + if ( info->params.parity != ASYNC_PARITY_NONE ) { + RegValue |= BIT5; + if ( info->params.parity != ASYNC_PARITY_ODD ) + RegValue |= BIT6; + } + + usc_OutReg( info, RMR, RegValue ); + + + /* Set IRQ trigger level */ + + usc_RCmd( info, RCmd_SelectRicrIntLevel ); + + + /* Receive Interrupt Control Register (RICR) + * + * <15..8> ? RxFIFO IRQ Request Level + * + * Note: For async mode the receive FIFO level must be set + * to 0 to avoid the situation where the FIFO contains fewer bytes + * than the trigger level and no more data is expected. + * + * <7> 0 Exited Hunt IA (Interrupt Arm) + * <6> 0 Idle Received IA + * <5> 0 Break/Abort IA + * <4> 0 Rx Bound IA + * <3> 0 Queued status reflects oldest byte in FIFO + * <2> 0 Abort/PE IA + * <1> 0 Rx Overrun IA + * <0> 0 Select TC0 value for readback + * + * 0000 0000 0100 0000 = 0x0000 + (FIFOLEVEL in MSB) + */ + + usc_OutReg( info, RICR, 0x0000 ); + + usc_UnlatchRxstatusBits( info, RXSTATUS_ALL ); + usc_ClearIrqPendingBits( info, RECEIVE_STATUS ); + + + /* Transmit mode Register (TMR) + * + * <15..13> 000 encoding = None + * <12..08> 00000 reserved (Sync Only) + * <7..6> 00 Transmit parity Even + * <5> 0 Transmit parity Disabled + * <4..2> 000 Tx Char Length = 8 bits + * <1..0> 00 Disable Transmitter + * + * 0000 0000 0000 0000 = 0x0 + */ + + RegValue = 0; + + if ( info->params.data_bits != 8 ) + RegValue |= BIT4+BIT3+BIT2; + + if ( info->params.parity != ASYNC_PARITY_NONE ) { + RegValue |= BIT5; + if ( info->params.parity != ASYNC_PARITY_ODD ) + RegValue |= BIT6; + } + + usc_OutReg( info, TMR, RegValue ); + + usc_set_txidle( info ); + + + /* Set IRQ trigger level */ + + usc_TCmd( info, TCmd_SelectTicrIntLevel ); + + + /* Transmit Interrupt Control Register (TICR) + * + * <15..8> ? 
Transmit FIFO IRQ Level + * <7> 0 Present IA (Interrupt Arm) + * <6> 1 Idle Sent IA + * <5> 0 Abort Sent IA + * <4> 0 EOF/EOM Sent IA + * <3> 0 CRC Sent IA + * <2> 0 1 = Wait for SW Trigger to Start Frame + * <1> 0 Tx Underrun IA + * <0> 0 TC0 constant on read back + * + * 0000 0000 0100 0000 = 0x0040 + */ + + usc_OutReg( info, TICR, 0x1f40 ); + + usc_UnlatchTxstatusBits( info, TXSTATUS_ALL ); + usc_ClearIrqPendingBits( info, TRANSMIT_STATUS ); + + usc_enable_async_clock( info, info->params.data_rate ); + + + /* Channel Control/status Register (CCSR) + * + * <15> X RCC FIFO Overflow status (RO) + * <14> X RCC FIFO Not Empty status (RO) + * <13> 0 1 = Clear RCC FIFO (WO) + * <12> X DPLL in Sync status (RO) + * <11> X DPLL 2 Missed Clocks status (RO) + * <10> X DPLL 1 Missed Clock status (RO) + * <9..8> 00 DPLL Resync on rising and falling edges (RW) + * <7> X SDLC Loop On status (RO) + * <6> X SDLC Loop Send status (RO) + * <5> 1 Bypass counters for TxClk and RxClk (RW) + * <4..2> 000 Last Char of SDLC frame has 8 bits (RW) + * <1..0> 00 reserved + * + * 0000 0000 0010 0000 = 0x0020 + */ + + usc_OutReg( info, CCSR, 0x0020 ); + + usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA + + RECEIVE_DATA + RECEIVE_STATUS ); + + usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA + + RECEIVE_DATA + RECEIVE_STATUS ); + + usc_EnableMasterIrqBit( info ); + + if (info->bus_type == MGSL_BUS_TYPE_ISA) { + /* Enable INTEN (Port 6, Bit12) */ + /* This connects the IRQ request signal to the ISA bus */ + usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12)); + } + + if (info->params.loopback) { + info->loopback_bits = 0x300; + outw(0x0300, info->io_base + CCAR); + } + +} /* end of usc_set_async_mode() */ + +/* usc_loopback_frame() + * + * Loop back a small (2 byte) dummy SDLC frame. + * Interrupts and DMA are NOT used. The purpose of this is to + * clear any 'stale' status info left over from running in async mode. + * + * The 16C32 shows the strange behaviour of marking the 1st + * received SDLC frame with a CRC error even when there is no + * CRC error. To get around this a small dummy from of 2 bytes + * is looped back when switching from async to sync mode. + * + * Arguments: info pointer to device instance data + * Return Value: None + */ +static void usc_loopback_frame( struct mgsl_struct *info ) +{ + int i; + unsigned long oldmode = info->params.mode; + + info->params.mode = MGSL_MODE_HDLC; + + usc_DisableMasterIrqBit( info ); + + usc_set_sdlc_mode( info ); + usc_enable_loopback( info, 1 ); + + /* Write 16-bit Time Constant for BRG0 */ + usc_OutReg( info, TC0R, 0 ); + + /* Channel Control Register (CCR) + * + * <15..14> 00 Don't use 32-bit Tx Control Blocks (TCBs) + * <13> 0 Trigger Tx on SW Command Disabled + * <12> 0 Flag Preamble Disabled + * <11..10> 00 Preamble Length = 8-Bits + * <9..8> 01 Preamble Pattern = flags + * <7..6> 10 Don't use 32-bit Rx status Blocks (RSBs) + * <5> 0 Trigger Rx on SW Command Disabled + * <4..0> 0 reserved + * + * 0000 0001 0000 0000 = 0x0100 + */ + + usc_OutReg( info, CCR, 0x0100 ); + + /* SETUP RECEIVER */ + usc_RTCmd( info, RTCmd_PurgeRxFifo ); + usc_EnableReceiver(info,ENABLE_UNCONDITIONAL); + + /* SETUP TRANSMITTER */ + /* Program the Transmit Character Length Register (TCLR) */ + /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */ + usc_OutReg( info, TCLR, 2 ); + usc_RTCmd( info, RTCmd_PurgeTxFifo ); + + /* unlatch Tx status bits, and start transmit channel. 
*/ + usc_UnlatchTxstatusBits(info,TXSTATUS_ALL); + outw(0,info->io_base + DATAREG); + + /* ENABLE TRANSMITTER */ + usc_TCmd( info, TCmd_SendFrame ); + usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL); + + /* WAIT FOR RECEIVE COMPLETE */ + for (i=0 ; i<1000 ; i++) + if (usc_InReg( info, RCSR ) & (BIT8 + BIT4 + BIT3 + BIT1)) + break; + + /* clear Internal Data loopback mode */ + usc_enable_loopback(info, 0); + + usc_EnableMasterIrqBit(info); + + info->params.mode = oldmode; + +} /* end of usc_loopback_frame() */ + +/* usc_set_sync_mode() Programs the USC for SDLC communications. + * + * Arguments: info pointer to adapter info structure + * Return Value: None + */ +static void usc_set_sync_mode( struct mgsl_struct *info ) +{ + usc_loopback_frame( info ); + usc_set_sdlc_mode( info ); + + if (info->bus_type == MGSL_BUS_TYPE_ISA) { + /* Enable INTEN (Port 6, Bit12) */ + /* This connects the IRQ request signal to the ISA bus */ + usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12)); + } + + usc_enable_aux_clock(info, info->params.clock_speed); + + if (info->params.loopback) + usc_enable_loopback(info,1); + +} /* end of mgsl_set_sync_mode() */ + +/* usc_set_txidle() Set the HDLC idle mode for the transmitter. + * + * Arguments: info pointer to device instance data + * Return Value: None + */ +static void usc_set_txidle( struct mgsl_struct *info ) +{ + u16 usc_idle_mode = IDLEMODE_FLAGS; + + /* Map API idle mode to USC register bits */ + + switch( info->idle_mode ){ + case HDLC_TXIDLE_FLAGS: usc_idle_mode = IDLEMODE_FLAGS; break; + case HDLC_TXIDLE_ALT_ZEROS_ONES: usc_idle_mode = IDLEMODE_ALT_ONE_ZERO; break; + case HDLC_TXIDLE_ZEROS: usc_idle_mode = IDLEMODE_ZERO; break; + case HDLC_TXIDLE_ONES: usc_idle_mode = IDLEMODE_ONE; break; + case HDLC_TXIDLE_ALT_MARK_SPACE: usc_idle_mode = IDLEMODE_ALT_MARK_SPACE; break; + case HDLC_TXIDLE_SPACE: usc_idle_mode = IDLEMODE_SPACE; break; + case HDLC_TXIDLE_MARK: usc_idle_mode = IDLEMODE_MARK; break; + } + + info->usc_idle_mode = usc_idle_mode; + //usc_OutReg(info, TCSR, usc_idle_mode); + info->tcsr_value &= ~IDLEMODE_MASK; /* clear idle mode bits */ + info->tcsr_value += usc_idle_mode; + usc_OutReg(info, TCSR, info->tcsr_value); + + /* + * if SyncLink WAN adapter is running in external sync mode, the + * transmitter has been set to Monosync in order to try to mimic + * a true raw outbound bit stream. Monosync still sends an open/close + * sync char at the start/end of a frame. Try to match those sync + * patterns to the idle mode set here + */ + if ( info->params.mode == MGSL_MODE_RAW ) { + unsigned char syncpat = 0; + switch( info->idle_mode ) { + case HDLC_TXIDLE_FLAGS: + syncpat = 0x7e; + break; + case HDLC_TXIDLE_ALT_ZEROS_ONES: + syncpat = 0x55; + break; + case HDLC_TXIDLE_ZEROS: + case HDLC_TXIDLE_SPACE: + syncpat = 0x00; + break; + case HDLC_TXIDLE_ONES: + case HDLC_TXIDLE_MARK: + syncpat = 0xff; + break; + case HDLC_TXIDLE_ALT_MARK_SPACE: + syncpat = 0xaa; + break; + } + + usc_SetTransmitSyncChars(info,syncpat,syncpat); + } + +} /* end of usc_set_txidle() */ + +/* usc_get_serial_signals() + * + * Query the adapter for the state of the V24 status (input) signals. + * + * Arguments: info pointer to device instance data + * Return Value: None + */ +static void usc_get_serial_signals( struct mgsl_struct *info ) +{ + u16 status; + + /* clear all serial signals except DTR and RTS */ + info->serial_signals &= SerialSignal_DTR + SerialSignal_RTS; + + /* Read the Misc Interrupt status Register (MISR) to get */ + /* the V24 status signals. 
*/ + + status = usc_InReg( info, MISR ); + + /* set serial signal bits to reflect MISR */ + + if ( status & MISCSTATUS_CTS ) + info->serial_signals |= SerialSignal_CTS; + + if ( status & MISCSTATUS_DCD ) + info->serial_signals |= SerialSignal_DCD; + + if ( status & MISCSTATUS_RI ) + info->serial_signals |= SerialSignal_RI; + + if ( status & MISCSTATUS_DSR ) + info->serial_signals |= SerialSignal_DSR; + +} /* end of usc_get_serial_signals() */ + +/* usc_set_serial_signals() + * + * Set the state of DTR and RTS based on contents of + * serial_signals member of device extension. + * + * Arguments: info pointer to device instance data + * Return Value: None + */ +static void usc_set_serial_signals( struct mgsl_struct *info ) +{ + u16 Control; + unsigned char V24Out = info->serial_signals; + + /* get the current value of the Port Control Register (PCR) */ + + Control = usc_InReg( info, PCR ); + + if ( V24Out & SerialSignal_RTS ) + Control &= ~(BIT6); + else + Control |= BIT6; + + if ( V24Out & SerialSignal_DTR ) + Control &= ~(BIT4); + else + Control |= BIT4; + + usc_OutReg( info, PCR, Control ); + +} /* end of usc_set_serial_signals() */ + +/* usc_enable_async_clock() + * + * Enable the async clock at the specified frequency. + * + * Arguments: info pointer to device instance data + * data_rate data rate of clock in bps + * 0 disables the AUX clock. + * Return Value: None + */ +static void usc_enable_async_clock( struct mgsl_struct *info, u32 data_rate ) +{ + if ( data_rate ) { + /* + * Clock mode Control Register (CMCR) + * + * <15..14> 00 counter 1 Disabled + * <13..12> 00 counter 0 Disabled + * <11..10> 11 BRG1 Input is TxC Pin + * <9..8> 11 BRG0 Input is TxC Pin + * <7..6> 01 DPLL Input is BRG1 Output + * <5..3> 100 TxCLK comes from BRG0 + * <2..0> 100 RxCLK comes from BRG0 + * + * 0000 1111 0110 0100 = 0x0f64 + */ + + usc_OutReg( info, CMCR, 0x0f64 ); + + + /* + * Write 16-bit Time Constant for BRG0 + * Time Constant = (ClkSpeed / data_rate) - 1 + * ClkSpeed = 921600 (ISA), 691200 (PCI) + */ + + if ( info->bus_type == MGSL_BUS_TYPE_PCI ) + usc_OutReg( info, TC0R, (u16)((691200/data_rate) - 1) ); + else + usc_OutReg( info, TC0R, (u16)((921600/data_rate) - 1) ); + + + /* + * Hardware Configuration Register (HCR) + * Clear Bit 1, BRG0 mode = Continuous + * Set Bit 0 to enable BRG0. + */ + + usc_OutReg( info, HCR, + (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) ); + + + /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */ + + usc_OutReg( info, IOCR, + (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) ); + } else { + /* data rate == 0 so turn off BRG0 */ + usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) ); + } + +} /* end of usc_enable_async_clock() */ + +/* + * Buffer Structures: + * + * Normal memory access uses virtual addresses that can make discontiguous + * physical memory pages appear to be contiguous in the virtual address + * space (the processors memory mapping handles the conversions). + * + * DMA transfers require physically contiguous memory. This is because + * the DMA system controller and DMA bus masters deal with memory using + * only physical addresses. + * + * This causes a problem under Windows NT when large DMA buffers are + * needed. Fragmentation of the nonpaged pool prevents allocations of + * physically contiguous buffers larger than the PAGE_SIZE. + * + * However the 16C32 supports Bus Master Scatter/Gather DMA which + * allows DMA transfers to physically discontiguous buffers. 
Information + * about each data transfer buffer is contained in a memory structure + * called a 'buffer entry'. A list of buffer entries is maintained + * to track and control the use of the data transfer buffers. + * + * To support this strategy we will allocate sufficient PAGE_SIZE + * contiguous memory buffers to allow for the total required buffer + * space. + * + * The 16C32 accesses the list of buffer entries using Bus Master + * DMA. Control information is read from the buffer entries by the + * 16C32 to control data transfers. status information is written to + * the buffer entries by the 16C32 to indicate the status of completed + * transfers. + * + * The CPU writes control information to the buffer entries to control + * the 16C32 and reads status information from the buffer entries to + * determine information about received and transmitted frames. + * + * Because the CPU and 16C32 (adapter) both need simultaneous access + * to the buffer entries, the buffer entry memory is allocated with + * HalAllocateCommonBuffer(). This restricts the size of the buffer + * entry list to PAGE_SIZE. + * + * The actual data buffers on the other hand will only be accessed + * by the CPU or the adapter but not by both simultaneously. This allows + * Scatter/Gather packet based DMA procedures for using physically + * discontiguous pages. + */ + +/* + * mgsl_reset_tx_dma_buffers() + * + * Set the count for all transmit buffers to 0 to indicate the + * buffer is available for use and set the current buffer to the + * first buffer. This effectively makes all buffers free and + * discards any data in buffers. + * + * Arguments: info pointer to device instance data + * Return Value: None + */ +static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info ) +{ + unsigned int i; + + for ( i = 0; i < info->tx_buffer_count; i++ ) { + *((unsigned long *)&(info->tx_buffer_list[i].count)) = 0; + } + + info->current_tx_buffer = 0; + info->start_tx_dma_buffer = 0; + info->tx_dma_buffers_used = 0; + + info->get_tx_holding_index = 0; + info->put_tx_holding_index = 0; + info->tx_holding_count = 0; + +} /* end of mgsl_reset_tx_dma_buffers() */ + +/* + * num_free_tx_dma_buffers() + * + * returns the number of free tx dma buffers available + * + * Arguments: info pointer to device instance data + * Return Value: number of free tx dma buffers + */ +static int num_free_tx_dma_buffers(struct mgsl_struct *info) +{ + return info->tx_buffer_count - info->tx_dma_buffers_used; +} + +/* + * mgsl_reset_rx_dma_buffers() + * + * Set the count for all receive buffers to DMABUFFERSIZE + * and set the current buffer to the first buffer. This effectively + * makes all buffers free and discards any data in buffers. + * + * Arguments: info pointer to device instance data + * Return Value: None + */ +static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info ) +{ + unsigned int i; + + for ( i = 0; i < info->rx_buffer_count; i++ ) { + *((unsigned long *)&(info->rx_buffer_list[i].count)) = DMABUFFERSIZE; +// info->rx_buffer_list[i].count = DMABUFFERSIZE; +// info->rx_buffer_list[i].status = 0; + } + + info->current_rx_buffer = 0; + +} /* end of mgsl_reset_rx_dma_buffers() */ + +/* + * mgsl_free_rx_frame_buffers() + * + * Free the receive buffers used by a received SDLC + * frame such that the buffers can be reused. 
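+ * "Freeing" here means resetting each entry's count back to
+ * DMABUFFERSIZE and clearing its status; the code below does this
+ * with a single wide store covering both fields (the commented-out
+ * per-field assignments show the equivalent).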
+ * + * Arguments: + * + * info pointer to device instance data + * StartIndex index of 1st receive buffer of frame + * EndIndex index of last receive buffer of frame + * + * Return Value: None + */ +static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex ) +{ + bool Done = false; + DMABUFFERENTRY *pBufEntry; + unsigned int Index; + + /* Starting with 1st buffer entry of the frame clear the status */ + /* field and set the count field to DMA Buffer Size. */ + + Index = StartIndex; + + while( !Done ) { + pBufEntry = &(info->rx_buffer_list[Index]); + + if ( Index == EndIndex ) { + /* This is the last buffer of the frame! */ + Done = true; + } + + /* reset current buffer for reuse */ +// pBufEntry->status = 0; +// pBufEntry->count = DMABUFFERSIZE; + *((unsigned long *)&(pBufEntry->count)) = DMABUFFERSIZE; + + /* advance to next buffer entry in linked list */ + Index++; + if ( Index == info->rx_buffer_count ) + Index = 0; + } + + /* set current buffer to next buffer after last buffer of frame */ + info->current_rx_buffer = Index; + +} /* end of free_rx_frame_buffers() */ + +/* mgsl_get_rx_frame() + * + * This function attempts to return a received SDLC frame from the + * receive DMA buffers. Only frames received without errors are returned. + * + * Arguments: info pointer to device extension + * Return Value: true if frame returned, otherwise false + */ +static bool mgsl_get_rx_frame(struct mgsl_struct *info) +{ + unsigned int StartIndex, EndIndex; /* index of 1st and last buffers of Rx frame */ + unsigned short status; + DMABUFFERENTRY *pBufEntry; + unsigned int framesize = 0; + bool ReturnCode = false; + unsigned long flags; + struct tty_struct *tty = info->port.tty; + bool return_frame = false; + + /* + * current_rx_buffer points to the 1st buffer of the next available + * receive frame. To find the last buffer of the frame look for + * a non-zero status field in the buffer entries. (The status + * field is set by the 16C32 after completing a receive frame. + */ + + StartIndex = EndIndex = info->current_rx_buffer; + + while( !info->rx_buffer_list[EndIndex].status ) { + /* + * If the count field of the buffer entry is non-zero then + * this buffer has not been used. (The 16C32 clears the count + * field when it starts using the buffer.) If an unused buffer + * is encountered then there are no frames available. + */ + + if ( info->rx_buffer_list[EndIndex].count ) + goto Cleanup; + + /* advance to next buffer entry in linked list */ + EndIndex++; + if ( EndIndex == info->rx_buffer_count ) + EndIndex = 0; + + /* if entire list searched then no frame available */ + if ( EndIndex == StartIndex ) { + /* If this occurs then something bad happened, + * all buffers have been 'used' but none mark + * the end of a frame. Reset buffers and receiver. 
+ */ + + if ( info->rx_enabled ){ + spin_lock_irqsave(&info->irq_spinlock,flags); + usc_start_receiver(info); + spin_unlock_irqrestore(&info->irq_spinlock,flags); + } + goto Cleanup; + } + } + + + /* check status of receive frame */ + + status = info->rx_buffer_list[EndIndex].status; + + if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN + + RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) { + if ( status & RXSTATUS_SHORT_FRAME ) + info->icount.rxshort++; + else if ( status & RXSTATUS_ABORT ) + info->icount.rxabort++; + else if ( status & RXSTATUS_OVERRUN ) + info->icount.rxover++; + else { + info->icount.rxcrc++; + if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) + return_frame = true; + } + framesize = 0; +#if SYNCLINK_GENERIC_HDLC + { + info->netdev->stats.rx_errors++; + info->netdev->stats.rx_frame_errors++; + } +#endif + } else + return_frame = true; + + if ( return_frame ) { + /* receive frame has no errors, get frame size. + * The frame size is the starting value of the RCC (which was + * set to 0xffff) minus the ending value of the RCC (decremented + * once for each receive character) minus 2 for the 16-bit CRC. + */ + + framesize = RCLRVALUE - info->rx_buffer_list[EndIndex].rcc; + + /* adjust frame size for CRC if any */ + if ( info->params.crc_type == HDLC_CRC_16_CCITT ) + framesize -= 2; + else if ( info->params.crc_type == HDLC_CRC_32_CCITT ) + framesize -= 4; + } + + if ( debug_level >= DEBUG_LEVEL_BH ) + printk("%s(%d):mgsl_get_rx_frame(%s) status=%04X size=%d\n", + __FILE__,__LINE__,info->device_name,status,framesize); + + if ( debug_level >= DEBUG_LEVEL_DATA ) + mgsl_trace_block(info,info->rx_buffer_list[StartIndex].virt_addr, + min_t(int, framesize, DMABUFFERSIZE),0); + + if (framesize) { + if ( ( (info->params.crc_type & HDLC_CRC_RETURN_EX) && + ((framesize+1) > info->max_frame_size) ) || + (framesize > info->max_frame_size) ) + info->icount.rxlong++; + else { + /* copy dma buffer(s) to contiguous intermediate buffer */ + int copy_count = framesize; + int index = StartIndex; + unsigned char *ptmp = info->intermediate_rxbuffer; + + if ( !(status & RXSTATUS_CRC_ERROR)) + info->icount.rxok++; + + while(copy_count) { + int partial_count; + if ( copy_count > DMABUFFERSIZE ) + partial_count = DMABUFFERSIZE; + else + partial_count = copy_count; + + pBufEntry = &(info->rx_buffer_list[index]); + memcpy( ptmp, pBufEntry->virt_addr, partial_count ); + ptmp += partial_count; + copy_count -= partial_count; + + if ( ++index == info->rx_buffer_count ) + index = 0; + } + + if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) { + ++framesize; + *ptmp = (status & RXSTATUS_CRC_ERROR ? + RX_CRC_ERROR : + RX_OK); + + if ( debug_level >= DEBUG_LEVEL_DATA ) + printk("%s(%d):mgsl_get_rx_frame(%s) rx frame status=%d\n", + __FILE__,__LINE__,info->device_name, + *ptmp); + } + +#if SYNCLINK_GENERIC_HDLC + if (info->netcount) + hdlcdev_rx(info,info->intermediate_rxbuffer,framesize); + else +#endif + ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize); + } + } + /* Free the buffers used by this frame. */ + mgsl_free_rx_frame_buffers( info, StartIndex, EndIndex ); + + ReturnCode = true; + +Cleanup: + + if ( info->rx_enabled && info->rx_overflow ) { + /* The receiver needs to restarted because of + * a receive overflow (buffer or FIFO). If the + * receive buffers are now empty, then restart receiver. 
+ */ + + if ( !info->rx_buffer_list[EndIndex].status && + info->rx_buffer_list[EndIndex].count ) { + spin_lock_irqsave(&info->irq_spinlock,flags); + usc_start_receiver(info); + spin_unlock_irqrestore(&info->irq_spinlock,flags); + } + } + + return ReturnCode; + +} /* end of mgsl_get_rx_frame() */ + +/* mgsl_get_raw_rx_frame() + * + * This function attempts to return a received frame from the + * receive DMA buffers when running in external loop mode. In this mode, + * we will return at most one DMABUFFERSIZE frame to the application. + * The USC receiver is triggering off of DCD going active to start a new + * frame, and DCD going inactive to terminate the frame (similar to + * processing a closing flag character). + * + * In this routine, we will return DMABUFFERSIZE "chunks" at a time. + * If DCD goes inactive, the last Rx DMA Buffer will have a non-zero + * status field and the RCC field will indicate the length of the + * entire received frame. We take this RCC field and get the modulus + * of RCC and DMABUFFERSIZE to determine if number of bytes in the + * last Rx DMA buffer and return that last portion of the frame. + * + * Arguments: info pointer to device extension + * Return Value: true if frame returned, otherwise false + */ +static bool mgsl_get_raw_rx_frame(struct mgsl_struct *info) +{ + unsigned int CurrentIndex, NextIndex; + unsigned short status; + DMABUFFERENTRY *pBufEntry; + unsigned int framesize = 0; + bool ReturnCode = false; + unsigned long flags; + struct tty_struct *tty = info->port.tty; + + /* + * current_rx_buffer points to the 1st buffer of the next available + * receive frame. The status field is set by the 16C32 after + * completing a receive frame. If the status field of this buffer + * is zero, either the USC is still filling this buffer or this + * is one of a series of buffers making up a received frame. + * + * If the count field of this buffer is zero, the USC is either + * using this buffer or has used this buffer. Look at the count + * field of the next buffer. If that next buffer's count is + * non-zero, the USC is still actively using the current buffer. + * Otherwise, if the next buffer's count field is zero, the + * current buffer is complete and the USC is using the next + * buffer. + */ + CurrentIndex = NextIndex = info->current_rx_buffer; + ++NextIndex; + if ( NextIndex == info->rx_buffer_count ) + NextIndex = 0; + + if ( info->rx_buffer_list[CurrentIndex].status != 0 || + (info->rx_buffer_list[CurrentIndex].count == 0 && + info->rx_buffer_list[NextIndex].count == 0)) { + /* + * Either the status field of this dma buffer is non-zero + * (indicating the last buffer of a receive frame) or the next + * buffer is marked as in use -- implying this buffer is complete + * and an intermediate buffer for this received frame. + */ + + status = info->rx_buffer_list[CurrentIndex].status; + + if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN + + RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) { + if ( status & RXSTATUS_SHORT_FRAME ) + info->icount.rxshort++; + else if ( status & RXSTATUS_ABORT ) + info->icount.rxabort++; + else if ( status & RXSTATUS_OVERRUN ) + info->icount.rxover++; + else + info->icount.rxcrc++; + framesize = 0; + } else { + /* + * A receive frame is available, get frame size and status. + * + * The frame size is the starting value of the RCC (which was + * set to 0xffff) minus the ending value of the RCC (decremented + * once for each receive character) minus 2 or 4 for the 16-bit + * or 32-bit CRC. 
+ *
+ * If the status field is zero, this is an intermediate buffer.
+ * Its size is 4K.
+ *
+ * If the DMA Buffer Entry's Status field is non-zero, the
+ * receive operation completed normally (ie: DCD dropped). The
+ * RCC field is valid and holds the received frame size.
+ * It is possible that the RCC field will be zero on a DMA buffer
+ * entry with a non-zero status. This can occur if the total
+ * frame size (number of bytes between the time DCD goes active
+ * to the time DCD goes inactive) exceeds 65535 bytes. In this
+ * case the 16C32 has underrun on the RCC count and appears to
+ * stop updating this counter to let us know the actual received
+ * frame size. If this happens (non-zero status and zero RCC),
+ * simply return the entire RxDMA Buffer
+ */
+ if ( status ) {
+ /*
+ * In the event that the final RxDMA Buffer is
+ * terminated with a non-zero status and the RCC
+ * field is zero, we interpret this as the RCC
+ * having underflowed (received frame > 65535 bytes).
+ *
+ * Signal the event to the user by passing back
+ * a status of RxStatus_CrcError, returning the full
+ * buffer, and let the app figure out what data is
+ * actually valid.
+ */
+ if ( info->rx_buffer_list[CurrentIndex].rcc )
+ framesize = RCLRVALUE - info->rx_buffer_list[CurrentIndex].rcc;
+ else
+ framesize = DMABUFFERSIZE;
+ }
+ else
+ framesize = DMABUFFERSIZE;
+ }
+
+ if ( framesize > DMABUFFERSIZE ) {
+ /*
+ * if running in raw sync mode, ISR handler for
+ * End Of Buffer events terminates all buffers at 4K.
+ * If this frame size is said to be >4K, get the
+ * actual number of bytes of the frame in this buffer.
+ */
+ framesize = framesize % DMABUFFERSIZE;
+ }
+
+
+ if ( debug_level >= DEBUG_LEVEL_BH )
+ printk("%s(%d):mgsl_get_raw_rx_frame(%s) status=%04X size=%d\n",
+ __FILE__,__LINE__,info->device_name,status,framesize);
+
+ if ( debug_level >= DEBUG_LEVEL_DATA )
+ mgsl_trace_block(info,info->rx_buffer_list[CurrentIndex].virt_addr,
+ min_t(int, framesize, DMABUFFERSIZE),0);
+
+ if (framesize) {
+ /* copy dma buffer(s) to contiguous intermediate buffer */
+ /* NOTE: we never copy more than DMABUFFERSIZE bytes */
+
+ pBufEntry = &(info->rx_buffer_list[CurrentIndex]);
+ memcpy( info->intermediate_rxbuffer, pBufEntry->virt_addr, framesize);
+ info->icount.rxok++;
+
+ ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
+ }
+
+ /* Free the buffers used by this frame. */
+ mgsl_free_rx_frame_buffers( info, CurrentIndex, CurrentIndex );
+
+ ReturnCode = true;
+ }
+
+
+ if ( info->rx_enabled && info->rx_overflow ) {
+ /* The receiver needs to be restarted because of
+ * a receive overflow (buffer or FIFO). If the
+ * receive buffers are now empty, then restart receiver.
+ */
+
+ if ( !info->rx_buffer_list[CurrentIndex].status &&
+ info->rx_buffer_list[CurrentIndex].count ) {
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ usc_start_receiver(info);
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+ }
+ }
+
+ return ReturnCode;
+
+} /* end of mgsl_get_raw_rx_frame() */
+
+/* mgsl_load_tx_dma_buffer()
+ *
+ * Load the transmit DMA buffer with the specified data.
+ * + * Arguments: + * + * info pointer to device extension + * Buffer pointer to buffer containing frame to load + * BufferSize size in bytes of frame in Buffer + * + * Return Value: None + */ +static void mgsl_load_tx_dma_buffer(struct mgsl_struct *info, + const char *Buffer, unsigned int BufferSize) +{ + unsigned short Copycount; + unsigned int i = 0; + DMABUFFERENTRY *pBufEntry; + + if ( debug_level >= DEBUG_LEVEL_DATA ) + mgsl_trace_block(info,Buffer, min_t(int, BufferSize, DMABUFFERSIZE), 1); + + if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) { + /* set CMR:13 to start transmit when + * next GoAhead (abort) is received + */ + info->cmr_value |= BIT13; + } + + /* begin loading the frame in the next available tx dma + * buffer, remember it's starting location for setting + * up tx dma operation + */ + i = info->current_tx_buffer; + info->start_tx_dma_buffer = i; + + /* Setup the status and RCC (Frame Size) fields of the 1st */ + /* buffer entry in the transmit DMA buffer list. */ + + info->tx_buffer_list[i].status = info->cmr_value & 0xf000; + info->tx_buffer_list[i].rcc = BufferSize; + info->tx_buffer_list[i].count = BufferSize; + + /* Copy frame data from 1st source buffer to the DMA buffers. */ + /* The frame data may span multiple DMA buffers. */ + + while( BufferSize ){ + /* Get a pointer to next DMA buffer entry. */ + pBufEntry = &info->tx_buffer_list[i++]; + + if ( i == info->tx_buffer_count ) + i=0; + + /* Calculate the number of bytes that can be copied from */ + /* the source buffer to this DMA buffer. */ + if ( BufferSize > DMABUFFERSIZE ) + Copycount = DMABUFFERSIZE; + else + Copycount = BufferSize; + + /* Actually copy data from source buffer to DMA buffer. */ + /* Also set the data count for this individual DMA buffer. */ + if ( info->bus_type == MGSL_BUS_TYPE_PCI ) + mgsl_load_pci_memory(pBufEntry->virt_addr, Buffer,Copycount); + else + memcpy(pBufEntry->virt_addr, Buffer, Copycount); + + pBufEntry->count = Copycount; + + /* Advance source pointer and reduce remaining data count. */ + Buffer += Copycount; + BufferSize -= Copycount; + + ++info->tx_dma_buffers_used; + } + + /* remember next available tx dma buffer */ + info->current_tx_buffer = i; + +} /* end of mgsl_load_tx_dma_buffer() */ + +/* + * mgsl_register_test() + * + * Performs a register test of the 16C32. + * + * Arguments: info pointer to device instance data + * Return Value: true if test passed, otherwise false + */ +static bool mgsl_register_test( struct mgsl_struct *info ) +{ + static unsigned short BitPatterns[] = + { 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f }; + static unsigned int Patterncount = ARRAY_SIZE(BitPatterns); + unsigned int i; + bool rc = true; + unsigned long flags; + + spin_lock_irqsave(&info->irq_spinlock,flags); + usc_reset(info); + + /* Verify the reset state of some registers. */ + + if ( (usc_InReg( info, SICR ) != 0) || + (usc_InReg( info, IVR ) != 0) || + (usc_InDmaReg( info, DIVR ) != 0) ){ + rc = false; + } + + if ( rc ){ + /* Write bit patterns to various registers but do it out of */ + /* sync, then read back and verify values. 
*/ + + for ( i = 0 ; i < Patterncount ; i++ ) { + usc_OutReg( info, TC0R, BitPatterns[i] ); + usc_OutReg( info, TC1R, BitPatterns[(i+1)%Patterncount] ); + usc_OutReg( info, TCLR, BitPatterns[(i+2)%Patterncount] ); + usc_OutReg( info, RCLR, BitPatterns[(i+3)%Patterncount] ); + usc_OutReg( info, RSR, BitPatterns[(i+4)%Patterncount] ); + usc_OutDmaReg( info, TBCR, BitPatterns[(i+5)%Patterncount] ); + + if ( (usc_InReg( info, TC0R ) != BitPatterns[i]) || + (usc_InReg( info, TC1R ) != BitPatterns[(i+1)%Patterncount]) || + (usc_InReg( info, TCLR ) != BitPatterns[(i+2)%Patterncount]) || + (usc_InReg( info, RCLR ) != BitPatterns[(i+3)%Patterncount]) || + (usc_InReg( info, RSR ) != BitPatterns[(i+4)%Patterncount]) || + (usc_InDmaReg( info, TBCR ) != BitPatterns[(i+5)%Patterncount]) ){ + rc = false; + break; + } + } + } + + usc_reset(info); + spin_unlock_irqrestore(&info->irq_spinlock,flags); + + return rc; + +} /* end of mgsl_register_test() */ + +/* mgsl_irq_test() Perform interrupt test of the 16C32. + * + * Arguments: info pointer to device instance data + * Return Value: true if test passed, otherwise false + */ +static bool mgsl_irq_test( struct mgsl_struct *info ) +{ + unsigned long EndTime; + unsigned long flags; + + spin_lock_irqsave(&info->irq_spinlock,flags); + usc_reset(info); + + /* + * Setup 16C32 to interrupt on TxC pin (14MHz clock) transition. + * The ISR sets irq_occurred to true. + */ + + info->irq_occurred = false; + + /* Enable INTEN gate for ISA adapter (Port 6, Bit12) */ + /* Enable INTEN (Port 6, Bit12) */ + /* This connects the IRQ request signal to the ISA bus */ + /* on the ISA adapter. This has no effect for the PCI adapter */ + usc_OutReg( info, PCR, (unsigned short)((usc_InReg(info, PCR) | BIT13) & ~BIT12) ); + + usc_EnableMasterIrqBit(info); + usc_EnableInterrupts(info, IO_PIN); + usc_ClearIrqPendingBits(info, IO_PIN); + + usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED); + usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE); + + spin_unlock_irqrestore(&info->irq_spinlock,flags); + + EndTime=100; + while( EndTime-- && !info->irq_occurred ) { + msleep_interruptible(10); + } + + spin_lock_irqsave(&info->irq_spinlock,flags); + usc_reset(info); + spin_unlock_irqrestore(&info->irq_spinlock,flags); + + return info->irq_occurred; + +} /* end of mgsl_irq_test() */ + +/* mgsl_dma_test() + * + * Perform a DMA test of the 16C32. A small frame is + * transmitted via DMA from a transmit buffer to a receive buffer + * using single buffer DMA mode. + * + * Arguments: info pointer to device instance data + * Return Value: true if test passed, otherwise false + */ +static bool mgsl_dma_test( struct mgsl_struct *info ) +{ + unsigned short FifoLevel; + unsigned long phys_addr; + unsigned int FrameSize; + unsigned int i; + char *TmpPtr; + bool rc = true; + unsigned short status=0; + unsigned long EndTime; + unsigned long flags; + MGSL_PARAMS tmp_params; + + /* save current port options */ + memcpy(&tmp_params,&info->params,sizeof(MGSL_PARAMS)); + /* load default port options */ + memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS)); + +#define TESTFRAMESIZE 40 + + spin_lock_irqsave(&info->irq_spinlock,flags); + + /* setup 16C32 for SDLC DMA transfer mode */ + + usc_reset(info); + usc_set_sdlc_mode(info); + usc_enable_loopback(info,1); + + /* Reprogram the RDMR so that the 16C32 does NOT clear the count + * field of the buffer entry after fetching buffer address. 
This + * way we can detect a DMA failure for a DMA read (which should be + * non-destructive to system memory) before we try and write to + * memory (where a failure could corrupt system memory). + */ + + /* Receive DMA mode Register (RDMR) + * + * <15..14> 11 DMA mode = Linked List Buffer mode + * <13> 1 RSBinA/L = store Rx status Block in List entry + * <12> 0 1 = Clear count of List Entry after fetching + * <11..10> 00 Address mode = Increment + * <9> 1 Terminate Buffer on RxBound + * <8> 0 Bus Width = 16bits + * <7..0> ? status Bits (write as 0s) + * + * 1110 0010 0000 0000 = 0xe200 + */ + + usc_OutDmaReg( info, RDMR, 0xe200 ); + + spin_unlock_irqrestore(&info->irq_spinlock,flags); + + + /* SETUP TRANSMIT AND RECEIVE DMA BUFFERS */ + + FrameSize = TESTFRAMESIZE; + + /* setup 1st transmit buffer entry: */ + /* with frame size and transmit control word */ + + info->tx_buffer_list[0].count = FrameSize; + info->tx_buffer_list[0].rcc = FrameSize; + info->tx_buffer_list[0].status = 0x4000; + + /* build a transmit frame in 1st transmit DMA buffer */ + + TmpPtr = info->tx_buffer_list[0].virt_addr; + for (i = 0; i < FrameSize; i++ ) + *TmpPtr++ = i; + + /* setup 1st receive buffer entry: */ + /* clear status, set max receive buffer size */ + + info->rx_buffer_list[0].status = 0; + info->rx_buffer_list[0].count = FrameSize + 4; + + /* zero out the 1st receive buffer */ + + memset( info->rx_buffer_list[0].virt_addr, 0, FrameSize + 4 ); + + /* Set count field of next buffer entries to prevent */ + /* 16C32 from using buffers after the 1st one. */ + + info->tx_buffer_list[1].count = 0; + info->rx_buffer_list[1].count = 0; + + + /***************************/ + /* Program 16C32 receiver. */ + /***************************/ + + spin_lock_irqsave(&info->irq_spinlock,flags); + + /* setup DMA transfers */ + usc_RTCmd( info, RTCmd_PurgeRxFifo ); + + /* program 16C32 receiver with physical address of 1st DMA buffer entry */ + phys_addr = info->rx_buffer_list[0].phys_entry; + usc_OutDmaReg( info, NRARL, (unsigned short)phys_addr ); + usc_OutDmaReg( info, NRARU, (unsigned short)(phys_addr >> 16) ); + + /* Clear the Rx DMA status bits (read RDMR) and start channel */ + usc_InDmaReg( info, RDMR ); + usc_DmaCmd( info, DmaCmd_InitRxChannel ); + + /* Enable Receiver (RMR <1..0> = 10) */ + usc_OutReg( info, RMR, (unsigned short)((usc_InReg(info, RMR) & 0xfffc) | 0x0002) ); + + spin_unlock_irqrestore(&info->irq_spinlock,flags); + + + /*************************************************************/ + /* WAIT FOR RECEIVER TO DMA ALL PARAMETERS FROM BUFFER ENTRY */ + /*************************************************************/ + + /* Wait 100ms for interrupt. */ + EndTime = jiffies + msecs_to_jiffies(100); + + for(;;) { + if (time_after(jiffies, EndTime)) { + rc = false; + break; + } + + spin_lock_irqsave(&info->irq_spinlock,flags); + status = usc_InDmaReg( info, RDMR ); + spin_unlock_irqrestore(&info->irq_spinlock,flags); + + if ( !(status & BIT4) && (status & BIT5) ) { + /* INITG (BIT 4) is inactive (no entry read in progress) AND */ + /* BUSY (BIT 5) is active (channel still active). */ + /* This means the buffer entry read has completed. */ + break; + } + } + + + /******************************/ + /* Program 16C32 transmitter. 
*/ + /******************************/ + + spin_lock_irqsave(&info->irq_spinlock,flags); + + /* Program the Transmit Character Length Register (TCLR) */ + /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */ + + usc_OutReg( info, TCLR, (unsigned short)info->tx_buffer_list[0].count ); + usc_RTCmd( info, RTCmd_PurgeTxFifo ); + + /* Program the address of the 1st DMA Buffer Entry in linked list */ + + phys_addr = info->tx_buffer_list[0].phys_entry; + usc_OutDmaReg( info, NTARL, (unsigned short)phys_addr ); + usc_OutDmaReg( info, NTARU, (unsigned short)(phys_addr >> 16) ); + + /* unlatch Tx status bits, and start transmit channel. */ + + usc_OutReg( info, TCSR, (unsigned short)(( usc_InReg(info, TCSR) & 0x0f00) | 0xfa) ); + usc_DmaCmd( info, DmaCmd_InitTxChannel ); + + /* wait for DMA controller to fill transmit FIFO */ + + usc_TCmd( info, TCmd_SelectTicrTxFifostatus ); + + spin_unlock_irqrestore(&info->irq_spinlock,flags); + + + /**********************************/ + /* WAIT FOR TRANSMIT FIFO TO FILL */ + /**********************************/ + + /* Wait 100ms */ + EndTime = jiffies + msecs_to_jiffies(100); + + for(;;) { + if (time_after(jiffies, EndTime)) { + rc = false; + break; + } + + spin_lock_irqsave(&info->irq_spinlock,flags); + FifoLevel = usc_InReg(info, TICR) >> 8; + spin_unlock_irqrestore(&info->irq_spinlock,flags); + + if ( FifoLevel < 16 ) + break; + else + if ( FrameSize < 32 ) { + /* This frame is smaller than the entire transmit FIFO */ + /* so wait for the entire frame to be loaded. */ + if ( FifoLevel <= (32 - FrameSize) ) + break; + } + } + + + if ( rc ) + { + /* Enable 16C32 transmitter. */ + + spin_lock_irqsave(&info->irq_spinlock,flags); + + /* Transmit mode Register (TMR), <1..0> = 10, Enable Transmitter */ + usc_TCmd( info, TCmd_SendFrame ); + usc_OutReg( info, TMR, (unsigned short)((usc_InReg(info, TMR) & 0xfffc) | 0x0002) ); + + spin_unlock_irqrestore(&info->irq_spinlock,flags); + + + /******************************/ + /* WAIT FOR TRANSMIT COMPLETE */ + /******************************/ + + /* Wait 100ms */ + EndTime = jiffies + msecs_to_jiffies(100); + + /* While timer not expired wait for transmit complete */ + + spin_lock_irqsave(&info->irq_spinlock,flags); + status = usc_InReg( info, TCSR ); + spin_unlock_irqrestore(&info->irq_spinlock,flags); + + while ( !(status & (BIT6+BIT5+BIT4+BIT2+BIT1)) ) { + if (time_after(jiffies, EndTime)) { + rc = false; + break; + } + + spin_lock_irqsave(&info->irq_spinlock,flags); + status = usc_InReg( info, TCSR ); + spin_unlock_irqrestore(&info->irq_spinlock,flags); + } + } + + + if ( rc ){ + /* CHECK FOR TRANSMIT ERRORS */ + if ( status & (BIT5 + BIT1) ) + rc = false; + } + + if ( rc ) { + /* WAIT FOR RECEIVE COMPLETE */ + + /* Wait 100ms */ + EndTime = jiffies + msecs_to_jiffies(100); + + /* Wait for 16C32 to write receive status to buffer entry. 
*/ + status=info->rx_buffer_list[0].status; + while ( status == 0 ) { + if (time_after(jiffies, EndTime)) { + rc = false; + break; + } + status=info->rx_buffer_list[0].status; + } + } + + + if ( rc ) { + /* CHECK FOR RECEIVE ERRORS */ + status = info->rx_buffer_list[0].status; + + if ( status & (BIT8 + BIT3 + BIT1) ) { + /* receive error has occurred */ + rc = false; + } else { + if ( memcmp( info->tx_buffer_list[0].virt_addr , + info->rx_buffer_list[0].virt_addr, FrameSize ) ){ + rc = false; + } + } + } + + spin_lock_irqsave(&info->irq_spinlock,flags); + usc_reset( info ); + spin_unlock_irqrestore(&info->irq_spinlock,flags); + + /* restore current port options */ + memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS)); + + return rc; + +} /* end of mgsl_dma_test() */ + +/* mgsl_adapter_test() + * + * Perform the register, IRQ, and DMA tests for the 16C32. + * + * Arguments: info pointer to device instance data + * Return Value: 0 if success, otherwise -ENODEV + */ +static int mgsl_adapter_test( struct mgsl_struct *info ) +{ + if ( debug_level >= DEBUG_LEVEL_INFO ) + printk( "%s(%d):Testing device %s\n", + __FILE__,__LINE__,info->device_name ); + + if ( !mgsl_register_test( info ) ) { + info->init_error = DiagStatus_AddressFailure; + printk( "%s(%d):Register test failure for device %s Addr=%04X\n", + __FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) ); + return -ENODEV; + } + + if ( !mgsl_irq_test( info ) ) { + info->init_error = DiagStatus_IrqFailure; + printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n", + __FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) ); + return -ENODEV; + } + + if ( !mgsl_dma_test( info ) ) { + info->init_error = DiagStatus_DmaFailure; + printk( "%s(%d):DMA test failure for device %s DMA=%d\n", + __FILE__,__LINE__,info->device_name, (unsigned short)(info->dma_level) ); + return -ENODEV; + } + + if ( debug_level >= DEBUG_LEVEL_INFO ) + printk( "%s(%d):device %s passed diagnostics\n", + __FILE__,__LINE__,info->device_name ); + + return 0; + +} /* end of mgsl_adapter_test() */ + +/* mgsl_memory_test() + * + * Test the shared memory on a PCI adapter. + * + * Arguments: info pointer to device instance data + * Return Value: true if test passed, otherwise false + */ +static bool mgsl_memory_test( struct mgsl_struct *info ) +{ + static unsigned long BitPatterns[] = + { 0x0, 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999, 0xffffffff, 0x12345678 }; + unsigned long Patterncount = ARRAY_SIZE(BitPatterns); + unsigned long i; + unsigned long TestLimit = SHARED_MEM_ADDRESS_SIZE/sizeof(unsigned long); + unsigned long * TestAddr; + + if ( info->bus_type != MGSL_BUS_TYPE_PCI ) + return true; + + TestAddr = (unsigned long *)info->memory_base; + + /* Test data lines with test pattern at one location. */ + + for ( i = 0 ; i < Patterncount ; i++ ) { + *TestAddr = BitPatterns[i]; + if ( *TestAddr != BitPatterns[i] ) + return false; + } + + /* Test address lines with incrementing pattern over */ + /* entire address range. */ + + for ( i = 0 ; i < TestLimit ; i++ ) { + *TestAddr = i * 4; + TestAddr++; + } + + TestAddr = (unsigned long *)info->memory_base; + + for ( i = 0 ; i < TestLimit ; i++ ) { + if ( *TestAddr != i * 4 ) + return false; + TestAddr++; + } + + memset( info->memory_base, 0, SHARED_MEM_ADDRESS_SIZE ); + + return true; + +} /* End Of mgsl_memory_test() */ + + +/* mgsl_load_pci_memory() + * + * Load a large block of data into the PCI shared memory. 
+ *
+ * Use this instead of memcpy() or memmove() to move data
+ * into the PCI shared memory.
+ *
+ * Notes:
+ *
+ * This function prevents the PCI9050 interface chip from hogging
+ * the adapter local bus, which can starve the 16C32 by preventing
+ * 16C32 bus master cycles.
+ *
+ * The PCI9050 documentation says that the 9050 will always release
+ * control of the local bus after completing the current read
+ * or write operation.
+ *
+ * It appears that as long as the PCI9050 write FIFO is full, the
+ * PCI9050 treats all of the writes as a single burst transaction
+ * and will not release the bus. This causes DMA latency problems
+ * at high speeds when copying large data blocks to the shared
+ * memory.
+ *
+ * This function, in effect, breaks a large shared memory write
+ * into multiple transactions by interleaving a shared memory read
+ * which will flush the write FIFO and 'complete' the write
+ * transaction. This allows any pending DMA request to gain control
+ * of the local bus in a timely fashion.
+ *
+ * Arguments:
+ *
+ * TargetPtr pointer to target address in PCI shared memory
+ * SourcePtr pointer to source buffer for data
+ * count count in bytes of data to copy
+ *
+ * Return Value: None
+ */
+static void mgsl_load_pci_memory( char* TargetPtr, const char* SourcePtr,
+ unsigned short count )
+{
+ /* 16 32-bit writes @ 60ns each = 960ns max latency on local bus */
+#define PCI_LOAD_INTERVAL 64
+
+ unsigned short Intervalcount = count / PCI_LOAD_INTERVAL;
+ unsigned short Index;
+ unsigned long Dummy;
+
+ for ( Index = 0 ; Index < Intervalcount ; Index++ )
+ {
+ memcpy(TargetPtr, SourcePtr, PCI_LOAD_INTERVAL);
+ Dummy = *((volatile unsigned long *)TargetPtr);
+ TargetPtr += PCI_LOAD_INTERVAL;
+ SourcePtr += PCI_LOAD_INTERVAL;
+ }
+
+ memcpy( TargetPtr, SourcePtr, count % PCI_LOAD_INTERVAL );
+
+} /* End Of mgsl_load_pci_memory() */
+
+static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit)
+{
+ int i;
+ int linecount;
+ if (xmit)
+ printk("%s tx data:\n",info->device_name);
+ else
+ printk("%s rx data:\n",info->device_name);
+
+ while(count) {
+ if (count > 16)
+ linecount = 16;
+ else
+ linecount = count;
+
+ for(i=0;i<linecount;i++)
+ printk("%02X ",(unsigned char)data[i]);
+ for(;i<17;i++)
+ printk(" ");
+ for(i=0;i<linecount;i++) {
+ if (data[i]>=040 && data[i]<=0176)
+ printk("%c",data[i]);
+ else
+ printk(".");
+ }
+ printk("\n");
+
+ data += linecount;
+ count -= linecount;
+ }
+} /* end of mgsl_trace_block() */
+
+/* mgsl_tx_timeout()
+ *
+ * called when HDLC frame times out
+ * update stats and do tx completion processing
+ *
+ * Arguments: context pointer to device instance data
+ * Return Value: None
+ */
+static void mgsl_tx_timeout(unsigned long context)
+{
+ struct mgsl_struct *info = (struct mgsl_struct*)context;
+ unsigned long flags;
+
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk( "%s(%d):mgsl_tx_timeout(%s)\n",
+ __FILE__,__LINE__,info->device_name);
+ if(info->tx_active &&
+ (info->params.mode == MGSL_MODE_HDLC ||
+ info->params.mode == MGSL_MODE_RAW) ) {
+ info->icount.txtimeout++;
+ }
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ info->tx_active = false;
+ info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
+
+ if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
+ usc_loopmode_cancel_transmit( info );
+
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+
+#if SYNCLINK_GENERIC_HDLC
+ if (info->netcount)
+ hdlcdev_tx_done(info);
+ else
+#endif
+ mgsl_bh_transmit(info);
+
+} /* end of mgsl_tx_timeout() */
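+
+/*
+ * Worked example for mgsl_load_pci_memory() above (illustrative numbers
+ * only, assuming the 4K DMABUFFERSIZE used for this driver's DMA
+ * buffers): copying one 4096 byte DMA buffer with PCI_LOAD_INTERVAL = 64
+ * splits the copy into 4096/64 = 64 bursts. Each burst is 16 32-bit
+ * writes (roughly 960ns of local bus time, per the comment above)
+ * followed by one dummy read that flushes the PCI9050 write FIFO, so a
+ * pending 16C32 DMA request can win the local bus between bursts instead
+ * of stalling for the entire 4K copy.
+ */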
+ +/* signal that there are no more frames to send, so that + * line is 'released' by echoing RxD to TxD when current + * transmission is complete (or immediately if no tx in progress). + */ +static int mgsl_loopmode_send_done( struct mgsl_struct * info ) +{ + unsigned long flags; + + spin_lock_irqsave(&info->irq_spinlock,flags); + if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) { + if (info->tx_active) + info->loopmode_send_done_requested = true; + else + usc_loopmode_send_done(info); + } + spin_unlock_irqrestore(&info->irq_spinlock,flags); + + return 0; +} + +/* release the line by echoing RxD to TxD + * upon completion of a transmit frame + */ +static void usc_loopmode_send_done( struct mgsl_struct * info ) +{ + info->loopmode_send_done_requested = false; + /* clear CMR:13 to 0 to start echoing RxData to TxData */ + info->cmr_value &= ~BIT13; + usc_OutReg(info, CMR, info->cmr_value); +} + +/* abort a transmit in progress while in HDLC LoopMode + */ +static void usc_loopmode_cancel_transmit( struct mgsl_struct * info ) +{ + /* reset tx dma channel and purge TxFifo */ + usc_RTCmd( info, RTCmd_PurgeTxFifo ); + usc_DmaCmd( info, DmaCmd_ResetTxChannel ); + usc_loopmode_send_done( info ); +} + +/* for HDLC/SDLC LoopMode, setting CMR:13 after the transmitter is enabled + * is an Insert Into Loop action. Upon receipt of a GoAhead sequence (RxAbort) + * we must clear CMR:13 to begin repeating TxData to RxData + */ +static void usc_loopmode_insert_request( struct mgsl_struct * info ) +{ + info->loopmode_insert_requested = true; + + /* enable RxAbort irq. On next RxAbort, clear CMR:13 to + * begin repeating TxData on RxData (complete insertion) + */ + usc_OutReg( info, RICR, + (usc_InReg( info, RICR ) | RXSTATUS_ABORT_RECEIVED ) ); + + /* set CMR:13 to insert into loop on next GoAhead (RxAbort) */ + info->cmr_value |= BIT13; + usc_OutReg(info, CMR, info->cmr_value); +} + +/* return 1 if station is inserted into the loop, otherwise 0 + */ +static int usc_loopmode_active( struct mgsl_struct * info) +{ + return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ; +} + +#if SYNCLINK_GENERIC_HDLC + +/** + * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.) 
+ * set encoding and frame check sequence (FCS) options + * + * dev pointer to network device structure + * encoding serial encoding setting + * parity FCS setting + * + * returns 0 if success, otherwise error code + */ +static int hdlcdev_attach(struct net_device *dev, unsigned short encoding, + unsigned short parity) +{ + struct mgsl_struct *info = dev_to_port(dev); + unsigned char new_encoding; + unsigned short new_crctype; + + /* return error if TTY interface open */ + if (info->port.count) + return -EBUSY; + + switch (encoding) + { + case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break; + case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break; + case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break; + case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break; + case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break; + default: return -EINVAL; + } + + switch (parity) + { + case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break; + case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break; + case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break; + default: return -EINVAL; + } + + info->params.encoding = new_encoding; + info->params.crc_type = new_crctype; + + /* if network interface up, reprogram hardware */ + if (info->netcount) + mgsl_program_hw(info); + + return 0; +} + +/** + * called by generic HDLC layer to send frame + * + * skb socket buffer containing HDLC frame + * dev pointer to network device structure + */ +static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct mgsl_struct *info = dev_to_port(dev); + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name); + + /* stop sending until this frame completes */ + netif_stop_queue(dev); + + /* copy data to device buffers */ + info->xmit_cnt = skb->len; + mgsl_load_tx_dma_buffer(info, skb->data, skb->len); + + /* update network statistics */ + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + + /* done with socket buffer, so free it */ + dev_kfree_skb(skb); + + /* save start time for transmit timeout detection */ + dev->trans_start = jiffies; + + /* start hardware transmitter if necessary */ + spin_lock_irqsave(&info->irq_spinlock,flags); + if (!info->tx_active) + usc_start_transmitter(info); + spin_unlock_irqrestore(&info->irq_spinlock,flags); + + return NETDEV_TX_OK; +} + +/** + * called by network layer when interface enabled + * claim resources and initialize hardware + * + * dev pointer to network device structure + * + * returns 0 if success, otherwise error code + */ +static int hdlcdev_open(struct net_device *dev) +{ + struct mgsl_struct *info = dev_to_port(dev); + int rc; + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name); + + /* generic HDLC layer open processing */ + if ((rc = hdlc_open(dev))) + return rc; + + /* arbitrate between network and tty opens */ + spin_lock_irqsave(&info->netlock, flags); + if (info->port.count != 0 || info->netcount != 0) { + printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name); + spin_unlock_irqrestore(&info->netlock, flags); + return -EBUSY; + } + info->netcount=1; + spin_unlock_irqrestore(&info->netlock, flags); + + /* claim resources and init adapter */ + if ((rc = startup(info)) != 0) { + spin_lock_irqsave(&info->netlock, flags); + info->netcount=0; + spin_unlock_irqrestore(&info->netlock, flags); + return 
rc; + } + + /* assert DTR and RTS, apply hardware settings */ + info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR; + mgsl_program_hw(info); + + /* enable network layer transmit */ + dev->trans_start = jiffies; + netif_start_queue(dev); + + /* inform generic HDLC layer of current DCD status */ + spin_lock_irqsave(&info->irq_spinlock, flags); + usc_get_serial_signals(info); + spin_unlock_irqrestore(&info->irq_spinlock, flags); + if (info->serial_signals & SerialSignal_DCD) + netif_carrier_on(dev); + else + netif_carrier_off(dev); + return 0; +} + +/** + * called by network layer when interface is disabled + * shutdown hardware and release resources + * + * dev pointer to network device structure + * + * returns 0 if success, otherwise error code + */ +static int hdlcdev_close(struct net_device *dev) +{ + struct mgsl_struct *info = dev_to_port(dev); + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name); + + netif_stop_queue(dev); + + /* shutdown adapter and release resources */ + shutdown(info); + + hdlc_close(dev); + + spin_lock_irqsave(&info->netlock, flags); + info->netcount=0; + spin_unlock_irqrestore(&info->netlock, flags); + + return 0; +} + +/** + * called by network layer to process IOCTL call to network device + * + * dev pointer to network device structure + * ifr pointer to network interface request structure + * cmd IOCTL command code + * + * returns 0 if success, otherwise error code + */ +static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + const size_t size = sizeof(sync_serial_settings); + sync_serial_settings new_line; + sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync; + struct mgsl_struct *info = dev_to_port(dev); + unsigned int flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name); + + /* return error if TTY interface open */ + if (info->port.count) + return -EBUSY; + + if (cmd != SIOCWANDEV) + return hdlc_ioctl(dev, ifr, cmd); + + switch(ifr->ifr_settings.type) { + case IF_GET_IFACE: /* return current sync_serial_settings */ + + ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL; + if (ifr->ifr_settings.size < size) { + ifr->ifr_settings.size = size; /* data size wanted */ + return -ENOBUFS; + } + + flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL | + HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN | + HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL | + HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); + + switch (flags){ + case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break; + case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break; + case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_TXINT; break; + case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break; + default: new_line.clock_type = CLOCK_DEFAULT; + } + + new_line.clock_rate = info->params.clock_speed; + new_line.loopback = info->params.loopback ? 
1:0; + + if (copy_to_user(line, &new_line, size)) + return -EFAULT; + return 0; + + case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */ + + if(!capable(CAP_NET_ADMIN)) + return -EPERM; + if (copy_from_user(&new_line, line, size)) + return -EFAULT; + + switch (new_line.clock_type) + { + case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break; + case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break; + case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break; + case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break; + case CLOCK_DEFAULT: flags = info->params.flags & + (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL | + HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN | + HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL | + HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break; + default: return -EINVAL; + } + + if (new_line.loopback != 0 && new_line.loopback != 1) + return -EINVAL; + + info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL | + HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN | + HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL | + HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); + info->params.flags |= flags; + + info->params.loopback = new_line.loopback; + + if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG)) + info->params.clock_speed = new_line.clock_rate; + else + info->params.clock_speed = 0; + + /* if network interface up, reprogram hardware */ + if (info->netcount) + mgsl_program_hw(info); + return 0; + + default: + return hdlc_ioctl(dev, ifr, cmd); + } +} + +/** + * called by network layer when transmit timeout is detected + * + * dev pointer to network device structure + */ +static void hdlcdev_tx_timeout(struct net_device *dev) +{ + struct mgsl_struct *info = dev_to_port(dev); + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("hdlcdev_tx_timeout(%s)\n",dev->name); + + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; + + spin_lock_irqsave(&info->irq_spinlock,flags); + usc_stop_transmitter(info); + spin_unlock_irqrestore(&info->irq_spinlock,flags); + + netif_wake_queue(dev); +} + +/** + * called by device driver when transmit completes + * reenable network layer transmit if stopped + * + * info pointer to device instance information + */ +static void hdlcdev_tx_done(struct mgsl_struct *info) +{ + if (netif_queue_stopped(info->netdev)) + netif_wake_queue(info->netdev); +} + +/** + * called by device driver when frame received + * pass frame to network layer + * + * info pointer to device instance information + * buf pointer to buffer contianing frame data + * size count of data bytes in buf + */ +static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size) +{ + struct sk_buff *skb = dev_alloc_skb(size); + struct net_device *dev = info->netdev; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("hdlcdev_rx(%s)\n", dev->name); + + if (skb == NULL) { + printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n", + dev->name); + dev->stats.rx_dropped++; + return; + } + + memcpy(skb_put(skb, size), buf, size); + + skb->protocol = hdlc_type_trans(skb, dev); + + dev->stats.rx_packets++; + dev->stats.rx_bytes += size; + + netif_rx(skb); +} + +static const struct net_device_ops hdlcdev_ops = { + .ndo_open = hdlcdev_open, + .ndo_stop = hdlcdev_close, + .ndo_change_mtu = hdlc_change_mtu, + .ndo_start_xmit = hdlc_start_xmit, + .ndo_do_ioctl = hdlcdev_ioctl, + .ndo_tx_timeout = hdlcdev_tx_timeout, +}; + +/** + * called by device driver when adding device instance + * do generic HDLC 
initialization
+ *
+ * info pointer to device instance information
+ *
+ * returns 0 if success, otherwise error code
+ */
+static int hdlcdev_init(struct mgsl_struct *info)
+{
+ int rc;
+ struct net_device *dev;
+ hdlc_device *hdlc;
+
+ /* allocate and initialize network and HDLC layer objects */
+
+ if (!(dev = alloc_hdlcdev(info))) {
+ printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
+ return -ENOMEM;
+ }
+
+ /* for network layer reporting purposes only */
+ dev->base_addr = info->io_base;
+ dev->irq = info->irq_level;
+ dev->dma = info->dma_level;
+
+ /* network layer callbacks and settings */
+ dev->netdev_ops = &hdlcdev_ops;
+ dev->watchdog_timeo = 10 * HZ;
+ dev->tx_queue_len = 50;
+
+ /* generic HDLC layer callbacks and settings */
+ hdlc = dev_to_hdlc(dev);
+ hdlc->attach = hdlcdev_attach;
+ hdlc->xmit = hdlcdev_xmit;
+
+ /* register objects with HDLC layer */
+ if ((rc = register_hdlc_device(dev))) {
+ printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
+ free_netdev(dev);
+ return rc;
+ }
+
+ info->netdev = dev;
+ return 0;
+}
+
+/**
+ * called by device driver when removing device instance
+ * do generic HDLC cleanup
+ *
+ * info pointer to device instance information
+ */
+static void hdlcdev_exit(struct mgsl_struct *info)
+{
+ unregister_hdlc_device(info->netdev);
+ free_netdev(info->netdev);
+ info->netdev = NULL;
+}
+
+#endif /* CONFIG_HDLC */
+
+
+static int __devinit synclink_init_one (struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ struct mgsl_struct *info;
+
+ if (pci_enable_device(dev)) {
+ printk("error enabling pci device %p\n", dev);
+ return -EIO;
+ }
+
+ if (!(info = mgsl_allocate_device())) {
+ printk("can't allocate device instance data.\n");
+ return -EIO;
+ }
+
+ /* Copy user configuration info to device instance data */
+
+ info->io_base = pci_resource_start(dev, 2);
+ info->irq_level = dev->irq;
+ info->phys_memory_base = pci_resource_start(dev, 3);
+
+ /* Because ioremap only works on page boundaries we must map
+ * a larger area than is actually implemented for the LCR
+ * memory range. We map a full page starting at the page boundary.
+ */
+ info->phys_lcr_base = pci_resource_start(dev, 0);
+ info->lcr_offset = info->phys_lcr_base & (PAGE_SIZE-1);
+ info->phys_lcr_base &= ~(PAGE_SIZE-1);
+
+ info->bus_type = MGSL_BUS_TYPE_PCI;
+ info->io_addr_size = 8;
+ info->irq_flags = IRQF_SHARED;
+
+ if (dev->device == 0x0210) {
+ /* Version 1 PCI9030 based universal PCI adapter */
+ info->misc_ctrl_value = 0x007c4080;
+ info->hw_version = 1;
+ } else {
+ /* Version 0 PCI9050 based 5V PCI adapter
+ * A PCI9050 bug prevents reading LCR registers if
+ * LCR base address bit 7 is set. Maintain shadow
+ * value so we can write to LCR misc control reg.
+ */
+ info->misc_ctrl_value = 0x087e4546;
+ info->hw_version = 0;
+ }
+
+ mgsl_add_device(info);
+
+ return 0;
+}
+
+static void __devexit synclink_remove_one (struct pci_dev *dev)
+{
+}
+
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
new file mode 100644
index 00000000000..a35dd549a00
--- /dev/null
+++ b/drivers/tty/synclink_gt.c
@@ -0,0 +1,5161 @@
+/*
+ * Device driver for Microgate SyncLink GT serial adapters.
+ * + * written by Paul Fulghum for Microgate Corporation + * paulkf@microgate.com + * + * Microgate and SyncLink are trademarks of Microgate Corporation + * + * This code is released under the GNU General Public License (GPL) + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * DEBUG OUTPUT DEFINITIONS + * + * uncomment lines below to enable specific types of debug output + * + * DBGINFO information - most verbose output + * DBGERR serious errors + * DBGBH bottom half service routine debugging + * DBGISR interrupt service routine debugging + * DBGDATA output receive and transmit data + * DBGTBUF output transmit DMA buffers and registers + * DBGRBUF output receive DMA buffers and registers + */ + +#define DBGINFO(fmt) if (debug_level >= DEBUG_LEVEL_INFO) printk fmt +#define DBGERR(fmt) if (debug_level >= DEBUG_LEVEL_ERROR) printk fmt +#define DBGBH(fmt) if (debug_level >= DEBUG_LEVEL_BH) printk fmt +#define DBGISR(fmt) if (debug_level >= DEBUG_LEVEL_ISR) printk fmt +#define DBGDATA(info, buf, size, label) if (debug_level >= DEBUG_LEVEL_DATA) trace_block((info), (buf), (size), (label)) +/*#define DBGTBUF(info) dump_tbufs(info)*/ +/*#define DBGRBUF(info) dump_rbufs(info)*/ + + +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/signal.h> +#include <linux/sched.h> +#include <linux/timer.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/tty.h> +#include <linux/tty_flip.h> +#include <linux/serial.h> +#include <linux/major.h> +#include <linux/string.h> +#include <linux/fcntl.h> +#include <linux/ptrace.h> +#include <linux/ioport.h> +#include <linux/mm.h> +#include <linux/seq_file.h> +#include <linux/slab.h> +#include <linux/netdevice.h> +#include <linux/vmalloc.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/ioctl.h> +#include <linux/termios.h> +#include <linux/bitops.h> +#include <linux/workqueue.h> +#include <linux/hdlc.h> +#include <linux/synclink.h> + +#include <asm/system.h> +#include <asm/io.h> +#include <asm/irq.h> +#include <asm/dma.h> +#include <asm/types.h> +#include <asm/uaccess.h> + +#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_GT_MODULE)) +#define SYNCLINK_GENERIC_HDLC 1 +#else +#define SYNCLINK_GENERIC_HDLC 0 +#endif + +/* + * module identification + */ +static char *driver_name = "SyncLink GT"; +static char *tty_driver_name = "synclink_gt"; +static char *tty_dev_prefix = "ttySLG"; +MODULE_LICENSE("GPL"); +#define MGSL_MAGIC 0x5401 +#define MAX_DEVICES 32 + +static struct pci_device_id pci_table[] = { + {PCI_VENDOR_ID_MICROGATE, SYNCLINK_GT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, + {PCI_VENDOR_ID_MICROGATE, SYNCLINK_GT2_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, + {PCI_VENDOR_ID_MICROGATE, SYNCLINK_GT4_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, + {PCI_VENDOR_ID_MICROGATE, SYNCLINK_AC_DEVICE_ID, PCI_ANY_ID, 
PCI_ANY_ID,}, + {0,}, /* terminate list */ +}; +MODULE_DEVICE_TABLE(pci, pci_table); + +static int init_one(struct pci_dev *dev,const struct pci_device_id *ent); +static void remove_one(struct pci_dev *dev); +static struct pci_driver pci_driver = { + .name = "synclink_gt", + .id_table = pci_table, + .probe = init_one, + .remove = __devexit_p(remove_one), +}; + +static bool pci_registered; + +/* + * module configuration and status + */ +static struct slgt_info *slgt_device_list; +static int slgt_device_count; + +static int ttymajor; +static int debug_level; +static int maxframe[MAX_DEVICES]; + +module_param(ttymajor, int, 0); +module_param(debug_level, int, 0); +module_param_array(maxframe, int, NULL, 0); + +MODULE_PARM_DESC(ttymajor, "TTY major device number override: 0=auto assigned"); +MODULE_PARM_DESC(debug_level, "Debug syslog output: 0=disabled, 1 to 5=increasing detail"); +MODULE_PARM_DESC(maxframe, "Maximum frame size used by device (4096 to 65535)"); + +/* + * tty support and callbacks + */ +static struct tty_driver *serial_driver; + +static int open(struct tty_struct *tty, struct file * filp); +static void close(struct tty_struct *tty, struct file * filp); +static void hangup(struct tty_struct *tty); +static void set_termios(struct tty_struct *tty, struct ktermios *old_termios); + +static int write(struct tty_struct *tty, const unsigned char *buf, int count); +static int put_char(struct tty_struct *tty, unsigned char ch); +static void send_xchar(struct tty_struct *tty, char ch); +static void wait_until_sent(struct tty_struct *tty, int timeout); +static int write_room(struct tty_struct *tty); +static void flush_chars(struct tty_struct *tty); +static void flush_buffer(struct tty_struct *tty); +static void tx_hold(struct tty_struct *tty); +static void tx_release(struct tty_struct *tty); + +static int ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg); +static int chars_in_buffer(struct tty_struct *tty); +static void throttle(struct tty_struct * tty); +static void unthrottle(struct tty_struct * tty); +static int set_break(struct tty_struct *tty, int break_state); + +/* + * generic HDLC support and callbacks + */ +#if SYNCLINK_GENERIC_HDLC +#define dev_to_port(D) (dev_to_hdlc(D)->priv) +static void hdlcdev_tx_done(struct slgt_info *info); +static void hdlcdev_rx(struct slgt_info *info, char *buf, int size); +static int hdlcdev_init(struct slgt_info *info); +static void hdlcdev_exit(struct slgt_info *info); +#endif + + +/* + * device specific structures, macros and functions + */ + +#define SLGT_MAX_PORTS 4 +#define SLGT_REG_SIZE 256 + +/* + * conditional wait facility + */ +struct cond_wait { + struct cond_wait *next; + wait_queue_head_t q; + wait_queue_t wait; + unsigned int data; +}; +static void init_cond_wait(struct cond_wait *w, unsigned int data); +static void add_cond_wait(struct cond_wait **head, struct cond_wait *w); +static void remove_cond_wait(struct cond_wait **head, struct cond_wait *w); +static void flush_cond_wait(struct cond_wait **head); + +/* + * DMA buffer descriptor and access macros + */ +struct slgt_desc +{ + __le16 count; + __le16 status; + __le32 pbuf; /* physical address of data buffer */ + __le32 next; /* physical address of next descriptor */ + + /* driver book keeping */ + char *buf; /* virtual address of data buffer */ + unsigned int pdesc; /* physical address of this descriptor */ + dma_addr_t buf_dma_addr; + unsigned short buf_count; +}; + +#define set_desc_buffer(a,b) (a).pbuf = cpu_to_le32((unsigned int)(b)) +#define set_desc_next(a,b) 
(a).next = cpu_to_le32((unsigned int)(b)) +#define set_desc_count(a,b)(a).count = cpu_to_le16((unsigned short)(b)) +#define set_desc_eof(a,b) (a).status = cpu_to_le16((b) ? (le16_to_cpu((a).status) | BIT0) : (le16_to_cpu((a).status) & ~BIT0)) +#define set_desc_status(a, b) (a).status = cpu_to_le16((unsigned short)(b)) +#define desc_count(a) (le16_to_cpu((a).count)) +#define desc_status(a) (le16_to_cpu((a).status)) +#define desc_complete(a) (le16_to_cpu((a).status) & BIT15) +#define desc_eof(a) (le16_to_cpu((a).status) & BIT2) +#define desc_crc_error(a) (le16_to_cpu((a).status) & BIT1) +#define desc_abort(a) (le16_to_cpu((a).status) & BIT0) +#define desc_residue(a) ((le16_to_cpu((a).status) & 0x38) >> 3) + +struct _input_signal_events { + int ri_up; + int ri_down; + int dsr_up; + int dsr_down; + int dcd_up; + int dcd_down; + int cts_up; + int cts_down; +}; + +/* + * device instance data structure + */ +struct slgt_info { + void *if_ptr; /* General purpose pointer (used by SPPP) */ + struct tty_port port; + + struct slgt_info *next_device; /* device list link */ + + int magic; + + char device_name[25]; + struct pci_dev *pdev; + + int port_count; /* count of ports on adapter */ + int adapter_num; /* adapter instance number */ + int port_num; /* port instance number */ + + /* array of pointers to port contexts on this adapter */ + struct slgt_info *port_array[SLGT_MAX_PORTS]; + + int line; /* tty line instance number */ + + struct mgsl_icount icount; + + int timeout; + int x_char; /* xon/xoff character */ + unsigned int read_status_mask; + unsigned int ignore_status_mask; + + wait_queue_head_t status_event_wait_q; + wait_queue_head_t event_wait_q; + struct timer_list tx_timer; + struct timer_list rx_timer; + + unsigned int gpio_present; + struct cond_wait *gpio_wait_q; + + spinlock_t lock; /* spinlock for synchronizing with ISR */ + + struct work_struct task; + u32 pending_bh; + bool bh_requested; + bool bh_running; + + int isr_overflow; + bool irq_requested; /* true if IRQ requested */ + bool irq_occurred; /* for diagnostics use */ + + /* device configuration */ + + unsigned int bus_type; + unsigned int irq_level; + unsigned long irq_flags; + + unsigned char __iomem * reg_addr; /* memory mapped registers address */ + u32 phys_reg_addr; + bool reg_addr_requested; + + MGSL_PARAMS params; /* communications parameters */ + u32 idle_mode; + u32 max_frame_size; /* as set by device config */ + + unsigned int rbuf_fill_level; + unsigned int rx_pio; + unsigned int if_mode; + unsigned int base_clock; + unsigned int xsync; + unsigned int xctrl; + + /* device status */ + + bool rx_enabled; + bool rx_restart; + + bool tx_enabled; + bool tx_active; + + unsigned char signals; /* serial signal states */ + int init_error; /* initialization error */ + + unsigned char *tx_buf; + int tx_count; + + char flag_buf[MAX_ASYNC_BUFFER_SIZE]; + char char_buf[MAX_ASYNC_BUFFER_SIZE]; + bool drop_rts_on_tx_done; + struct _input_signal_events input_signal_events; + + int dcd_chkcount; /* check counts to prevent */ + int cts_chkcount; /* too many IRQs if a signal */ + int dsr_chkcount; /* is floating */ + int ri_chkcount; + + char *bufs; /* virtual address of DMA buffer lists */ + dma_addr_t bufs_dma_addr; /* physical address of buffer descriptors */ + + unsigned int rbuf_count; + struct slgt_desc *rbufs; + unsigned int rbuf_current; + unsigned int rbuf_index; + unsigned int rbuf_fill_index; + unsigned short rbuf_fill_count; + + unsigned int tbuf_count; + struct slgt_desc *tbufs; + unsigned int tbuf_current; + unsigned int 
tbuf_start; + + unsigned char *tmp_rbuf; + unsigned int tmp_rbuf_count; + + /* SPPP/Cisco HDLC device parts */ + + int netcount; + spinlock_t netlock; +#if SYNCLINK_GENERIC_HDLC + struct net_device *netdev; +#endif + +}; + +static MGSL_PARAMS default_params = { + .mode = MGSL_MODE_HDLC, + .loopback = 0, + .flags = HDLC_FLAG_UNDERRUN_ABORT15, + .encoding = HDLC_ENCODING_NRZI_SPACE, + .clock_speed = 0, + .addr_filter = 0xff, + .crc_type = HDLC_CRC_16_CCITT, + .preamble_length = HDLC_PREAMBLE_LENGTH_8BITS, + .preamble = HDLC_PREAMBLE_PATTERN_NONE, + .data_rate = 9600, + .data_bits = 8, + .stop_bits = 1, + .parity = ASYNC_PARITY_NONE +}; + + +#define BH_RECEIVE 1 +#define BH_TRANSMIT 2 +#define BH_STATUS 4 +#define IO_PIN_SHUTDOWN_LIMIT 100 + +#define DMABUFSIZE 256 +#define DESC_LIST_SIZE 4096 + +#define MASK_PARITY BIT1 +#define MASK_FRAMING BIT0 +#define MASK_BREAK BIT14 +#define MASK_OVERRUN BIT4 + +#define GSR 0x00 /* global status */ +#define JCR 0x04 /* JTAG control */ +#define IODR 0x08 /* GPIO direction */ +#define IOER 0x0c /* GPIO interrupt enable */ +#define IOVR 0x10 /* GPIO value */ +#define IOSR 0x14 /* GPIO interrupt status */ +#define TDR 0x80 /* tx data */ +#define RDR 0x80 /* rx data */ +#define TCR 0x82 /* tx control */ +#define TIR 0x84 /* tx idle */ +#define TPR 0x85 /* tx preamble */ +#define RCR 0x86 /* rx control */ +#define VCR 0x88 /* V.24 control */ +#define CCR 0x89 /* clock control */ +#define BDR 0x8a /* baud divisor */ +#define SCR 0x8c /* serial control */ +#define SSR 0x8e /* serial status */ +#define RDCSR 0x90 /* rx DMA control/status */ +#define TDCSR 0x94 /* tx DMA control/status */ +#define RDDAR 0x98 /* rx DMA descriptor address */ +#define TDDAR 0x9c /* tx DMA descriptor address */ +#define XSR 0x40 /* extended sync pattern */ +#define XCR 0x44 /* extended control */ + +#define RXIDLE BIT14 +#define RXBREAK BIT14 +#define IRQ_TXDATA BIT13 +#define IRQ_TXIDLE BIT12 +#define IRQ_TXUNDER BIT11 /* HDLC */ +#define IRQ_RXDATA BIT10 +#define IRQ_RXIDLE BIT9 /* HDLC */ +#define IRQ_RXBREAK BIT9 /* async */ +#define IRQ_RXOVER BIT8 +#define IRQ_DSR BIT7 +#define IRQ_CTS BIT6 +#define IRQ_DCD BIT5 +#define IRQ_RI BIT4 +#define IRQ_ALL 0x3ff0 +#define IRQ_MASTER BIT0 + +#define slgt_irq_on(info, mask) \ + wr_reg16((info), SCR, (unsigned short)(rd_reg16((info), SCR) | (mask))) +#define slgt_irq_off(info, mask) \ + wr_reg16((info), SCR, (unsigned short)(rd_reg16((info), SCR) & ~(mask))) + +static __u8 rd_reg8(struct slgt_info *info, unsigned int addr); +static void wr_reg8(struct slgt_info *info, unsigned int addr, __u8 value); +static __u16 rd_reg16(struct slgt_info *info, unsigned int addr); +static void wr_reg16(struct slgt_info *info, unsigned int addr, __u16 value); +static __u32 rd_reg32(struct slgt_info *info, unsigned int addr); +static void wr_reg32(struct slgt_info *info, unsigned int addr, __u32 value); + +static void msc_set_vcr(struct slgt_info *info); + +static int startup(struct slgt_info *info); +static int block_til_ready(struct tty_struct *tty, struct file * filp,struct slgt_info *info); +static void shutdown(struct slgt_info *info); +static void program_hw(struct slgt_info *info); +static void change_params(struct slgt_info *info); + +static int register_test(struct slgt_info *info); +static int irq_test(struct slgt_info *info); +static int loopback_test(struct slgt_info *info); +static int adapter_test(struct slgt_info *info); + +static void reset_adapter(struct slgt_info *info); +static void reset_port(struct slgt_info *info); +static void 
async_mode(struct slgt_info *info); +static void sync_mode(struct slgt_info *info); + +static void rx_stop(struct slgt_info *info); +static void rx_start(struct slgt_info *info); +static void reset_rbufs(struct slgt_info *info); +static void free_rbufs(struct slgt_info *info, unsigned int first, unsigned int last); +static void rdma_reset(struct slgt_info *info); +static bool rx_get_frame(struct slgt_info *info); +static bool rx_get_buf(struct slgt_info *info); + +static void tx_start(struct slgt_info *info); +static void tx_stop(struct slgt_info *info); +static void tx_set_idle(struct slgt_info *info); +static unsigned int free_tbuf_count(struct slgt_info *info); +static unsigned int tbuf_bytes(struct slgt_info *info); +static void reset_tbufs(struct slgt_info *info); +static void tdma_reset(struct slgt_info *info); +static bool tx_load(struct slgt_info *info, const char *buf, unsigned int count); + +static void get_signals(struct slgt_info *info); +static void set_signals(struct slgt_info *info); +static void enable_loopback(struct slgt_info *info); +static void set_rate(struct slgt_info *info, u32 data_rate); + +static int bh_action(struct slgt_info *info); +static void bh_handler(struct work_struct *work); +static void bh_transmit(struct slgt_info *info); +static void isr_serial(struct slgt_info *info); +static void isr_rdma(struct slgt_info *info); +static void isr_txeom(struct slgt_info *info, unsigned short status); +static void isr_tdma(struct slgt_info *info); + +static int alloc_dma_bufs(struct slgt_info *info); +static void free_dma_bufs(struct slgt_info *info); +static int alloc_desc(struct slgt_info *info); +static void free_desc(struct slgt_info *info); +static int alloc_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count); +static void free_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count); + +static int alloc_tmp_rbuf(struct slgt_info *info); +static void free_tmp_rbuf(struct slgt_info *info); + +static void tx_timeout(unsigned long context); +static void rx_timeout(unsigned long context); + +/* + * ioctl handlers + */ +static int get_stats(struct slgt_info *info, struct mgsl_icount __user *user_icount); +static int get_params(struct slgt_info *info, MGSL_PARAMS __user *params); +static int set_params(struct slgt_info *info, MGSL_PARAMS __user *params); +static int get_txidle(struct slgt_info *info, int __user *idle_mode); +static int set_txidle(struct slgt_info *info, int idle_mode); +static int tx_enable(struct slgt_info *info, int enable); +static int tx_abort(struct slgt_info *info); +static int rx_enable(struct slgt_info *info, int enable); +static int modem_input_wait(struct slgt_info *info,int arg); +static int wait_mgsl_event(struct slgt_info *info, int __user *mask_ptr); +static int tiocmget(struct tty_struct *tty); +static int tiocmset(struct tty_struct *tty, + unsigned int set, unsigned int clear); +static int set_break(struct tty_struct *tty, int break_state); +static int get_interface(struct slgt_info *info, int __user *if_mode); +static int set_interface(struct slgt_info *info, int if_mode); +static int set_gpio(struct slgt_info *info, struct gpio_desc __user *gpio); +static int get_gpio(struct slgt_info *info, struct gpio_desc __user *gpio); +static int wait_gpio(struct slgt_info *info, struct gpio_desc __user *gpio); +static int get_xsync(struct slgt_info *info, int __user *if_mode); +static int set_xsync(struct slgt_info *info, int if_mode); +static int get_xctrl(struct slgt_info *info, int __user *if_mode); +static int 
set_xctrl(struct slgt_info *info, int if_mode); + +/* + * driver functions + */ +static void add_device(struct slgt_info *info); +static void device_init(int adapter_num, struct pci_dev *pdev); +static int claim_resources(struct slgt_info *info); +static void release_resources(struct slgt_info *info); + +/* + * DEBUG OUTPUT CODE + */ +#ifndef DBGINFO +#define DBGINFO(fmt) +#endif +#ifndef DBGERR +#define DBGERR(fmt) +#endif +#ifndef DBGBH +#define DBGBH(fmt) +#endif +#ifndef DBGISR +#define DBGISR(fmt) +#endif + +#ifdef DBGDATA +static void trace_block(struct slgt_info *info, const char *data, int count, const char *label) +{ + int i; + int linecount; + printk("%s %s data:\n",info->device_name, label); + while(count) { + linecount = (count > 16) ? 16 : count; + for(i=0; i < linecount; i++) + printk("%02X ",(unsigned char)data[i]); + for(;i<17;i++) + printk(" "); + for(i=0;i<linecount;i++) { + if (data[i]>=040 && data[i]<=0176) + printk("%c",data[i]); + else + printk("."); + } + printk("\n"); + data += linecount; + count -= linecount; + } +} +#else +#define DBGDATA(info, buf, size, label) +#endif + +#ifdef DBGTBUF +static void dump_tbufs(struct slgt_info *info) +{ + int i; + printk("tbuf_current=%d\n", info->tbuf_current); + for (i=0 ; i < info->tbuf_count ; i++) { + printk("%d: count=%04X status=%04X\n", + i, le16_to_cpu(info->tbufs[i].count), le16_to_cpu(info->tbufs[i].status)); + } +} +#else +#define DBGTBUF(info) +#endif + +#ifdef DBGRBUF +static void dump_rbufs(struct slgt_info *info) +{ + int i; + printk("rbuf_current=%d\n", info->rbuf_current); + for (i=0 ; i < info->rbuf_count ; i++) { + printk("%d: count=%04X status=%04X\n", + i, le16_to_cpu(info->rbufs[i].count), le16_to_cpu(info->rbufs[i].status)); + } +} +#else +#define DBGRBUF(info) +#endif + +static inline int sanity_check(struct slgt_info *info, char *devname, const char *name) +{ +#ifdef SANITY_CHECK + if (!info) { + printk("null struct slgt_info for (%s) in %s\n", devname, name); + return 1; + } + if (info->magic != MGSL_MAGIC) { + printk("bad magic number struct slgt_info (%s) in %s\n", devname, name); + return 1; + } +#else + if (!info) + return 1; +#endif + return 0; +} + +/** + * line discipline callback wrappers + * + * The wrappers maintain line discipline references + * while calling into the line discipline. 
+ * + * ldisc_receive_buf - pass receive data to line discipline + */ +static void ldisc_receive_buf(struct tty_struct *tty, + const __u8 *data, char *flags, int count) +{ + struct tty_ldisc *ld; + if (!tty) + return; + ld = tty_ldisc_ref(tty); + if (ld) { + if (ld->ops->receive_buf) + ld->ops->receive_buf(tty, data, flags, count); + tty_ldisc_deref(ld); + } +} + +/* tty callbacks */ + +static int open(struct tty_struct *tty, struct file *filp) +{ + struct slgt_info *info; + int retval, line; + unsigned long flags; + + line = tty->index; + if ((line < 0) || (line >= slgt_device_count)) { + DBGERR(("%s: open with invalid line #%d.\n", driver_name, line)); + return -ENODEV; + } + + info = slgt_device_list; + while(info && info->line != line) + info = info->next_device; + if (sanity_check(info, tty->name, "open")) + return -ENODEV; + if (info->init_error) { + DBGERR(("%s init error=%d\n", info->device_name, info->init_error)); + return -ENODEV; + } + + tty->driver_data = info; + info->port.tty = tty; + + DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count)); + + /* If port is closing, signal caller to try again */ + if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){ + if (info->port.flags & ASYNC_CLOSING) + interruptible_sleep_on(&info->port.close_wait); + retval = ((info->port.flags & ASYNC_HUP_NOTIFY) ? + -EAGAIN : -ERESTARTSYS); + goto cleanup; + } + + mutex_lock(&info->port.mutex); + info->port.tty->low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0; + + spin_lock_irqsave(&info->netlock, flags); + if (info->netcount) { + retval = -EBUSY; + spin_unlock_irqrestore(&info->netlock, flags); + mutex_unlock(&info->port.mutex); + goto cleanup; + } + info->port.count++; + spin_unlock_irqrestore(&info->netlock, flags); + + if (info->port.count == 1) { + /* 1st open on this device, init hardware */ + retval = startup(info); + if (retval < 0) { + mutex_unlock(&info->port.mutex); + goto cleanup; + } + } + mutex_unlock(&info->port.mutex); + retval = block_til_ready(tty, filp, info); + if (retval) { + DBGINFO(("%s block_til_ready rc=%d\n", info->device_name, retval)); + goto cleanup; + } + + retval = 0; + +cleanup: + if (retval) { + if (tty->count == 1) + info->port.tty = NULL; /* tty layer will release tty struct */ + if(info->port.count) + info->port.count--; + } + + DBGINFO(("%s open rc=%d\n", info->device_name, retval)); + return retval; +} + +static void close(struct tty_struct *tty, struct file *filp) +{ + struct slgt_info *info = tty->driver_data; + + if (sanity_check(info, tty->name, "close")) + return; + DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count)); + + if (tty_port_close_start(&info->port, tty, filp) == 0) + goto cleanup; + + mutex_lock(&info->port.mutex); + if (info->port.flags & ASYNC_INITIALIZED) + wait_until_sent(tty, info->timeout); + flush_buffer(tty); + tty_ldisc_flush(tty); + + shutdown(info); + mutex_unlock(&info->port.mutex); + + tty_port_close_end(&info->port, tty); + info->port.tty = NULL; +cleanup: + DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count)); +} + +static void hangup(struct tty_struct *tty) +{ + struct slgt_info *info = tty->driver_data; + unsigned long flags; + + if (sanity_check(info, tty->name, "hangup")) + return; + DBGINFO(("%s hangup\n", info->device_name)); + + flush_buffer(tty); + + mutex_lock(&info->port.mutex); + shutdown(info); + + spin_lock_irqsave(&info->port.lock, flags); + info->port.count = 0; + info->port.flags &= ~ASYNC_NORMAL_ACTIVE; + info->port.tty 
= NULL; + spin_unlock_irqrestore(&info->port.lock, flags); + mutex_unlock(&info->port.mutex); + + wake_up_interruptible(&info->port.open_wait); +} + +static void set_termios(struct tty_struct *tty, struct ktermios *old_termios) +{ + struct slgt_info *info = tty->driver_data; + unsigned long flags; + + DBGINFO(("%s set_termios\n", tty->driver->name)); + + change_params(info); + + /* Handle transition to B0 status */ + if (old_termios->c_cflag & CBAUD && + !(tty->termios->c_cflag & CBAUD)) { + info->signals &= ~(SerialSignal_RTS + SerialSignal_DTR); + spin_lock_irqsave(&info->lock,flags); + set_signals(info); + spin_unlock_irqrestore(&info->lock,flags); + } + + /* Handle transition away from B0 status */ + if (!(old_termios->c_cflag & CBAUD) && + tty->termios->c_cflag & CBAUD) { + info->signals |= SerialSignal_DTR; + if (!(tty->termios->c_cflag & CRTSCTS) || + !test_bit(TTY_THROTTLED, &tty->flags)) { + info->signals |= SerialSignal_RTS; + } + spin_lock_irqsave(&info->lock,flags); + set_signals(info); + spin_unlock_irqrestore(&info->lock,flags); + } + + /* Handle turning off CRTSCTS */ + if (old_termios->c_cflag & CRTSCTS && + !(tty->termios->c_cflag & CRTSCTS)) { + tty->hw_stopped = 0; + tx_release(tty); + } +} + +static void update_tx_timer(struct slgt_info *info) +{ + /* + * use worst case speed of 1200bps to calculate transmit timeout + * based on data in buffers (tbuf_bytes) and FIFO (128 bytes) + */ + if (info->params.mode == MGSL_MODE_HDLC) { + int timeout = (tbuf_bytes(info) * 7) + 1000; + mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(timeout)); + } +} + +static int write(struct tty_struct *tty, + const unsigned char *buf, int count) +{ + int ret = 0; + struct slgt_info *info = tty->driver_data; + unsigned long flags; + + if (sanity_check(info, tty->name, "write")) + return -EIO; + + DBGINFO(("%s write count=%d\n", info->device_name, count)); + + if (!info->tx_buf || (count > info->max_frame_size)) + return -EIO; + + if (!count || tty->stopped || tty->hw_stopped) + return 0; + + spin_lock_irqsave(&info->lock, flags); + + if (info->tx_count) { + /* send accumulated data from send_char() */ + if (!tx_load(info, info->tx_buf, info->tx_count)) + goto cleanup; + info->tx_count = 0; + } + + if (tx_load(info, buf, count)) + ret = count; + +cleanup: + spin_unlock_irqrestore(&info->lock, flags); + DBGINFO(("%s write rc=%d\n", info->device_name, ret)); + return ret; +} + +static int put_char(struct tty_struct *tty, unsigned char ch) +{ + struct slgt_info *info = tty->driver_data; + unsigned long flags; + int ret = 0; + + if (sanity_check(info, tty->name, "put_char")) + return 0; + DBGINFO(("%s put_char(%d)\n", info->device_name, ch)); + if (!info->tx_buf) + return 0; + spin_lock_irqsave(&info->lock,flags); + if (info->tx_count < info->max_frame_size) { + info->tx_buf[info->tx_count++] = ch; + ret = 1; + } + spin_unlock_irqrestore(&info->lock,flags); + return ret; +} + +static void send_xchar(struct tty_struct *tty, char ch) +{ + struct slgt_info *info = tty->driver_data; + unsigned long flags; + + if (sanity_check(info, tty->name, "send_xchar")) + return; + DBGINFO(("%s send_xchar(%d)\n", info->device_name, ch)); + info->x_char = ch; + if (ch) { + spin_lock_irqsave(&info->lock,flags); + if (!info->tx_enabled) + tx_start(info); + spin_unlock_irqrestore(&info->lock,flags); + } +} + +static void wait_until_sent(struct tty_struct *tty, int timeout) +{ + struct slgt_info *info = tty->driver_data; + unsigned long orig_jiffies, char_time; + + if (!info ) + return; + if (sanity_check(info, 
tty->name, "wait_until_sent")) + return; + DBGINFO(("%s wait_until_sent entry\n", info->device_name)); + if (!(info->port.flags & ASYNC_INITIALIZED)) + goto exit; + + orig_jiffies = jiffies; + + /* Set check interval to 1/5 of estimated time to + * send a character, and make it at least 1. The check + * interval should also be less than the timeout. + * Note: use tight timings here to satisfy the NIST-PCTS. + */ + + if (info->params.data_rate) { + char_time = info->timeout/(32 * 5); + if (!char_time) + char_time++; + } else + char_time = 1; + + if (timeout) + char_time = min_t(unsigned long, char_time, timeout); + + while (info->tx_active) { + msleep_interruptible(jiffies_to_msecs(char_time)); + if (signal_pending(current)) + break; + if (timeout && time_after(jiffies, orig_jiffies + timeout)) + break; + } +exit: + DBGINFO(("%s wait_until_sent exit\n", info->device_name)); +} + +static int write_room(struct tty_struct *tty) +{ + struct slgt_info *info = tty->driver_data; + int ret; + + if (sanity_check(info, tty->name, "write_room")) + return 0; + ret = (info->tx_active) ? 0 : HDLC_MAX_FRAME_SIZE; + DBGINFO(("%s write_room=%d\n", info->device_name, ret)); + return ret; +} + +static void flush_chars(struct tty_struct *tty) +{ + struct slgt_info *info = tty->driver_data; + unsigned long flags; + + if (sanity_check(info, tty->name, "flush_chars")) + return; + DBGINFO(("%s flush_chars entry tx_count=%d\n", info->device_name, info->tx_count)); + + if (info->tx_count <= 0 || tty->stopped || + tty->hw_stopped || !info->tx_buf) + return; + + DBGINFO(("%s flush_chars start transmit\n", info->device_name)); + + spin_lock_irqsave(&info->lock,flags); + if (info->tx_count && tx_load(info, info->tx_buf, info->tx_count)) + info->tx_count = 0; + spin_unlock_irqrestore(&info->lock,flags); +} + +static void flush_buffer(struct tty_struct *tty) +{ + struct slgt_info *info = tty->driver_data; + unsigned long flags; + + if (sanity_check(info, tty->name, "flush_buffer")) + return; + DBGINFO(("%s flush_buffer\n", info->device_name)); + + spin_lock_irqsave(&info->lock, flags); + info->tx_count = 0; + spin_unlock_irqrestore(&info->lock, flags); + + tty_wakeup(tty); +} + +/* + * throttle (stop) transmitter + */ +static void tx_hold(struct tty_struct *tty) +{ + struct slgt_info *info = tty->driver_data; + unsigned long flags; + + if (sanity_check(info, tty->name, "tx_hold")) + return; + DBGINFO(("%s tx_hold\n", info->device_name)); + spin_lock_irqsave(&info->lock,flags); + if (info->tx_enabled && info->params.mode == MGSL_MODE_ASYNC) + tx_stop(info); + spin_unlock_irqrestore(&info->lock,flags); +} + +/* + * release (start) transmitter + */ +static void tx_release(struct tty_struct *tty) +{ + struct slgt_info *info = tty->driver_data; + unsigned long flags; + + if (sanity_check(info, tty->name, "tx_release")) + return; + DBGINFO(("%s tx_release\n", info->device_name)); + spin_lock_irqsave(&info->lock, flags); + if (info->tx_count && tx_load(info, info->tx_buf, info->tx_count)) + info->tx_count = 0; + spin_unlock_irqrestore(&info->lock, flags); +} + +/* + * Service an IOCTL request + * + * Arguments + * + * tty pointer to tty instance data + * cmd IOCTL command code + * arg command argument/context + * + * Return 0 if success, otherwise error code + */ +static int ioctl(struct tty_struct *tty, + unsigned int cmd, unsigned long arg) +{ + struct slgt_info *info = tty->driver_data; + void __user *argp = (void __user *)arg; + int ret; + + if (sanity_check(info, tty->name, "ioctl")) + return -ENODEV; + DBGINFO(("%s ioctl() 
cmd=%08X\n", info->device_name, cmd)); + + if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) && + (cmd != TIOCMIWAIT)) { + if (tty->flags & (1 << TTY_IO_ERROR)) + return -EIO; + } + + switch (cmd) { + case MGSL_IOCWAITEVENT: + return wait_mgsl_event(info, argp); + case TIOCMIWAIT: + return modem_input_wait(info,(int)arg); + case MGSL_IOCSGPIO: + return set_gpio(info, argp); + case MGSL_IOCGGPIO: + return get_gpio(info, argp); + case MGSL_IOCWAITGPIO: + return wait_gpio(info, argp); + case MGSL_IOCGXSYNC: + return get_xsync(info, argp); + case MGSL_IOCSXSYNC: + return set_xsync(info, (int)arg); + case MGSL_IOCGXCTRL: + return get_xctrl(info, argp); + case MGSL_IOCSXCTRL: + return set_xctrl(info, (int)arg); + } + mutex_lock(&info->port.mutex); + switch (cmd) { + case MGSL_IOCGPARAMS: + ret = get_params(info, argp); + break; + case MGSL_IOCSPARAMS: + ret = set_params(info, argp); + break; + case MGSL_IOCGTXIDLE: + ret = get_txidle(info, argp); + break; + case MGSL_IOCSTXIDLE: + ret = set_txidle(info, (int)arg); + break; + case MGSL_IOCTXENABLE: + ret = tx_enable(info, (int)arg); + break; + case MGSL_IOCRXENABLE: + ret = rx_enable(info, (int)arg); + break; + case MGSL_IOCTXABORT: + ret = tx_abort(info); + break; + case MGSL_IOCGSTATS: + ret = get_stats(info, argp); + break; + case MGSL_IOCGIF: + ret = get_interface(info, argp); + break; + case MGSL_IOCSIF: + ret = set_interface(info,(int)arg); + break; + default: + ret = -ENOIOCTLCMD; + } + mutex_unlock(&info->port.mutex); + return ret; +} + +static int get_icount(struct tty_struct *tty, + struct serial_icounter_struct *icount) + +{ + struct slgt_info *info = tty->driver_data; + struct mgsl_icount cnow; /* kernel counter temps */ + unsigned long flags; + + spin_lock_irqsave(&info->lock,flags); + cnow = info->icount; + spin_unlock_irqrestore(&info->lock,flags); + + icount->cts = cnow.cts; + icount->dsr = cnow.dsr; + icount->rng = cnow.rng; + icount->dcd = cnow.dcd; + icount->rx = cnow.rx; + icount->tx = cnow.tx; + icount->frame = cnow.frame; + icount->overrun = cnow.overrun; + icount->parity = cnow.parity; + icount->brk = cnow.brk; + icount->buf_overrun = cnow.buf_overrun; + + return 0; +} + +/* + * support for 32 bit ioctl calls on 64 bit systems + */ +#ifdef CONFIG_COMPAT +static long get_params32(struct slgt_info *info, struct MGSL_PARAMS32 __user *user_params) +{ + struct MGSL_PARAMS32 tmp_params; + + DBGINFO(("%s get_params32\n", info->device_name)); + memset(&tmp_params, 0, sizeof(tmp_params)); + tmp_params.mode = (compat_ulong_t)info->params.mode; + tmp_params.loopback = info->params.loopback; + tmp_params.flags = info->params.flags; + tmp_params.encoding = info->params.encoding; + tmp_params.clock_speed = (compat_ulong_t)info->params.clock_speed; + tmp_params.addr_filter = info->params.addr_filter; + tmp_params.crc_type = info->params.crc_type; + tmp_params.preamble_length = info->params.preamble_length; + tmp_params.preamble = info->params.preamble; + tmp_params.data_rate = (compat_ulong_t)info->params.data_rate; + tmp_params.data_bits = info->params.data_bits; + tmp_params.stop_bits = info->params.stop_bits; + tmp_params.parity = info->params.parity; + if (copy_to_user(user_params, &tmp_params, sizeof(struct MGSL_PARAMS32))) + return -EFAULT; + return 0; +} + +static long set_params32(struct slgt_info *info, struct MGSL_PARAMS32 __user *new_params) +{ + struct MGSL_PARAMS32 tmp_params; + + DBGINFO(("%s set_params32\n", info->device_name)); + if (copy_from_user(&tmp_params, new_params, sizeof(struct MGSL_PARAMS32))) + return -EFAULT; + 
+ spin_lock(&info->lock); + if (tmp_params.mode == MGSL_MODE_BASE_CLOCK) { + info->base_clock = tmp_params.clock_speed; + } else { + info->params.mode = tmp_params.mode; + info->params.loopback = tmp_params.loopback; + info->params.flags = tmp_params.flags; + info->params.encoding = tmp_params.encoding; + info->params.clock_speed = tmp_params.clock_speed; + info->params.addr_filter = tmp_params.addr_filter; + info->params.crc_type = tmp_params.crc_type; + info->params.preamble_length = tmp_params.preamble_length; + info->params.preamble = tmp_params.preamble; + info->params.data_rate = tmp_params.data_rate; + info->params.data_bits = tmp_params.data_bits; + info->params.stop_bits = tmp_params.stop_bits; + info->params.parity = tmp_params.parity; + } + spin_unlock(&info->lock); + + program_hw(info); + + return 0; +} + +static long slgt_compat_ioctl(struct tty_struct *tty, + unsigned int cmd, unsigned long arg) +{ + struct slgt_info *info = tty->driver_data; + int rc = -ENOIOCTLCMD; + + if (sanity_check(info, tty->name, "compat_ioctl")) + return -ENODEV; + DBGINFO(("%s compat_ioctl() cmd=%08X\n", info->device_name, cmd)); + + switch (cmd) { + + case MGSL_IOCSPARAMS32: + rc = set_params32(info, compat_ptr(arg)); + break; + + case MGSL_IOCGPARAMS32: + rc = get_params32(info, compat_ptr(arg)); + break; + + case MGSL_IOCGPARAMS: + case MGSL_IOCSPARAMS: + case MGSL_IOCGTXIDLE: + case MGSL_IOCGSTATS: + case MGSL_IOCWAITEVENT: + case MGSL_IOCGIF: + case MGSL_IOCSGPIO: + case MGSL_IOCGGPIO: + case MGSL_IOCWAITGPIO: + case MGSL_IOCGXSYNC: + case MGSL_IOCGXCTRL: + case MGSL_IOCSTXIDLE: + case MGSL_IOCTXENABLE: + case MGSL_IOCRXENABLE: + case MGSL_IOCTXABORT: + case TIOCMIWAIT: + case MGSL_IOCSIF: + case MGSL_IOCSXSYNC: + case MGSL_IOCSXCTRL: + rc = ioctl(tty, cmd, arg); + break; + } + + DBGINFO(("%s compat_ioctl() cmd=%08X rc=%d\n", info->device_name, cmd, rc)); + return rc; +} +#else +#define slgt_compat_ioctl NULL +#endif /* ifdef CONFIG_COMPAT */ + +/* + * proc fs support + */ +static inline void line_info(struct seq_file *m, struct slgt_info *info) +{ + char stat_buf[30]; + unsigned long flags; + + seq_printf(m, "%s: IO=%08X IRQ=%d MaxFrameSize=%u\n", + info->device_name, info->phys_reg_addr, + info->irq_level, info->max_frame_size); + + /* output current serial signal states */ + spin_lock_irqsave(&info->lock,flags); + get_signals(info); + spin_unlock_irqrestore(&info->lock,flags); + + stat_buf[0] = 0; + stat_buf[1] = 0; + if (info->signals & SerialSignal_RTS) + strcat(stat_buf, "|RTS"); + if (info->signals & SerialSignal_CTS) + strcat(stat_buf, "|CTS"); + if (info->signals & SerialSignal_DTR) + strcat(stat_buf, "|DTR"); + if (info->signals & SerialSignal_DSR) + strcat(stat_buf, "|DSR"); + if (info->signals & SerialSignal_DCD) + strcat(stat_buf, "|CD"); + if (info->signals & SerialSignal_RI) + strcat(stat_buf, "|RI"); + + if (info->params.mode != MGSL_MODE_ASYNC) { + seq_printf(m, "\tHDLC txok:%d rxok:%d", + info->icount.txok, info->icount.rxok); + if (info->icount.txunder) + seq_printf(m, " txunder:%d", info->icount.txunder); + if (info->icount.txabort) + seq_printf(m, " txabort:%d", info->icount.txabort); + if (info->icount.rxshort) + seq_printf(m, " rxshort:%d", info->icount.rxshort); + if (info->icount.rxlong) + seq_printf(m, " rxlong:%d", info->icount.rxlong); + if (info->icount.rxover) + seq_printf(m, " rxover:%d", info->icount.rxover); + if (info->icount.rxcrc) + seq_printf(m, " rxcrc:%d", info->icount.rxcrc); + } else { + seq_printf(m, "\tASYNC tx:%d rx:%d", + info->icount.tx, 
info->icount.rx); + if (info->icount.frame) + seq_printf(m, " fe:%d", info->icount.frame); + if (info->icount.parity) + seq_printf(m, " pe:%d", info->icount.parity); + if (info->icount.brk) + seq_printf(m, " brk:%d", info->icount.brk); + if (info->icount.overrun) + seq_printf(m, " oe:%d", info->icount.overrun); + } + + /* Append serial signal status to end */ + seq_printf(m, " %s\n", stat_buf+1); + + seq_printf(m, "\ttxactive=%d bh_req=%d bh_run=%d pending_bh=%x\n", + info->tx_active,info->bh_requested,info->bh_running, + info->pending_bh); +} + +/* Called to print information about devices + */ +static int synclink_gt_proc_show(struct seq_file *m, void *v) +{ + struct slgt_info *info; + + seq_puts(m, "synclink_gt driver\n"); + + info = slgt_device_list; + while( info ) { + line_info(m, info); + info = info->next_device; + } + return 0; +} + +static int synclink_gt_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, synclink_gt_proc_show, NULL); +} + +static const struct file_operations synclink_gt_proc_fops = { + .owner = THIS_MODULE, + .open = synclink_gt_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/* + * return count of bytes in transmit buffer + */ +static int chars_in_buffer(struct tty_struct *tty) +{ + struct slgt_info *info = tty->driver_data; + int count; + if (sanity_check(info, tty->name, "chars_in_buffer")) + return 0; + count = tbuf_bytes(info); + DBGINFO(("%s chars_in_buffer()=%d\n", info->device_name, count)); + return count; +} + +/* + * signal remote device to throttle send data (our receive data) + */ +static void throttle(struct tty_struct * tty) +{ + struct slgt_info *info = tty->driver_data; + unsigned long flags; + + if (sanity_check(info, tty->name, "throttle")) + return; + DBGINFO(("%s throttle\n", info->device_name)); + if (I_IXOFF(tty)) + send_xchar(tty, STOP_CHAR(tty)); + if (tty->termios->c_cflag & CRTSCTS) { + spin_lock_irqsave(&info->lock,flags); + info->signals &= ~SerialSignal_RTS; + set_signals(info); + spin_unlock_irqrestore(&info->lock,flags); + } +} + +/* + * signal remote device to stop throttling send data (our receive data) + */ +static void unthrottle(struct tty_struct * tty) +{ + struct slgt_info *info = tty->driver_data; + unsigned long flags; + + if (sanity_check(info, tty->name, "unthrottle")) + return; + DBGINFO(("%s unthrottle\n", info->device_name)); + if (I_IXOFF(tty)) { + if (info->x_char) + info->x_char = 0; + else + send_xchar(tty, START_CHAR(tty)); + } + if (tty->termios->c_cflag & CRTSCTS) { + spin_lock_irqsave(&info->lock,flags); + info->signals |= SerialSignal_RTS; + set_signals(info); + spin_unlock_irqrestore(&info->lock,flags); + } +} + +/* + * set or clear transmit break condition + * break_state -1=set break condition, 0=clear + */ +static int set_break(struct tty_struct *tty, int break_state) +{ + struct slgt_info *info = tty->driver_data; + unsigned short value; + unsigned long flags; + + if (sanity_check(info, tty->name, "set_break")) + return -EINVAL; + DBGINFO(("%s set_break(%d)\n", info->device_name, break_state)); + + spin_lock_irqsave(&info->lock,flags); + value = rd_reg16(info, TCR); + if (break_state == -1) + value |= BIT6; + else + value &= ~BIT6; + wr_reg16(info, TCR, value); + spin_unlock_irqrestore(&info->lock,flags); + return 0; +} + +#if SYNCLINK_GENERIC_HDLC + +/** + * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.) 
+ * set encoding and frame check sequence (FCS) options + * + * dev pointer to network device structure + * encoding serial encoding setting + * parity FCS setting + * + * returns 0 if success, otherwise error code + */ +static int hdlcdev_attach(struct net_device *dev, unsigned short encoding, + unsigned short parity) +{ + struct slgt_info *info = dev_to_port(dev); + unsigned char new_encoding; + unsigned short new_crctype; + + /* return error if TTY interface open */ + if (info->port.count) + return -EBUSY; + + DBGINFO(("%s hdlcdev_attach\n", info->device_name)); + + switch (encoding) + { + case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break; + case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break; + case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break; + case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break; + case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break; + default: return -EINVAL; + } + + switch (parity) + { + case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break; + case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break; + case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break; + default: return -EINVAL; + } + + info->params.encoding = new_encoding; + info->params.crc_type = new_crctype; + + /* if network interface up, reprogram hardware */ + if (info->netcount) + program_hw(info); + + return 0; +} + +/** + * called by generic HDLC layer to send frame + * + * skb socket buffer containing HDLC frame + * dev pointer to network device structure + */ +static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct slgt_info *info = dev_to_port(dev); + unsigned long flags; + + DBGINFO(("%s hdlc_xmit\n", dev->name)); + + if (!skb->len) + return NETDEV_TX_OK; + + /* stop sending until this frame completes */ + netif_stop_queue(dev); + + /* update network statistics */ + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + + /* save start time for transmit timeout detection */ + dev->trans_start = jiffies; + + spin_lock_irqsave(&info->lock, flags); + tx_load(info, skb->data, skb->len); + spin_unlock_irqrestore(&info->lock, flags); + + /* done with socket buffer, so free it */ + dev_kfree_skb(skb); + + return NETDEV_TX_OK; +} + +/** + * called by network layer when interface enabled + * claim resources and initialize hardware + * + * dev pointer to network device structure + * + * returns 0 if success, otherwise error code + */ +static int hdlcdev_open(struct net_device *dev) +{ + struct slgt_info *info = dev_to_port(dev); + int rc; + unsigned long flags; + + if (!try_module_get(THIS_MODULE)) + return -EBUSY; + + DBGINFO(("%s hdlcdev_open\n", dev->name)); + + /* generic HDLC layer open processing */ + if ((rc = hdlc_open(dev))) + return rc; + + /* arbitrate between network and tty opens */ + spin_lock_irqsave(&info->netlock, flags); + if (info->port.count != 0 || info->netcount != 0) { + DBGINFO(("%s hdlc_open busy\n", dev->name)); + spin_unlock_irqrestore(&info->netlock, flags); + return -EBUSY; + } + info->netcount=1; + spin_unlock_irqrestore(&info->netlock, flags); + + /* claim resources and init adapter */ + if ((rc = startup(info)) != 0) { + spin_lock_irqsave(&info->netlock, flags); + info->netcount=0; + spin_unlock_irqrestore(&info->netlock, flags); + return rc; + } + + /* assert DTR and RTS, apply hardware settings */ + info->signals |= SerialSignal_RTS + SerialSignal_DTR; + program_hw(info); + + /* enable network layer transmit */ + 
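Both the tty open() path and hdlcdev_open() above arbitrate for the port under the netlock spinlock: the tty side refuses to open while netcount is set, and the HDLC side refuses while port.count is non-zero. A minimal sketch of that "first claimant wins" pattern follows; the struct and function names are illustrative stand-ins, not the driver's own.

#include <linux/spinlock.h>
#include <linux/errno.h>

/* illustrative: mutual exclusion between two front ends for one port */
struct port_claim {
	spinlock_t lock;	/* plays the role of info->netlock */
	int tty_users;		/* plays the role of info->port.count */
	int net_users;		/* plays the role of info->netcount */
};

static int claim_for_net(struct port_claim *pc)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&pc->lock, flags);
	if (pc->tty_users || pc->net_users)
		ret = -EBUSY;	/* already claimed by the other interface */
	else
		pc->net_users = 1;
	spin_unlock_irqrestore(&pc->lock, flags);
	return ret;
}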
dev->trans_start = jiffies; + netif_start_queue(dev); + + /* inform generic HDLC layer of current DCD status */ + spin_lock_irqsave(&info->lock, flags); + get_signals(info); + spin_unlock_irqrestore(&info->lock, flags); + if (info->signals & SerialSignal_DCD) + netif_carrier_on(dev); + else + netif_carrier_off(dev); + return 0; +} + +/** + * called by network layer when interface is disabled + * shutdown hardware and release resources + * + * dev pointer to network device structure + * + * returns 0 if success, otherwise error code + */ +static int hdlcdev_close(struct net_device *dev) +{ + struct slgt_info *info = dev_to_port(dev); + unsigned long flags; + + DBGINFO(("%s hdlcdev_close\n", dev->name)); + + netif_stop_queue(dev); + + /* shutdown adapter and release resources */ + shutdown(info); + + hdlc_close(dev); + + spin_lock_irqsave(&info->netlock, flags); + info->netcount=0; + spin_unlock_irqrestore(&info->netlock, flags); + + module_put(THIS_MODULE); + return 0; +} + +/** + * called by network layer to process IOCTL call to network device + * + * dev pointer to network device structure + * ifr pointer to network interface request structure + * cmd IOCTL command code + * + * returns 0 if success, otherwise error code + */ +static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + const size_t size = sizeof(sync_serial_settings); + sync_serial_settings new_line; + sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync; + struct slgt_info *info = dev_to_port(dev); + unsigned int flags; + + DBGINFO(("%s hdlcdev_ioctl\n", dev->name)); + + /* return error if TTY interface open */ + if (info->port.count) + return -EBUSY; + + if (cmd != SIOCWANDEV) + return hdlc_ioctl(dev, ifr, cmd); + + memset(&new_line, 0, sizeof(new_line)); + + switch(ifr->ifr_settings.type) { + case IF_GET_IFACE: /* return current sync_serial_settings */ + + ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL; + if (ifr->ifr_settings.size < size) { + ifr->ifr_settings.size = size; /* data size wanted */ + return -ENOBUFS; + } + + flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL | + HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN | + HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL | + HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); + + switch (flags){ + case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break; + case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break; + case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_TXINT; break; + case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break; + default: new_line.clock_type = CLOCK_DEFAULT; + } + + new_line.clock_rate = info->params.clock_speed; + new_line.loopback = info->params.loopback ? 
1:0; + + if (copy_to_user(line, &new_line, size)) + return -EFAULT; + return 0; + + case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */ + + if(!capable(CAP_NET_ADMIN)) + return -EPERM; + if (copy_from_user(&new_line, line, size)) + return -EFAULT; + + switch (new_line.clock_type) + { + case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break; + case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break; + case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break; + case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break; + case CLOCK_DEFAULT: flags = info->params.flags & + (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL | + HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN | + HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL | + HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break; + default: return -EINVAL; + } + + if (new_line.loopback != 0 && new_line.loopback != 1) + return -EINVAL; + + info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL | + HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN | + HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL | + HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); + info->params.flags |= flags; + + info->params.loopback = new_line.loopback; + + if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG)) + info->params.clock_speed = new_line.clock_rate; + else + info->params.clock_speed = 0; + + /* if network interface up, reprogram hardware */ + if (info->netcount) + program_hw(info); + return 0; + + default: + return hdlc_ioctl(dev, ifr, cmd); + } +} + +/** + * called by network layer when transmit timeout is detected + * + * dev pointer to network device structure + */ +static void hdlcdev_tx_timeout(struct net_device *dev) +{ + struct slgt_info *info = dev_to_port(dev); + unsigned long flags; + + DBGINFO(("%s hdlcdev_tx_timeout\n", dev->name)); + + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; + + spin_lock_irqsave(&info->lock,flags); + tx_stop(info); + spin_unlock_irqrestore(&info->lock,flags); + + netif_wake_queue(dev); +} + +/** + * called by device driver when transmit completes + * reenable network layer transmit if stopped + * + * info pointer to device instance information + */ +static void hdlcdev_tx_done(struct slgt_info *info) +{ + if (netif_queue_stopped(info->netdev)) + netif_wake_queue(info->netdev); +} + +/** + * called by device driver when frame received + * pass frame to network layer + * + * info pointer to device instance information + * buf pointer to buffer contianing frame data + * size count of data bytes in buf + */ +static void hdlcdev_rx(struct slgt_info *info, char *buf, int size) +{ + struct sk_buff *skb = dev_alloc_skb(size); + struct net_device *dev = info->netdev; + + DBGINFO(("%s hdlcdev_rx\n", dev->name)); + + if (skb == NULL) { + DBGERR(("%s: can't alloc skb, drop packet\n", dev->name)); + dev->stats.rx_dropped++; + return; + } + + memcpy(skb_put(skb, size), buf, size); + + skb->protocol = hdlc_type_trans(skb, dev); + + dev->stats.rx_packets++; + dev->stats.rx_bytes += size; + + netif_rx(skb); +} + +static const struct net_device_ops hdlcdev_ops = { + .ndo_open = hdlcdev_open, + .ndo_stop = hdlcdev_close, + .ndo_change_mtu = hdlc_change_mtu, + .ndo_start_xmit = hdlc_start_xmit, + .ndo_do_ioctl = hdlcdev_ioctl, + .ndo_tx_timeout = hdlcdev_tx_timeout, +}; + +/** + * called by device driver when adding device instance + * do generic HDLC initialization + * + * info pointer to device instance information + * + * returns 0 if success, otherwise error code + */ +static int 
hdlcdev_init(struct slgt_info *info) +{ + int rc; + struct net_device *dev; + hdlc_device *hdlc; + + /* allocate and initialize network and HDLC layer objects */ + + if (!(dev = alloc_hdlcdev(info))) { + printk(KERN_ERR "%s hdlc device alloc failure\n", info->device_name); + return -ENOMEM; + } + + /* for network layer reporting purposes only */ + dev->mem_start = info->phys_reg_addr; + dev->mem_end = info->phys_reg_addr + SLGT_REG_SIZE - 1; + dev->irq = info->irq_level; + + /* network layer callbacks and settings */ + dev->netdev_ops = &hdlcdev_ops; + dev->watchdog_timeo = 10 * HZ; + dev->tx_queue_len = 50; + + /* generic HDLC layer callbacks and settings */ + hdlc = dev_to_hdlc(dev); + hdlc->attach = hdlcdev_attach; + hdlc->xmit = hdlcdev_xmit; + + /* register objects with HDLC layer */ + if ((rc = register_hdlc_device(dev))) { + printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__); + free_netdev(dev); + return rc; + } + + info->netdev = dev; + return 0; +} + +/** + * called by device driver when removing device instance + * do generic HDLC cleanup + * + * info pointer to device instance information + */ +static void hdlcdev_exit(struct slgt_info *info) +{ + unregister_hdlc_device(info->netdev); + free_netdev(info->netdev); + info->netdev = NULL; +} + +#endif /* ifdef CONFIG_HDLC */ + +/* + * get async data from rx DMA buffers + */ +static void rx_async(struct slgt_info *info) +{ + struct tty_struct *tty = info->port.tty; + struct mgsl_icount *icount = &info->icount; + unsigned int start, end; + unsigned char *p; + unsigned char status; + struct slgt_desc *bufs = info->rbufs; + int i, count; + int chars = 0; + int stat; + unsigned char ch; + + start = end = info->rbuf_current; + + while(desc_complete(bufs[end])) { + count = desc_count(bufs[end]) - info->rbuf_index; + p = bufs[end].buf + info->rbuf_index; + + DBGISR(("%s rx_async count=%d\n", info->device_name, count)); + DBGDATA(info, p, count, "rx"); + + for(i=0 ; i < count; i+=2, p+=2) { + ch = *p; + icount->rx++; + + stat = 0; + + if ((status = *(p+1) & (BIT1 + BIT0))) { + if (status & BIT1) + icount->parity++; + else if (status & BIT0) + icount->frame++; + /* discard char if tty control flags say so */ + if (status & info->ignore_status_mask) + continue; + if (status & BIT1) + stat = TTY_PARITY; + else if (status & BIT0) + stat = TTY_FRAME; + } + if (tty) { + tty_insert_flip_char(tty, ch, stat); + chars++; + } + } + + if (i < count) { + /* receive buffer not completed */ + info->rbuf_index += i; + mod_timer(&info->rx_timer, jiffies + 1); + break; + } + + info->rbuf_index = 0; + free_rbufs(info, end, end); + + if (++end == info->rbuf_count) + end = 0; + + /* if entire list searched then no frame available */ + if (end == start) + break; + } + + if (tty && chars) + tty_flip_buffer_push(tty); +} + +/* + * return next bottom half action to perform + */ +static int bh_action(struct slgt_info *info) +{ + unsigned long flags; + int rc; + + spin_lock_irqsave(&info->lock,flags); + + if (info->pending_bh & BH_RECEIVE) { + info->pending_bh &= ~BH_RECEIVE; + rc = BH_RECEIVE; + } else if (info->pending_bh & BH_TRANSMIT) { + info->pending_bh &= ~BH_TRANSMIT; + rc = BH_TRANSMIT; + } else if (info->pending_bh & BH_STATUS) { + info->pending_bh &= ~BH_STATUS; + rc = BH_STATUS; + } else { + /* Mark BH routine as complete */ + info->bh_running = false; + info->bh_requested = false; + rc = 0; + } + + spin_unlock_irqrestore(&info->lock,flags); + + return rc; +} + +/* + * perform bottom half processing + */ +static void bh_handler(struct 
work_struct *work) +{ + struct slgt_info *info = container_of(work, struct slgt_info, task); + int action; + + if (!info) + return; + info->bh_running = true; + + while((action = bh_action(info))) { + switch (action) { + case BH_RECEIVE: + DBGBH(("%s bh receive\n", info->device_name)); + switch(info->params.mode) { + case MGSL_MODE_ASYNC: + rx_async(info); + break; + case MGSL_MODE_HDLC: + while(rx_get_frame(info)); + break; + case MGSL_MODE_RAW: + case MGSL_MODE_MONOSYNC: + case MGSL_MODE_BISYNC: + case MGSL_MODE_XSYNC: + while(rx_get_buf(info)); + break; + } + /* restart receiver if rx DMA buffers exhausted */ + if (info->rx_restart) + rx_start(info); + break; + case BH_TRANSMIT: + bh_transmit(info); + break; + case BH_STATUS: + DBGBH(("%s bh status\n", info->device_name)); + info->ri_chkcount = 0; + info->dsr_chkcount = 0; + info->dcd_chkcount = 0; + info->cts_chkcount = 0; + break; + default: + DBGBH(("%s unknown action\n", info->device_name)); + break; + } + } + DBGBH(("%s bh_handler exit\n", info->device_name)); +} + +static void bh_transmit(struct slgt_info *info) +{ + struct tty_struct *tty = info->port.tty; + + DBGBH(("%s bh_transmit\n", info->device_name)); + if (tty) + tty_wakeup(tty); +} + +static void dsr_change(struct slgt_info *info, unsigned short status) +{ + if (status & BIT3) { + info->signals |= SerialSignal_DSR; + info->input_signal_events.dsr_up++; + } else { + info->signals &= ~SerialSignal_DSR; + info->input_signal_events.dsr_down++; + } + DBGISR(("dsr_change %s signals=%04X\n", info->device_name, info->signals)); + if ((info->dsr_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) { + slgt_irq_off(info, IRQ_DSR); + return; + } + info->icount.dsr++; + wake_up_interruptible(&info->status_event_wait_q); + wake_up_interruptible(&info->event_wait_q); + info->pending_bh |= BH_STATUS; +} + +static void cts_change(struct slgt_info *info, unsigned short status) +{ + if (status & BIT2) { + info->signals |= SerialSignal_CTS; + info->input_signal_events.cts_up++; + } else { + info->signals &= ~SerialSignal_CTS; + info->input_signal_events.cts_down++; + } + DBGISR(("cts_change %s signals=%04X\n", info->device_name, info->signals)); + if ((info->cts_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) { + slgt_irq_off(info, IRQ_CTS); + return; + } + info->icount.cts++; + wake_up_interruptible(&info->status_event_wait_q); + wake_up_interruptible(&info->event_wait_q); + info->pending_bh |= BH_STATUS; + + if (info->port.flags & ASYNC_CTS_FLOW) { + if (info->port.tty) { + if (info->port.tty->hw_stopped) { + if (info->signals & SerialSignal_CTS) { + info->port.tty->hw_stopped = 0; + info->pending_bh |= BH_TRANSMIT; + return; + } + } else { + if (!(info->signals & SerialSignal_CTS)) + info->port.tty->hw_stopped = 1; + } + } + } +} + +static void dcd_change(struct slgt_info *info, unsigned short status) +{ + if (status & BIT1) { + info->signals |= SerialSignal_DCD; + info->input_signal_events.dcd_up++; + } else { + info->signals &= ~SerialSignal_DCD; + info->input_signal_events.dcd_down++; + } + DBGISR(("dcd_change %s signals=%04X\n", info->device_name, info->signals)); + if ((info->dcd_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) { + slgt_irq_off(info, IRQ_DCD); + return; + } + info->icount.dcd++; +#if SYNCLINK_GENERIC_HDLC + if (info->netcount) { + if (info->signals & SerialSignal_DCD) + netif_carrier_on(info->netdev); + else + netif_carrier_off(info->netdev); + } +#endif + wake_up_interruptible(&info->status_event_wait_q); + wake_up_interruptible(&info->event_wait_q); + info->pending_bh |= BH_STATUS; + + if 
(info->port.flags & ASYNC_CHECK_CD) { + if (info->signals & SerialSignal_DCD) + wake_up_interruptible(&info->port.open_wait); + else { + if (info->port.tty) + tty_hangup(info->port.tty); + } + } +} + +static void ri_change(struct slgt_info *info, unsigned short status) +{ + if (status & BIT0) { + info->signals |= SerialSignal_RI; + info->input_signal_events.ri_up++; + } else { + info->signals &= ~SerialSignal_RI; + info->input_signal_events.ri_down++; + } + DBGISR(("ri_change %s signals=%04X\n", info->device_name, info->signals)); + if ((info->ri_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) { + slgt_irq_off(info, IRQ_RI); + return; + } + info->icount.rng++; + wake_up_interruptible(&info->status_event_wait_q); + wake_up_interruptible(&info->event_wait_q); + info->pending_bh |= BH_STATUS; +} + +static void isr_rxdata(struct slgt_info *info) +{ + unsigned int count = info->rbuf_fill_count; + unsigned int i = info->rbuf_fill_index; + unsigned short reg; + + while (rd_reg16(info, SSR) & IRQ_RXDATA) { + reg = rd_reg16(info, RDR); + DBGISR(("isr_rxdata %s RDR=%04X\n", info->device_name, reg)); + if (desc_complete(info->rbufs[i])) { + /* all buffers full */ + rx_stop(info); + info->rx_restart = 1; + continue; + } + info->rbufs[i].buf[count++] = (unsigned char)reg; + /* async mode saves status byte to buffer for each data byte */ + if (info->params.mode == MGSL_MODE_ASYNC) + info->rbufs[i].buf[count++] = (unsigned char)(reg >> 8); + if (count == info->rbuf_fill_level || (reg & BIT10)) { + /* buffer full or end of frame */ + set_desc_count(info->rbufs[i], count); + set_desc_status(info->rbufs[i], BIT15 | (reg >> 8)); + info->rbuf_fill_count = count = 0; + if (++i == info->rbuf_count) + i = 0; + info->pending_bh |= BH_RECEIVE; + } + } + + info->rbuf_fill_index = i; + info->rbuf_fill_count = count; +} + +static void isr_serial(struct slgt_info *info) +{ + unsigned short status = rd_reg16(info, SSR); + + DBGISR(("%s isr_serial status=%04X\n", info->device_name, status)); + + wr_reg16(info, SSR, status); /* clear pending */ + + info->irq_occurred = true; + + if (info->params.mode == MGSL_MODE_ASYNC) { + if (status & IRQ_TXIDLE) { + if (info->tx_active) + isr_txeom(info, status); + } + if (info->rx_pio && (status & IRQ_RXDATA)) + isr_rxdata(info); + if ((status & IRQ_RXBREAK) && (status & RXBREAK)) { + info->icount.brk++; + /* process break detection if tty control allows */ + if (info->port.tty) { + if (!(status & info->ignore_status_mask)) { + if (info->read_status_mask & MASK_BREAK) { + tty_insert_flip_char(info->port.tty, 0, TTY_BREAK); + if (info->port.flags & ASYNC_SAK) + do_SAK(info->port.tty); + } + } + } + } + } else { + if (status & (IRQ_TXIDLE + IRQ_TXUNDER)) + isr_txeom(info, status); + if (info->rx_pio && (status & IRQ_RXDATA)) + isr_rxdata(info); + if (status & IRQ_RXIDLE) { + if (status & RXIDLE) + info->icount.rxidle++; + else + info->icount.exithunt++; + wake_up_interruptible(&info->event_wait_q); + } + + if (status & IRQ_RXOVER) + rx_start(info); + } + + if (status & IRQ_DSR) + dsr_change(info, status); + if (status & IRQ_CTS) + cts_change(info, status); + if (status & IRQ_DCD) + dcd_change(info, status); + if (status & IRQ_RI) + ri_change(info, status); +} + +static void isr_rdma(struct slgt_info *info) +{ + unsigned int status = rd_reg32(info, RDCSR); + + DBGISR(("%s isr_rdma status=%08x\n", info->device_name, status)); + + /* RDCSR (rx DMA control/status) + * + * 31..07 reserved + * 06 save status byte to DMA buffer + * 05 error + * 04 eol (end of list) + * 03 eob (end of buffer) + * 02 IRQ 
enable + * 01 reset + * 00 enable + */ + wr_reg32(info, RDCSR, status); /* clear pending */ + + if (status & (BIT5 + BIT4)) { + DBGISR(("%s isr_rdma rx_restart=1\n", info->device_name)); + info->rx_restart = true; + } + info->pending_bh |= BH_RECEIVE; +} + +static void isr_tdma(struct slgt_info *info) +{ + unsigned int status = rd_reg32(info, TDCSR); + + DBGISR(("%s isr_tdma status=%08x\n", info->device_name, status)); + + /* TDCSR (tx DMA control/status) + * + * 31..06 reserved + * 05 error + * 04 eol (end of list) + * 03 eob (end of buffer) + * 02 IRQ enable + * 01 reset + * 00 enable + */ + wr_reg32(info, TDCSR, status); /* clear pending */ + + if (status & (BIT5 + BIT4 + BIT3)) { + // another transmit buffer has completed + // run bottom half to get more send data from user + info->pending_bh |= BH_TRANSMIT; + } +} + +/* + * return true if there are unsent tx DMA buffers, otherwise false + * + * if there are unsent buffers then info->tbuf_start + * is set to index of first unsent buffer + */ +static bool unsent_tbufs(struct slgt_info *info) +{ + unsigned int i = info->tbuf_current; + bool rc = false; + + /* + * search backwards from last loaded buffer (precedes tbuf_current) + * for first unsent buffer (desc_count > 0) + */ + + do { + if (i) + i--; + else + i = info->tbuf_count - 1; + if (!desc_count(info->tbufs[i])) + break; + info->tbuf_start = i; + rc = true; + } while (i != info->tbuf_current); + + return rc; +} + +static void isr_txeom(struct slgt_info *info, unsigned short status) +{ + DBGISR(("%s txeom status=%04x\n", info->device_name, status)); + + slgt_irq_off(info, IRQ_TXDATA + IRQ_TXIDLE + IRQ_TXUNDER); + tdma_reset(info); + if (status & IRQ_TXUNDER) { + unsigned short val = rd_reg16(info, TCR); + wr_reg16(info, TCR, (unsigned short)(val | BIT2)); /* set reset bit */ + wr_reg16(info, TCR, val); /* clear reset bit */ + } + + if (info->tx_active) { + if (info->params.mode != MGSL_MODE_ASYNC) { + if (status & IRQ_TXUNDER) + info->icount.txunder++; + else if (status & IRQ_TXIDLE) + info->icount.txok++; + } + + if (unsent_tbufs(info)) { + tx_start(info); + update_tx_timer(info); + return; + } + info->tx_active = false; + + del_timer(&info->tx_timer); + + if (info->params.mode != MGSL_MODE_ASYNC && info->drop_rts_on_tx_done) { + info->signals &= ~SerialSignal_RTS; + info->drop_rts_on_tx_done = false; + set_signals(info); + } + +#if SYNCLINK_GENERIC_HDLC + if (info->netcount) + hdlcdev_tx_done(info); + else +#endif + { + if (info->port.tty && (info->port.tty->stopped || info->port.tty->hw_stopped)) { + tx_stop(info); + return; + } + info->pending_bh |= BH_TRANSMIT; + } + } +} + +static void isr_gpio(struct slgt_info *info, unsigned int changed, unsigned int state) +{ + struct cond_wait *w, *prev; + + /* wake processes waiting for specific transitions */ + for (w = info->gpio_wait_q, prev = NULL ; w != NULL ; w = w->next) { + if (w->data & changed) { + w->data = state; + wake_up_interruptible(&w->q); + if (prev != NULL) + prev->next = w->next; + else + info->gpio_wait_q = w->next; + } else + prev = w; + } +} + +/* interrupt service routine + * + * irq interrupt number + * dev_id device ID supplied during interrupt registration + */ +static irqreturn_t slgt_interrupt(int dummy, void *dev_id) +{ + struct slgt_info *info = dev_id; + unsigned int gsr; + unsigned int i; + + DBGISR(("slgt_interrupt irq=%d entry\n", info->irq_level)); + + while((gsr = rd_reg32(info, GSR) & 0xffffff00)) { + DBGISR(("%s gsr=%08x\n", info->device_name, gsr)); + info->irq_occurred = true; + for(i=0; i < 
info->port_count ; i++) { + if (info->port_array[i] == NULL) + continue; + spin_lock(&info->port_array[i]->lock); + if (gsr & (BIT8 << i)) + isr_serial(info->port_array[i]); + if (gsr & (BIT16 << (i*2))) + isr_rdma(info->port_array[i]); + if (gsr & (BIT17 << (i*2))) + isr_tdma(info->port_array[i]); + spin_unlock(&info->port_array[i]->lock); + } + } + + if (info->gpio_present) { + unsigned int state; + unsigned int changed; + spin_lock(&info->lock); + while ((changed = rd_reg32(info, IOSR)) != 0) { + DBGISR(("%s iosr=%08x\n", info->device_name, changed)); + /* read latched state of GPIO signals */ + state = rd_reg32(info, IOVR); + /* clear pending GPIO interrupt bits */ + wr_reg32(info, IOSR, changed); + for (i=0 ; i < info->port_count ; i++) { + if (info->port_array[i] != NULL) + isr_gpio(info->port_array[i], changed, state); + } + } + spin_unlock(&info->lock); + } + + for(i=0; i < info->port_count ; i++) { + struct slgt_info *port = info->port_array[i]; + if (port == NULL) + continue; + spin_lock(&port->lock); + if ((port->port.count || port->netcount) && + port->pending_bh && !port->bh_running && + !port->bh_requested) { + DBGISR(("%s bh queued\n", port->device_name)); + schedule_work(&port->task); + port->bh_requested = true; + } + spin_unlock(&port->lock); + } + + DBGISR(("slgt_interrupt irq=%d exit\n", info->irq_level)); + return IRQ_HANDLED; +} + +static int startup(struct slgt_info *info) +{ + DBGINFO(("%s startup\n", info->device_name)); + + if (info->port.flags & ASYNC_INITIALIZED) + return 0; + + if (!info->tx_buf) { + info->tx_buf = kmalloc(info->max_frame_size, GFP_KERNEL); + if (!info->tx_buf) { + DBGERR(("%s can't allocate tx buffer\n", info->device_name)); + return -ENOMEM; + } + } + + info->pending_bh = 0; + + memset(&info->icount, 0, sizeof(info->icount)); + + /* program hardware for current parameters */ + change_params(info); + + if (info->port.tty) + clear_bit(TTY_IO_ERROR, &info->port.tty->flags); + + info->port.flags |= ASYNC_INITIALIZED; + + return 0; +} + +/* + * called by close() and hangup() to shutdown hardware + */ +static void shutdown(struct slgt_info *info) +{ + unsigned long flags; + + if (!(info->port.flags & ASYNC_INITIALIZED)) + return; + + DBGINFO(("%s shutdown\n", info->device_name)); + + /* clear status wait queue because status changes */ + /* can't happen after shutting down the hardware */ + wake_up_interruptible(&info->status_event_wait_q); + wake_up_interruptible(&info->event_wait_q); + + del_timer_sync(&info->tx_timer); + del_timer_sync(&info->rx_timer); + + kfree(info->tx_buf); + info->tx_buf = NULL; + + spin_lock_irqsave(&info->lock,flags); + + tx_stop(info); + rx_stop(info); + + slgt_irq_off(info, IRQ_ALL | IRQ_MASTER); + + if (!info->port.tty || info->port.tty->termios->c_cflag & HUPCL) { + info->signals &= ~(SerialSignal_DTR + SerialSignal_RTS); + set_signals(info); + } + + flush_cond_wait(&info->gpio_wait_q); + + spin_unlock_irqrestore(&info->lock,flags); + + if (info->port.tty) + set_bit(TTY_IO_ERROR, &info->port.tty->flags); + + info->port.flags &= ~ASYNC_INITIALIZED; +} + +static void program_hw(struct slgt_info *info) +{ + unsigned long flags; + + spin_lock_irqsave(&info->lock,flags); + + rx_stop(info); + tx_stop(info); + + if (info->params.mode != MGSL_MODE_ASYNC || + info->netcount) + sync_mode(info); + else + async_mode(info); + + set_signals(info); + + info->dcd_chkcount = 0; + info->cts_chkcount = 0; + info->ri_chkcount = 0; + info->dsr_chkcount = 0; + + slgt_irq_on(info, IRQ_DCD | IRQ_CTS | IRQ_DSR | IRQ_RI); + get_signals(info); 
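Interrupt handling in this driver is two-level: slgt_interrupt() and the isr_* helpers record work in pending_bh and queue info->task, while bh_handler()/bh_action() later drain those bits with the port lock held. The sketch below shows that deferral pattern in isolation, under the assumption of a stand-alone work item; the names are illustrative, not the driver's.

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#define BH_RX 1
#define BH_TX 2

struct deferred {
	spinlock_t lock;
	unsigned int pending;	/* bits set by the ISR */
	bool queued;		/* work item already scheduled */
	struct work_struct work;
};

/* called with d->lock held from interrupt context */
static void isr_mark(struct deferred *d, unsigned int bits)
{
	d->pending |= bits;
	if (!d->queued) {
		d->queued = true;
		schedule_work(&d->work);
	}
}

static void bh_work(struct work_struct *work)
{
	struct deferred *d = container_of(work, struct deferred, work);
	unsigned long flags;
	unsigned int bits;

	for (;;) {
		spin_lock_irqsave(&d->lock, flags);
		bits = d->pending;
		d->pending = 0;
		if (!bits)
			d->queued = false;	/* nothing left, allow requeue */
		spin_unlock_irqrestore(&d->lock, flags);
		if (!bits)
			break;
		/* act on BH_RX / BH_TX here */
	}
}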
+ + if (info->netcount || + (info->port.tty && info->port.tty->termios->c_cflag & CREAD)) + rx_start(info); + + spin_unlock_irqrestore(&info->lock,flags); +} + +/* + * reconfigure adapter based on new parameters + */ +static void change_params(struct slgt_info *info) +{ + unsigned cflag; + int bits_per_char; + + if (!info->port.tty || !info->port.tty->termios) + return; + DBGINFO(("%s change_params\n", info->device_name)); + + cflag = info->port.tty->termios->c_cflag; + + /* if B0 rate (hangup) specified then negate DTR and RTS */ + /* otherwise assert DTR and RTS */ + if (cflag & CBAUD) + info->signals |= SerialSignal_RTS + SerialSignal_DTR; + else + info->signals &= ~(SerialSignal_RTS + SerialSignal_DTR); + + /* byte size and parity */ + + switch (cflag & CSIZE) { + case CS5: info->params.data_bits = 5; break; + case CS6: info->params.data_bits = 6; break; + case CS7: info->params.data_bits = 7; break; + case CS8: info->params.data_bits = 8; break; + default: info->params.data_bits = 7; break; + } + + info->params.stop_bits = (cflag & CSTOPB) ? 2 : 1; + + if (cflag & PARENB) + info->params.parity = (cflag & PARODD) ? ASYNC_PARITY_ODD : ASYNC_PARITY_EVEN; + else + info->params.parity = ASYNC_PARITY_NONE; + + /* calculate number of jiffies to transmit a full + * FIFO (32 bytes) at specified data rate + */ + bits_per_char = info->params.data_bits + + info->params.stop_bits + 1; + + info->params.data_rate = tty_get_baud_rate(info->port.tty); + + if (info->params.data_rate) { + info->timeout = (32*HZ*bits_per_char) / + info->params.data_rate; + } + info->timeout += HZ/50; /* Add .02 seconds of slop */ + + if (cflag & CRTSCTS) + info->port.flags |= ASYNC_CTS_FLOW; + else + info->port.flags &= ~ASYNC_CTS_FLOW; + + if (cflag & CLOCAL) + info->port.flags &= ~ASYNC_CHECK_CD; + else + info->port.flags |= ASYNC_CHECK_CD; + + /* process tty input control flags */ + + info->read_status_mask = IRQ_RXOVER; + if (I_INPCK(info->port.tty)) + info->read_status_mask |= MASK_PARITY | MASK_FRAMING; + if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty)) + info->read_status_mask |= MASK_BREAK; + if (I_IGNPAR(info->port.tty)) + info->ignore_status_mask |= MASK_PARITY | MASK_FRAMING; + if (I_IGNBRK(info->port.tty)) { + info->ignore_status_mask |= MASK_BREAK; + /* If ignoring parity and break indicators, ignore + * overruns too. (For real raw support). 
+ */ + if (I_IGNPAR(info->port.tty)) + info->ignore_status_mask |= MASK_OVERRUN; + } + + program_hw(info); +} + +static int get_stats(struct slgt_info *info, struct mgsl_icount __user *user_icount) +{ + DBGINFO(("%s get_stats\n", info->device_name)); + if (!user_icount) { + memset(&info->icount, 0, sizeof(info->icount)); + } else { + if (copy_to_user(user_icount, &info->icount, sizeof(struct mgsl_icount))) + return -EFAULT; + } + return 0; +} + +static int get_params(struct slgt_info *info, MGSL_PARAMS __user *user_params) +{ + DBGINFO(("%s get_params\n", info->device_name)); + if (copy_to_user(user_params, &info->params, sizeof(MGSL_PARAMS))) + return -EFAULT; + return 0; +} + +static int set_params(struct slgt_info *info, MGSL_PARAMS __user *new_params) +{ + unsigned long flags; + MGSL_PARAMS tmp_params; + + DBGINFO(("%s set_params\n", info->device_name)); + if (copy_from_user(&tmp_params, new_params, sizeof(MGSL_PARAMS))) + return -EFAULT; + + spin_lock_irqsave(&info->lock, flags); + if (tmp_params.mode == MGSL_MODE_BASE_CLOCK) + info->base_clock = tmp_params.clock_speed; + else + memcpy(&info->params, &tmp_params, sizeof(MGSL_PARAMS)); + spin_unlock_irqrestore(&info->lock, flags); + + program_hw(info); + + return 0; +} + +static int get_txidle(struct slgt_info *info, int __user *idle_mode) +{ + DBGINFO(("%s get_txidle=%d\n", info->device_name, info->idle_mode)); + if (put_user(info->idle_mode, idle_mode)) + return -EFAULT; + return 0; +} + +static int set_txidle(struct slgt_info *info, int idle_mode) +{ + unsigned long flags; + DBGINFO(("%s set_txidle(%d)\n", info->device_name, idle_mode)); + spin_lock_irqsave(&info->lock,flags); + info->idle_mode = idle_mode; + if (info->params.mode != MGSL_MODE_ASYNC) + tx_set_idle(info); + spin_unlock_irqrestore(&info->lock,flags); + return 0; +} + +static int tx_enable(struct slgt_info *info, int enable) +{ + unsigned long flags; + DBGINFO(("%s tx_enable(%d)\n", info->device_name, enable)); + spin_lock_irqsave(&info->lock,flags); + if (enable) { + if (!info->tx_enabled) + tx_start(info); + } else { + if (info->tx_enabled) + tx_stop(info); + } + spin_unlock_irqrestore(&info->lock,flags); + return 0; +} + +/* + * abort transmit HDLC frame + */ +static int tx_abort(struct slgt_info *info) +{ + unsigned long flags; + DBGINFO(("%s tx_abort\n", info->device_name)); + spin_lock_irqsave(&info->lock,flags); + tdma_reset(info); + spin_unlock_irqrestore(&info->lock,flags); + return 0; +} + +static int rx_enable(struct slgt_info *info, int enable) +{ + unsigned long flags; + unsigned int rbuf_fill_level; + DBGINFO(("%s rx_enable(%08x)\n", info->device_name, enable)); + spin_lock_irqsave(&info->lock,flags); + /* + * enable[31..16] = receive DMA buffer fill level + * 0 = noop (leave fill level unchanged) + * fill level must be multiple of 4 and <= buffer size + */ + rbuf_fill_level = ((unsigned int)enable) >> 16; + if (rbuf_fill_level) { + if ((rbuf_fill_level > DMABUFSIZE) || (rbuf_fill_level % 4)) { + spin_unlock_irqrestore(&info->lock, flags); + return -EINVAL; + } + info->rbuf_fill_level = rbuf_fill_level; + if (rbuf_fill_level < 128) + info->rx_pio = 1; /* PIO mode */ + else + info->rx_pio = 0; /* DMA mode */ + rx_stop(info); /* restart receiver to use new fill level */ + } + + /* + * enable[1..0] = receiver enable command + * 0 = disable + * 1 = enable + * 2 = enable or force hunt mode if already enabled + */ + enable &= 3; + if (enable) { + if (!info->rx_enabled) + rx_start(info); + else if (enable == 2) { + /* force hunt mode (write 1 to RCR[3]) */ + 
wr_reg16(info, RCR, rd_reg16(info, RCR) | BIT3); + } + } else { + if (info->rx_enabled) + rx_stop(info); + } + spin_unlock_irqrestore(&info->lock,flags); + return 0; +} + +/* + * wait for specified event to occur + */ +static int wait_mgsl_event(struct slgt_info *info, int __user *mask_ptr) +{ + unsigned long flags; + int s; + int rc=0; + struct mgsl_icount cprev, cnow; + int events; + int mask; + struct _input_signal_events oldsigs, newsigs; + DECLARE_WAITQUEUE(wait, current); + + if (get_user(mask, mask_ptr)) + return -EFAULT; + + DBGINFO(("%s wait_mgsl_event(%d)\n", info->device_name, mask)); + + spin_lock_irqsave(&info->lock,flags); + + /* return immediately if state matches requested events */ + get_signals(info); + s = info->signals; + + events = mask & + ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) + + ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) + + ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) + + ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) ); + if (events) { + spin_unlock_irqrestore(&info->lock,flags); + goto exit; + } + + /* save current irq counts */ + cprev = info->icount; + oldsigs = info->input_signal_events; + + /* enable hunt and idle irqs if needed */ + if (mask & (MgslEvent_ExitHuntMode+MgslEvent_IdleReceived)) { + unsigned short val = rd_reg16(info, SCR); + if (!(val & IRQ_RXIDLE)) + wr_reg16(info, SCR, (unsigned short)(val | IRQ_RXIDLE)); + } + + set_current_state(TASK_INTERRUPTIBLE); + add_wait_queue(&info->event_wait_q, &wait); + + spin_unlock_irqrestore(&info->lock,flags); + + for(;;) { + schedule(); + if (signal_pending(current)) { + rc = -ERESTARTSYS; + break; + } + + /* get current irq counts */ + spin_lock_irqsave(&info->lock,flags); + cnow = info->icount; + newsigs = info->input_signal_events; + set_current_state(TASK_INTERRUPTIBLE); + spin_unlock_irqrestore(&info->lock,flags); + + /* if no change, wait aborted for some reason */ + if (newsigs.dsr_up == oldsigs.dsr_up && + newsigs.dsr_down == oldsigs.dsr_down && + newsigs.dcd_up == oldsigs.dcd_up && + newsigs.dcd_down == oldsigs.dcd_down && + newsigs.cts_up == oldsigs.cts_up && + newsigs.cts_down == oldsigs.cts_down && + newsigs.ri_up == oldsigs.ri_up && + newsigs.ri_down == oldsigs.ri_down && + cnow.exithunt == cprev.exithunt && + cnow.rxidle == cprev.rxidle) { + rc = -EIO; + break; + } + + events = mask & + ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) + + (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) + + (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) + + (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) + + (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) + + (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) + + (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) + + (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) + + (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) + + (cnow.rxidle != cprev.rxidle ? 
MgslEvent_IdleReceived:0) ); + if (events) + break; + + cprev = cnow; + oldsigs = newsigs; + } + + remove_wait_queue(&info->event_wait_q, &wait); + set_current_state(TASK_RUNNING); + + + if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) { + spin_lock_irqsave(&info->lock,flags); + if (!waitqueue_active(&info->event_wait_q)) { + /* disable enable exit hunt mode/idle rcvd IRQs */ + wr_reg16(info, SCR, + (unsigned short)(rd_reg16(info, SCR) & ~IRQ_RXIDLE)); + } + spin_unlock_irqrestore(&info->lock,flags); + } +exit: + if (rc == 0) + rc = put_user(events, mask_ptr); + return rc; +} + +static int get_interface(struct slgt_info *info, int __user *if_mode) +{ + DBGINFO(("%s get_interface=%x\n", info->device_name, info->if_mode)); + if (put_user(info->if_mode, if_mode)) + return -EFAULT; + return 0; +} + +static int set_interface(struct slgt_info *info, int if_mode) +{ + unsigned long flags; + unsigned short val; + + DBGINFO(("%s set_interface=%x)\n", info->device_name, if_mode)); + spin_lock_irqsave(&info->lock,flags); + info->if_mode = if_mode; + + msc_set_vcr(info); + + /* TCR (tx control) 07 1=RTS driver control */ + val = rd_reg16(info, TCR); + if (info->if_mode & MGSL_INTERFACE_RTS_EN) + val |= BIT7; + else + val &= ~BIT7; + wr_reg16(info, TCR, val); + + spin_unlock_irqrestore(&info->lock,flags); + return 0; +} + +static int get_xsync(struct slgt_info *info, int __user *xsync) +{ + DBGINFO(("%s get_xsync=%x\n", info->device_name, info->xsync)); + if (put_user(info->xsync, xsync)) + return -EFAULT; + return 0; +} + +/* + * set extended sync pattern (1 to 4 bytes) for extended sync mode + * + * sync pattern is contained in least significant bytes of value + * most significant byte of sync pattern is oldest (1st sent/detected) + */ +static int set_xsync(struct slgt_info *info, int xsync) +{ + unsigned long flags; + + DBGINFO(("%s set_xsync=%x)\n", info->device_name, xsync)); + spin_lock_irqsave(&info->lock, flags); + info->xsync = xsync; + wr_reg32(info, XSR, xsync); + spin_unlock_irqrestore(&info->lock, flags); + return 0; +} + +static int get_xctrl(struct slgt_info *info, int __user *xctrl) +{ + DBGINFO(("%s get_xctrl=%x\n", info->device_name, info->xctrl)); + if (put_user(info->xctrl, xctrl)) + return -EFAULT; + return 0; +} + +/* + * set extended control options + * + * xctrl[31:19] reserved, must be zero + * xctrl[18:17] extended sync pattern length in bytes + * 00 = 1 byte in xsr[7:0] + * 01 = 2 bytes in xsr[15:0] + * 10 = 3 bytes in xsr[23:0] + * 11 = 4 bytes in xsr[31:0] + * xctrl[16] 1 = enable terminal count, 0=disabled + * xctrl[15:0] receive terminal count for fixed length packets + * value is count minus one (0 = 1 byte packet) + * when terminal count is reached, receiver + * automatically returns to hunt mode and receive + * FIFO contents are flushed to DMA buffers with + * end of frame (EOF) status + */ +static int set_xctrl(struct slgt_info *info, int xctrl) +{ + unsigned long flags; + + DBGINFO(("%s set_xctrl=%x)\n", info->device_name, xctrl)); + spin_lock_irqsave(&info->lock, flags); + info->xctrl = xctrl; + wr_reg32(info, XCR, xctrl); + spin_unlock_irqrestore(&info->lock, flags); + return 0; +} + +/* + * set general purpose IO pin state and direction + * + * user_gpio fields: + * state each bit indicates a pin state + * smask set bit indicates pin state to set + * dir each bit indicates a pin direction (0=input, 1=output) + * dmask set bit indicates pin direction to set + */ +static int set_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio) +{ + 
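/*
 * Illustrative sketch only, not part of the driver: to drive GPIO pin 0
 * as an output set high while leaving every other pin untouched, a
 * caller could fill the descriptor as
 *
 *     struct gpio_desc gpio = {
 *         .state = 0x01, .smask = 0x01,   // pin 0 state = high
 *         .dir   = 0x01, .dmask = 0x01,   // pin 0 direction = output
 *     };
 *
 * and pass it to this routine through the driver's set-GPIO ioctl; the
 * ioctl number itself is defined elsewhere and is only assumed here.
 */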
unsigned long flags; + struct gpio_desc gpio; + __u32 data; + + if (!info->gpio_present) + return -EINVAL; + if (copy_from_user(&gpio, user_gpio, sizeof(gpio))) + return -EFAULT; + DBGINFO(("%s set_gpio state=%08x smask=%08x dir=%08x dmask=%08x\n", + info->device_name, gpio.state, gpio.smask, + gpio.dir, gpio.dmask)); + + spin_lock_irqsave(&info->port_array[0]->lock, flags); + if (gpio.dmask) { + data = rd_reg32(info, IODR); + data |= gpio.dmask & gpio.dir; + data &= ~(gpio.dmask & ~gpio.dir); + wr_reg32(info, IODR, data); + } + if (gpio.smask) { + data = rd_reg32(info, IOVR); + data |= gpio.smask & gpio.state; + data &= ~(gpio.smask & ~gpio.state); + wr_reg32(info, IOVR, data); + } + spin_unlock_irqrestore(&info->port_array[0]->lock, flags); + + return 0; +} + +/* + * get general purpose IO pin state and direction + */ +static int get_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio) +{ + struct gpio_desc gpio; + if (!info->gpio_present) + return -EINVAL; + gpio.state = rd_reg32(info, IOVR); + gpio.smask = 0xffffffff; + gpio.dir = rd_reg32(info, IODR); + gpio.dmask = 0xffffffff; + if (copy_to_user(user_gpio, &gpio, sizeof(gpio))) + return -EFAULT; + DBGINFO(("%s get_gpio state=%08x dir=%08x\n", + info->device_name, gpio.state, gpio.dir)); + return 0; +} + +/* + * conditional wait facility + */ +static void init_cond_wait(struct cond_wait *w, unsigned int data) +{ + init_waitqueue_head(&w->q); + init_waitqueue_entry(&w->wait, current); + w->data = data; +} + +static void add_cond_wait(struct cond_wait **head, struct cond_wait *w) +{ + set_current_state(TASK_INTERRUPTIBLE); + add_wait_queue(&w->q, &w->wait); + w->next = *head; + *head = w; +} + +static void remove_cond_wait(struct cond_wait **head, struct cond_wait *cw) +{ + struct cond_wait *w, *prev; + remove_wait_queue(&cw->q, &cw->wait); + set_current_state(TASK_RUNNING); + for (w = *head, prev = NULL ; w != NULL ; prev = w, w = w->next) { + if (w == cw) { + if (prev != NULL) + prev->next = w->next; + else + *head = w->next; + break; + } + } +} + +static void flush_cond_wait(struct cond_wait **head) +{ + while (*head != NULL) { + wake_up_interruptible(&(*head)->q); + *head = (*head)->next; + } +} + +/* + * wait for general purpose I/O pin(s) to enter specified state + * + * user_gpio fields: + * state - bit indicates target pin state + * smask - set bit indicates watched pin + * + * The wait ends when at least one watched pin enters the specified + * state. When 0 (no error) is returned, user_gpio->state is set to the + * state of all GPIO pins when the wait ends. + * + * Note: Each pin may be a dedicated input, dedicated output, or + * configurable input/output. The number and configuration of pins + * varies with the specific adapter model. Only input pins (dedicated + * or configured) can be monitored with this function. 
+ */ +static int wait_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio) +{ + unsigned long flags; + int rc = 0; + struct gpio_desc gpio; + struct cond_wait wait; + u32 state; + + if (!info->gpio_present) + return -EINVAL; + if (copy_from_user(&gpio, user_gpio, sizeof(gpio))) + return -EFAULT; + DBGINFO(("%s wait_gpio() state=%08x smask=%08x\n", + info->device_name, gpio.state, gpio.smask)); + /* ignore output pins identified by set IODR bit */ + if ((gpio.smask &= ~rd_reg32(info, IODR)) == 0) + return -EINVAL; + init_cond_wait(&wait, gpio.smask); + + spin_lock_irqsave(&info->port_array[0]->lock, flags); + /* enable interrupts for watched pins */ + wr_reg32(info, IOER, rd_reg32(info, IOER) | gpio.smask); + /* get current pin states */ + state = rd_reg32(info, IOVR); + + if (gpio.smask & ~(state ^ gpio.state)) { + /* already in target state */ + gpio.state = state; + } else { + /* wait for target state */ + add_cond_wait(&info->gpio_wait_q, &wait); + spin_unlock_irqrestore(&info->port_array[0]->lock, flags); + schedule(); + if (signal_pending(current)) + rc = -ERESTARTSYS; + else + gpio.state = wait.data; + spin_lock_irqsave(&info->port_array[0]->lock, flags); + remove_cond_wait(&info->gpio_wait_q, &wait); + } + + /* disable all GPIO interrupts if no waiting processes */ + if (info->gpio_wait_q == NULL) + wr_reg32(info, IOER, 0); + spin_unlock_irqrestore(&info->port_array[0]->lock, flags); + + if ((rc == 0) && copy_to_user(user_gpio, &gpio, sizeof(gpio))) + rc = -EFAULT; + return rc; +} + +static int modem_input_wait(struct slgt_info *info,int arg) +{ + unsigned long flags; + int rc; + struct mgsl_icount cprev, cnow; + DECLARE_WAITQUEUE(wait, current); + + /* save current irq counts */ + spin_lock_irqsave(&info->lock,flags); + cprev = info->icount; + add_wait_queue(&info->status_event_wait_q, &wait); + set_current_state(TASK_INTERRUPTIBLE); + spin_unlock_irqrestore(&info->lock,flags); + + for(;;) { + schedule(); + if (signal_pending(current)) { + rc = -ERESTARTSYS; + break; + } + + /* get new irq counts */ + spin_lock_irqsave(&info->lock,flags); + cnow = info->icount; + set_current_state(TASK_INTERRUPTIBLE); + spin_unlock_irqrestore(&info->lock,flags); + + /* if no change, wait aborted for some reason */ + if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && + cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) { + rc = -EIO; + break; + } + + /* check for change in caller specified modem input */ + if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) || + (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) || + (arg & TIOCM_CD && cnow.dcd != cprev.dcd) || + (arg & TIOCM_CTS && cnow.cts != cprev.cts)) { + rc = 0; + break; + } + + cprev = cnow; + } + remove_wait_queue(&info->status_event_wait_q, &wait); + set_current_state(TASK_RUNNING); + return rc; +} + +/* + * return state of serial control and status signals + */ +static int tiocmget(struct tty_struct *tty) +{ + struct slgt_info *info = tty->driver_data; + unsigned int result; + unsigned long flags; + + spin_lock_irqsave(&info->lock,flags); + get_signals(info); + spin_unlock_irqrestore(&info->lock,flags); + + result = ((info->signals & SerialSignal_RTS) ? TIOCM_RTS:0) + + ((info->signals & SerialSignal_DTR) ? TIOCM_DTR:0) + + ((info->signals & SerialSignal_DCD) ? TIOCM_CAR:0) + + ((info->signals & SerialSignal_RI) ? TIOCM_RNG:0) + + ((info->signals & SerialSignal_DSR) ? TIOCM_DSR:0) + + ((info->signals & SerialSignal_CTS) ? 
TIOCM_CTS:0); + + DBGINFO(("%s tiocmget value=%08X\n", info->device_name, result)); + return result; +} + +/* + * set modem control signals (DTR/RTS) + * + * cmd signal command: TIOCMBIS = set bit TIOCMBIC = clear bit + * TIOCMSET = set/clear signal values + * value bit mask for command + */ +static int tiocmset(struct tty_struct *tty, + unsigned int set, unsigned int clear) +{ + struct slgt_info *info = tty->driver_data; + unsigned long flags; + + DBGINFO(("%s tiocmset(%x,%x)\n", info->device_name, set, clear)); + + if (set & TIOCM_RTS) + info->signals |= SerialSignal_RTS; + if (set & TIOCM_DTR) + info->signals |= SerialSignal_DTR; + if (clear & TIOCM_RTS) + info->signals &= ~SerialSignal_RTS; + if (clear & TIOCM_DTR) + info->signals &= ~SerialSignal_DTR; + + spin_lock_irqsave(&info->lock,flags); + set_signals(info); + spin_unlock_irqrestore(&info->lock,flags); + return 0; +} + +static int carrier_raised(struct tty_port *port) +{ + unsigned long flags; + struct slgt_info *info = container_of(port, struct slgt_info, port); + + spin_lock_irqsave(&info->lock,flags); + get_signals(info); + spin_unlock_irqrestore(&info->lock,flags); + return (info->signals & SerialSignal_DCD) ? 1 : 0; +} + +static void dtr_rts(struct tty_port *port, int on) +{ + unsigned long flags; + struct slgt_info *info = container_of(port, struct slgt_info, port); + + spin_lock_irqsave(&info->lock,flags); + if (on) + info->signals |= SerialSignal_RTS + SerialSignal_DTR; + else + info->signals &= ~(SerialSignal_RTS + SerialSignal_DTR); + set_signals(info); + spin_unlock_irqrestore(&info->lock,flags); +} + + +/* + * block current process until the device is ready to open + */ +static int block_til_ready(struct tty_struct *tty, struct file *filp, + struct slgt_info *info) +{ + DECLARE_WAITQUEUE(wait, current); + int retval; + bool do_clocal = false; + bool extra_count = false; + unsigned long flags; + int cd; + struct tty_port *port = &info->port; + + DBGINFO(("%s block_til_ready\n", tty->driver->name)); + + if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){ + /* nonblock mode is set or port is not enabled */ + port->flags |= ASYNC_NORMAL_ACTIVE; + return 0; + } + + if (tty->termios->c_cflag & CLOCAL) + do_clocal = true; + + /* Wait for carrier detect and the line to become + * free (i.e., not in use by the callout). While we are in + * this loop, port->count is dropped by one, so that + * close() knows when to free things. We restore it upon + * exit, either normal or abnormal. + */ + + retval = 0; + add_wait_queue(&port->open_wait, &wait); + + spin_lock_irqsave(&info->lock, flags); + if (!tty_hung_up_p(filp)) { + extra_count = true; + port->count--; + } + spin_unlock_irqrestore(&info->lock, flags); + port->blocked_open++; + + while (1) { + if ((tty->termios->c_cflag & CBAUD)) + tty_port_raise_dtr_rts(port); + + set_current_state(TASK_INTERRUPTIBLE); + + if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)){ + retval = (port->flags & ASYNC_HUP_NOTIFY) ? 
+ -EAGAIN : -ERESTARTSYS; + break; + } + + cd = tty_port_carrier_raised(port); + + if (!(port->flags & ASYNC_CLOSING) && (do_clocal || cd )) + break; + + if (signal_pending(current)) { + retval = -ERESTARTSYS; + break; + } + + DBGINFO(("%s block_til_ready wait\n", tty->driver->name)); + tty_unlock(); + schedule(); + tty_lock(); + } + + set_current_state(TASK_RUNNING); + remove_wait_queue(&port->open_wait, &wait); + + if (extra_count) + port->count++; + port->blocked_open--; + + if (!retval) + port->flags |= ASYNC_NORMAL_ACTIVE; + + DBGINFO(("%s block_til_ready ready, rc=%d\n", tty->driver->name, retval)); + return retval; +} + +static int alloc_tmp_rbuf(struct slgt_info *info) +{ + info->tmp_rbuf = kmalloc(info->max_frame_size + 5, GFP_KERNEL); + if (info->tmp_rbuf == NULL) + return -ENOMEM; + return 0; +} + +static void free_tmp_rbuf(struct slgt_info *info) +{ + kfree(info->tmp_rbuf); + info->tmp_rbuf = NULL; +} + +/* + * allocate DMA descriptor lists. + */ +static int alloc_desc(struct slgt_info *info) +{ + unsigned int i; + unsigned int pbufs; + + /* allocate memory to hold descriptor lists */ + info->bufs = pci_alloc_consistent(info->pdev, DESC_LIST_SIZE, &info->bufs_dma_addr); + if (info->bufs == NULL) + return -ENOMEM; + + memset(info->bufs, 0, DESC_LIST_SIZE); + + info->rbufs = (struct slgt_desc*)info->bufs; + info->tbufs = ((struct slgt_desc*)info->bufs) + info->rbuf_count; + + pbufs = (unsigned int)info->bufs_dma_addr; + + /* + * Build circular lists of descriptors + */ + + for (i=0; i < info->rbuf_count; i++) { + /* physical address of this descriptor */ + info->rbufs[i].pdesc = pbufs + (i * sizeof(struct slgt_desc)); + + /* physical address of next descriptor */ + if (i == info->rbuf_count - 1) + info->rbufs[i].next = cpu_to_le32(pbufs); + else + info->rbufs[i].next = cpu_to_le32(pbufs + ((i+1) * sizeof(struct slgt_desc))); + set_desc_count(info->rbufs[i], DMABUFSIZE); + } + + for (i=0; i < info->tbuf_count; i++) { + /* physical address of this descriptor */ + info->tbufs[i].pdesc = pbufs + ((info->rbuf_count + i) * sizeof(struct slgt_desc)); + + /* physical address of next descriptor */ + if (i == info->tbuf_count - 1) + info->tbufs[i].next = cpu_to_le32(pbufs + info->rbuf_count * sizeof(struct slgt_desc)); + else + info->tbufs[i].next = cpu_to_le32(pbufs + ((info->rbuf_count + i + 1) * sizeof(struct slgt_desc))); + } + + return 0; +} + +static void free_desc(struct slgt_info *info) +{ + if (info->bufs != NULL) { + pci_free_consistent(info->pdev, DESC_LIST_SIZE, info->bufs, info->bufs_dma_addr); + info->bufs = NULL; + info->rbufs = NULL; + info->tbufs = NULL; + } +} + +static int alloc_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count) +{ + int i; + for (i=0; i < count; i++) { + if ((bufs[i].buf = pci_alloc_consistent(info->pdev, DMABUFSIZE, &bufs[i].buf_dma_addr)) == NULL) + return -ENOMEM; + bufs[i].pbuf = cpu_to_le32((unsigned int)bufs[i].buf_dma_addr); + } + return 0; +} + +static void free_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count) +{ + int i; + for (i=0; i < count; i++) { + if (bufs[i].buf == NULL) + continue; + pci_free_consistent(info->pdev, DMABUFSIZE, bufs[i].buf, bufs[i].buf_dma_addr); + bufs[i].buf = NULL; + } +} + +static int alloc_dma_bufs(struct slgt_info *info) +{ + info->rbuf_count = 32; + info->tbuf_count = 32; + + if (alloc_desc(info) < 0 || + alloc_bufs(info, info->rbufs, info->rbuf_count) < 0 || + alloc_bufs(info, info->tbufs, info->tbuf_count) < 0 || + alloc_tmp_rbuf(info) < 0) { + DBGERR(("%s DMA buffer alloc fail\n", 
info->device_name)); + return -ENOMEM; + } + reset_rbufs(info); + return 0; +} + +static void free_dma_bufs(struct slgt_info *info) +{ + if (info->bufs) { + free_bufs(info, info->rbufs, info->rbuf_count); + free_bufs(info, info->tbufs, info->tbuf_count); + free_desc(info); + } + free_tmp_rbuf(info); +} + +static int claim_resources(struct slgt_info *info) +{ + if (request_mem_region(info->phys_reg_addr, SLGT_REG_SIZE, "synclink_gt") == NULL) { + DBGERR(("%s reg addr conflict, addr=%08X\n", + info->device_name, info->phys_reg_addr)); + info->init_error = DiagStatus_AddressConflict; + goto errout; + } + else + info->reg_addr_requested = true; + + info->reg_addr = ioremap_nocache(info->phys_reg_addr, SLGT_REG_SIZE); + if (!info->reg_addr) { + DBGERR(("%s cant map device registers, addr=%08X\n", + info->device_name, info->phys_reg_addr)); + info->init_error = DiagStatus_CantAssignPciResources; + goto errout; + } + return 0; + +errout: + release_resources(info); + return -ENODEV; +} + +static void release_resources(struct slgt_info *info) +{ + if (info->irq_requested) { + free_irq(info->irq_level, info); + info->irq_requested = false; + } + + if (info->reg_addr_requested) { + release_mem_region(info->phys_reg_addr, SLGT_REG_SIZE); + info->reg_addr_requested = false; + } + + if (info->reg_addr) { + iounmap(info->reg_addr); + info->reg_addr = NULL; + } +} + +/* Add the specified device instance data structure to the + * global linked list of devices and increment the device count. + */ +static void add_device(struct slgt_info *info) +{ + char *devstr; + + info->next_device = NULL; + info->line = slgt_device_count; + sprintf(info->device_name, "%s%d", tty_dev_prefix, info->line); + + if (info->line < MAX_DEVICES) { + if (maxframe[info->line]) + info->max_frame_size = maxframe[info->line]; + } + + slgt_device_count++; + + if (!slgt_device_list) + slgt_device_list = info; + else { + struct slgt_info *current_dev = slgt_device_list; + while(current_dev->next_device) + current_dev = current_dev->next_device; + current_dev->next_device = info; + } + + if (info->max_frame_size < 4096) + info->max_frame_size = 4096; + else if (info->max_frame_size > 65535) + info->max_frame_size = 65535; + + switch(info->pdev->device) { + case SYNCLINK_GT_DEVICE_ID: + devstr = "GT"; + break; + case SYNCLINK_GT2_DEVICE_ID: + devstr = "GT2"; + break; + case SYNCLINK_GT4_DEVICE_ID: + devstr = "GT4"; + break; + case SYNCLINK_AC_DEVICE_ID: + devstr = "AC"; + info->params.mode = MGSL_MODE_ASYNC; + break; + default: + devstr = "(unknown model)"; + } + printk("SyncLink %s %s IO=%08x IRQ=%d MaxFrameSize=%u\n", + devstr, info->device_name, info->phys_reg_addr, + info->irq_level, info->max_frame_size); + +#if SYNCLINK_GENERIC_HDLC + hdlcdev_init(info); +#endif +} + +static const struct tty_port_operations slgt_port_ops = { + .carrier_raised = carrier_raised, + .dtr_rts = dtr_rts, +}; + +/* + * allocate device instance structure, return NULL on failure + */ +static struct slgt_info *alloc_dev(int adapter_num, int port_num, struct pci_dev *pdev) +{ + struct slgt_info *info; + + info = kzalloc(sizeof(struct slgt_info), GFP_KERNEL); + + if (!info) { + DBGERR(("%s device alloc failed adapter=%d port=%d\n", + driver_name, adapter_num, port_num)); + } else { + tty_port_init(&info->port); + info->port.ops = &slgt_port_ops; + info->magic = MGSL_MAGIC; + INIT_WORK(&info->task, bh_handler); + info->max_frame_size = 4096; + info->base_clock = 14745600; + info->rbuf_fill_level = DMABUFSIZE; + info->port.close_delay = 5*HZ/10; + 
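/*
 * Default tty_port timing: a close delay of HZ/2 jiffies (0.5 s) above,
 * and a 30 second closing_wait (next line) allowed for transmit data to
 * drain when the port is closed.
 */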
info->port.closing_wait = 30*HZ; + init_waitqueue_head(&info->status_event_wait_q); + init_waitqueue_head(&info->event_wait_q); + spin_lock_init(&info->netlock); + memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS)); + info->idle_mode = HDLC_TXIDLE_FLAGS; + info->adapter_num = adapter_num; + info->port_num = port_num; + + setup_timer(&info->tx_timer, tx_timeout, (unsigned long)info); + setup_timer(&info->rx_timer, rx_timeout, (unsigned long)info); + + /* Copy configuration info to device instance data */ + info->pdev = pdev; + info->irq_level = pdev->irq; + info->phys_reg_addr = pci_resource_start(pdev,0); + + info->bus_type = MGSL_BUS_TYPE_PCI; + info->irq_flags = IRQF_SHARED; + + info->init_error = -1; /* assume error, set to 0 on successful init */ + } + + return info; +} + +static void device_init(int adapter_num, struct pci_dev *pdev) +{ + struct slgt_info *port_array[SLGT_MAX_PORTS]; + int i; + int port_count = 1; + + if (pdev->device == SYNCLINK_GT2_DEVICE_ID) + port_count = 2; + else if (pdev->device == SYNCLINK_GT4_DEVICE_ID) + port_count = 4; + + /* allocate device instances for all ports */ + for (i=0; i < port_count; ++i) { + port_array[i] = alloc_dev(adapter_num, i, pdev); + if (port_array[i] == NULL) { + for (--i; i >= 0; --i) + kfree(port_array[i]); + return; + } + } + + /* give copy of port_array to all ports and add to device list */ + for (i=0; i < port_count; ++i) { + memcpy(port_array[i]->port_array, port_array, sizeof(port_array)); + add_device(port_array[i]); + port_array[i]->port_count = port_count; + spin_lock_init(&port_array[i]->lock); + } + + /* Allocate and claim adapter resources */ + if (!claim_resources(port_array[0])) { + + alloc_dma_bufs(port_array[0]); + + /* copy resource information from first port to others */ + for (i = 1; i < port_count; ++i) { + port_array[i]->irq_level = port_array[0]->irq_level; + port_array[i]->reg_addr = port_array[0]->reg_addr; + alloc_dma_bufs(port_array[i]); + } + + if (request_irq(port_array[0]->irq_level, + slgt_interrupt, + port_array[0]->irq_flags, + port_array[0]->device_name, + port_array[0]) < 0) { + DBGERR(("%s request_irq failed IRQ=%d\n", + port_array[0]->device_name, + port_array[0]->irq_level)); + } else { + port_array[0]->irq_requested = true; + adapter_test(port_array[0]); + for (i=1 ; i < port_count ; i++) { + port_array[i]->init_error = port_array[0]->init_error; + port_array[i]->gpio_present = port_array[0]->gpio_present; + } + } + } + + for (i=0; i < port_count; ++i) + tty_register_device(serial_driver, port_array[i]->line, &(port_array[i]->pdev->dev)); +} + +static int __devinit init_one(struct pci_dev *dev, + const struct pci_device_id *ent) +{ + if (pci_enable_device(dev)) { + printk("error enabling pci device %p\n", dev); + return -EIO; + } + pci_set_master(dev); + device_init(slgt_device_count, dev); + return 0; +} + +static void __devexit remove_one(struct pci_dev *dev) +{ +} + +static const struct tty_operations ops = { + .open = open, + .close = close, + .write = write, + .put_char = put_char, + .flush_chars = flush_chars, + .write_room = write_room, + .chars_in_buffer = chars_in_buffer, + .flush_buffer = flush_buffer, + .ioctl = ioctl, + .compat_ioctl = slgt_compat_ioctl, + .throttle = throttle, + .unthrottle = unthrottle, + .send_xchar = send_xchar, + .break_ctl = set_break, + .wait_until_sent = wait_until_sent, + .set_termios = set_termios, + .stop = tx_hold, + .start = tx_release, + .hangup = hangup, + .tiocmget = tiocmget, + .tiocmset = tiocmset, + .get_icount = get_icount, + .proc_fops = 
&synclink_gt_proc_fops, +}; + +static void slgt_cleanup(void) +{ + int rc; + struct slgt_info *info; + struct slgt_info *tmp; + + printk(KERN_INFO "unload %s\n", driver_name); + + if (serial_driver) { + for (info=slgt_device_list ; info != NULL ; info=info->next_device) + tty_unregister_device(serial_driver, info->line); + if ((rc = tty_unregister_driver(serial_driver))) + DBGERR(("tty_unregister_driver error=%d\n", rc)); + put_tty_driver(serial_driver); + } + + /* reset devices */ + info = slgt_device_list; + while(info) { + reset_port(info); + info = info->next_device; + } + + /* release devices */ + info = slgt_device_list; + while(info) { +#if SYNCLINK_GENERIC_HDLC + hdlcdev_exit(info); +#endif + free_dma_bufs(info); + free_tmp_rbuf(info); + if (info->port_num == 0) + release_resources(info); + tmp = info; + info = info->next_device; + kfree(tmp); + } + + if (pci_registered) + pci_unregister_driver(&pci_driver); +} + +/* + * Driver initialization entry point. + */ +static int __init slgt_init(void) +{ + int rc; + + printk(KERN_INFO "%s\n", driver_name); + + serial_driver = alloc_tty_driver(MAX_DEVICES); + if (!serial_driver) { + printk("%s can't allocate tty driver\n", driver_name); + return -ENOMEM; + } + + /* Initialize the tty_driver structure */ + + serial_driver->owner = THIS_MODULE; + serial_driver->driver_name = tty_driver_name; + serial_driver->name = tty_dev_prefix; + serial_driver->major = ttymajor; + serial_driver->minor_start = 64; + serial_driver->type = TTY_DRIVER_TYPE_SERIAL; + serial_driver->subtype = SERIAL_TYPE_NORMAL; + serial_driver->init_termios = tty_std_termios; + serial_driver->init_termios.c_cflag = + B9600 | CS8 | CREAD | HUPCL | CLOCAL; + serial_driver->init_termios.c_ispeed = 9600; + serial_driver->init_termios.c_ospeed = 9600; + serial_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV; + tty_set_operations(serial_driver, &ops); + if ((rc = tty_register_driver(serial_driver)) < 0) { + DBGERR(("%s can't register serial driver\n", driver_name)); + put_tty_driver(serial_driver); + serial_driver = NULL; + goto error; + } + + printk(KERN_INFO "%s, tty major#%d\n", + driver_name, serial_driver->major); + + slgt_device_count = 0; + if ((rc = pci_register_driver(&pci_driver)) < 0) { + printk("%s pci_register_driver error=%d\n", driver_name, rc); + goto error; + } + pci_registered = true; + + if (!slgt_device_list) + printk("%s no devices found\n",driver_name); + + return 0; + +error: + slgt_cleanup(); + return rc; +} + +static void __exit slgt_exit(void) +{ + slgt_cleanup(); +} + +module_init(slgt_init); +module_exit(slgt_exit); + +/* + * register access routines + */ + +#define CALC_REGADDR() \ + unsigned long reg_addr = ((unsigned long)info->reg_addr) + addr; \ + if (addr >= 0x80) \ + reg_addr += (info->port_num) * 32; \ + else if (addr >= 0x40) \ + reg_addr += (info->port_num) * 16; + +static __u8 rd_reg8(struct slgt_info *info, unsigned int addr) +{ + CALC_REGADDR(); + return readb((void __iomem *)reg_addr); +} + +static void wr_reg8(struct slgt_info *info, unsigned int addr, __u8 value) +{ + CALC_REGADDR(); + writeb(value, (void __iomem *)reg_addr); +} + +static __u16 rd_reg16(struct slgt_info *info, unsigned int addr) +{ + CALC_REGADDR(); + return readw((void __iomem *)reg_addr); +} + +static void wr_reg16(struct slgt_info *info, unsigned int addr, __u16 value) +{ + CALC_REGADDR(); + writew(value, (void __iomem *)reg_addr); +} + +static __u32 rd_reg32(struct slgt_info *info, unsigned int addr) +{ + CALC_REGADDR(); + return readl((void __iomem 
*)reg_addr); +} + +static void wr_reg32(struct slgt_info *info, unsigned int addr, __u32 value) +{ + CALC_REGADDR(); + writel(value, (void __iomem *)reg_addr); +} + +static void rdma_reset(struct slgt_info *info) +{ + unsigned int i; + + /* set reset bit */ + wr_reg32(info, RDCSR, BIT1); + + /* wait for enable bit cleared */ + for(i=0 ; i < 1000 ; i++) + if (!(rd_reg32(info, RDCSR) & BIT0)) + break; +} + +static void tdma_reset(struct slgt_info *info) +{ + unsigned int i; + + /* set reset bit */ + wr_reg32(info, TDCSR, BIT1); + + /* wait for enable bit cleared */ + for(i=0 ; i < 1000 ; i++) + if (!(rd_reg32(info, TDCSR) & BIT0)) + break; +} + +/* + * enable internal loopback + * TxCLK and RxCLK are generated from BRG + * and TxD is looped back to RxD internally. + */ +static void enable_loopback(struct slgt_info *info) +{ + /* SCR (serial control) BIT2=looopback enable */ + wr_reg16(info, SCR, (unsigned short)(rd_reg16(info, SCR) | BIT2)); + + if (info->params.mode != MGSL_MODE_ASYNC) { + /* CCR (clock control) + * 07..05 tx clock source (010 = BRG) + * 04..02 rx clock source (010 = BRG) + * 01 auxclk enable (0 = disable) + * 00 BRG enable (1 = enable) + * + * 0100 1001 + */ + wr_reg8(info, CCR, 0x49); + + /* set speed if available, otherwise use default */ + if (info->params.clock_speed) + set_rate(info, info->params.clock_speed); + else + set_rate(info, 3686400); + } +} + +/* + * set baud rate generator to specified rate + */ +static void set_rate(struct slgt_info *info, u32 rate) +{ + unsigned int div; + unsigned int osc = info->base_clock; + + /* div = osc/rate - 1 + * + * Round div up if osc/rate is not integer to + * force to next slowest rate. + */ + + if (rate) { + div = osc/rate; + if (!(osc % rate) && div) + div--; + wr_reg16(info, BDR, (unsigned short)div); + } +} + +static void rx_stop(struct slgt_info *info) +{ + unsigned short val; + + /* disable and reset receiver */ + val = rd_reg16(info, RCR) & ~BIT1; /* clear enable bit */ + wr_reg16(info, RCR, (unsigned short)(val | BIT2)); /* set reset bit */ + wr_reg16(info, RCR, val); /* clear reset bit */ + + slgt_irq_off(info, IRQ_RXOVER + IRQ_RXDATA + IRQ_RXIDLE); + + /* clear pending rx interrupts */ + wr_reg16(info, SSR, IRQ_RXIDLE + IRQ_RXOVER); + + rdma_reset(info); + + info->rx_enabled = false; + info->rx_restart = false; +} + +static void rx_start(struct slgt_info *info) +{ + unsigned short val; + + slgt_irq_off(info, IRQ_RXOVER + IRQ_RXDATA); + + /* clear pending rx overrun IRQ */ + wr_reg16(info, SSR, IRQ_RXOVER); + + /* reset and disable receiver */ + val = rd_reg16(info, RCR) & ~BIT1; /* clear enable bit */ + wr_reg16(info, RCR, (unsigned short)(val | BIT2)); /* set reset bit */ + wr_reg16(info, RCR, val); /* clear reset bit */ + + rdma_reset(info); + reset_rbufs(info); + + if (info->rx_pio) { + /* rx request when rx FIFO not empty */ + wr_reg16(info, SCR, (unsigned short)(rd_reg16(info, SCR) & ~BIT14)); + slgt_irq_on(info, IRQ_RXDATA); + if (info->params.mode == MGSL_MODE_ASYNC) { + /* enable saving of rx status */ + wr_reg32(info, RDCSR, BIT6); + } + } else { + /* rx request when rx FIFO half full */ + wr_reg16(info, SCR, (unsigned short)(rd_reg16(info, SCR) | BIT14)); + /* set 1st descriptor address */ + wr_reg32(info, RDDAR, info->rbufs[0].pdesc); + + if (info->params.mode != MGSL_MODE_ASYNC) { + /* enable rx DMA and DMA interrupt */ + wr_reg32(info, RDCSR, (BIT2 + BIT0)); + } else { + /* enable saving of rx status, rx DMA and DMA interrupt */ + wr_reg32(info, RDCSR, (BIT6 + BIT2 + BIT0)); + } + } + + 
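/*
 * Remaining receive start-up below: unmask the receive overrun
 * interrupt, then set RCR bit 1 to enable the receiver itself.
 */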
slgt_irq_on(info, IRQ_RXOVER); + + /* enable receiver */ + wr_reg16(info, RCR, (unsigned short)(rd_reg16(info, RCR) | BIT1)); + + info->rx_restart = false; + info->rx_enabled = true; +} + +static void tx_start(struct slgt_info *info) +{ + if (!info->tx_enabled) { + wr_reg16(info, TCR, + (unsigned short)((rd_reg16(info, TCR) | BIT1) & ~BIT2)); + info->tx_enabled = true; + } + + if (desc_count(info->tbufs[info->tbuf_start])) { + info->drop_rts_on_tx_done = false; + + if (info->params.mode != MGSL_MODE_ASYNC) { + if (info->params.flags & HDLC_FLAG_AUTO_RTS) { + get_signals(info); + if (!(info->signals & SerialSignal_RTS)) { + info->signals |= SerialSignal_RTS; + set_signals(info); + info->drop_rts_on_tx_done = true; + } + } + + slgt_irq_off(info, IRQ_TXDATA); + slgt_irq_on(info, IRQ_TXUNDER + IRQ_TXIDLE); + /* clear tx idle and underrun status bits */ + wr_reg16(info, SSR, (unsigned short)(IRQ_TXIDLE + IRQ_TXUNDER)); + } else { + slgt_irq_off(info, IRQ_TXDATA); + slgt_irq_on(info, IRQ_TXIDLE); + /* clear tx idle status bit */ + wr_reg16(info, SSR, IRQ_TXIDLE); + } + /* set 1st descriptor address and start DMA */ + wr_reg32(info, TDDAR, info->tbufs[info->tbuf_start].pdesc); + wr_reg32(info, TDCSR, BIT2 + BIT0); + info->tx_active = true; + } +} + +static void tx_stop(struct slgt_info *info) +{ + unsigned short val; + + del_timer(&info->tx_timer); + + tdma_reset(info); + + /* reset and disable transmitter */ + val = rd_reg16(info, TCR) & ~BIT1; /* clear enable bit */ + wr_reg16(info, TCR, (unsigned short)(val | BIT2)); /* set reset bit */ + + slgt_irq_off(info, IRQ_TXDATA + IRQ_TXIDLE + IRQ_TXUNDER); + + /* clear tx idle and underrun status bit */ + wr_reg16(info, SSR, (unsigned short)(IRQ_TXIDLE + IRQ_TXUNDER)); + + reset_tbufs(info); + + info->tx_enabled = false; + info->tx_active = false; +} + +static void reset_port(struct slgt_info *info) +{ + if (!info->reg_addr) + return; + + tx_stop(info); + rx_stop(info); + + info->signals &= ~(SerialSignal_DTR + SerialSignal_RTS); + set_signals(info); + + slgt_irq_off(info, IRQ_ALL | IRQ_MASTER); +} + +static void reset_adapter(struct slgt_info *info) +{ + int i; + for (i=0; i < info->port_count; ++i) { + if (info->port_array[i]) + reset_port(info->port_array[i]); + } +} + +static void async_mode(struct slgt_info *info) +{ + unsigned short val; + + slgt_irq_off(info, IRQ_ALL | IRQ_MASTER); + tx_stop(info); + rx_stop(info); + + /* TCR (tx control) + * + * 15..13 mode, 010=async + * 12..10 encoding, 000=NRZ + * 09 parity enable + * 08 1=odd parity, 0=even parity + * 07 1=RTS driver control + * 06 1=break enable + * 05..04 character length + * 00=5 bits + * 01=6 bits + * 10=7 bits + * 11=8 bits + * 03 0=1 stop bit, 1=2 stop bits + * 02 reset + * 01 enable + * 00 auto-CTS enable + */ + val = 0x4000; + + if (info->if_mode & MGSL_INTERFACE_RTS_EN) + val |= BIT7; + + if (info->params.parity != ASYNC_PARITY_NONE) { + val |= BIT9; + if (info->params.parity == ASYNC_PARITY_ODD) + val |= BIT8; + } + + switch (info->params.data_bits) + { + case 6: val |= BIT4; break; + case 7: val |= BIT5; break; + case 8: val |= BIT5 + BIT4; break; + } + + if (info->params.stop_bits != 1) + val |= BIT3; + + if (info->params.flags & HDLC_FLAG_AUTO_CTS) + val |= BIT0; + + wr_reg16(info, TCR, val); + + /* RCR (rx control) + * + * 15..13 mode, 010=async + * 12..10 encoding, 000=NRZ + * 09 parity enable + * 08 1=odd parity, 0=even parity + * 07..06 reserved, must be 0 + * 05..04 character length + * 00=5 bits + * 01=6 bits + * 10=7 bits + * 11=8 bits + * 03 reserved, must be zero + * 
02 reset + * 01 enable + * 00 auto-DCD enable + */ + val = 0x4000; + + if (info->params.parity != ASYNC_PARITY_NONE) { + val |= BIT9; + if (info->params.parity == ASYNC_PARITY_ODD) + val |= BIT8; + } + + switch (info->params.data_bits) + { + case 6: val |= BIT4; break; + case 7: val |= BIT5; break; + case 8: val |= BIT5 + BIT4; break; + } + + if (info->params.flags & HDLC_FLAG_AUTO_DCD) + val |= BIT0; + + wr_reg16(info, RCR, val); + + /* CCR (clock control) + * + * 07..05 011 = tx clock source is BRG/16 + * 04..02 010 = rx clock source is BRG + * 01 0 = auxclk disabled + * 00 1 = BRG enabled + * + * 0110 1001 + */ + wr_reg8(info, CCR, 0x69); + + msc_set_vcr(info); + + /* SCR (serial control) + * + * 15 1=tx req on FIFO half empty + * 14 1=rx req on FIFO half full + * 13 tx data IRQ enable + * 12 tx idle IRQ enable + * 11 rx break on IRQ enable + * 10 rx data IRQ enable + * 09 rx break off IRQ enable + * 08 overrun IRQ enable + * 07 DSR IRQ enable + * 06 CTS IRQ enable + * 05 DCD IRQ enable + * 04 RI IRQ enable + * 03 0=16x sampling, 1=8x sampling + * 02 1=txd->rxd internal loopback enable + * 01 reserved, must be zero + * 00 1=master IRQ enable + */ + val = BIT15 + BIT14 + BIT0; + /* JCR[8] : 1 = x8 async mode feature available */ + if ((rd_reg32(info, JCR) & BIT8) && info->params.data_rate && + ((info->base_clock < (info->params.data_rate * 16)) || + (info->base_clock % (info->params.data_rate * 16)))) { + /* use 8x sampling */ + val |= BIT3; + set_rate(info, info->params.data_rate * 8); + } else { + /* use 16x sampling */ + set_rate(info, info->params.data_rate * 16); + } + wr_reg16(info, SCR, val); + + slgt_irq_on(info, IRQ_RXBREAK | IRQ_RXOVER); + + if (info->params.loopback) + enable_loopback(info); +} + +static void sync_mode(struct slgt_info *info) +{ + unsigned short val; + + slgt_irq_off(info, IRQ_ALL | IRQ_MASTER); + tx_stop(info); + rx_stop(info); + + /* TCR (tx control) + * + * 15..13 mode + * 000=HDLC/SDLC + * 001=raw bit synchronous + * 010=asynchronous/isochronous + * 011=monosync byte synchronous + * 100=bisync byte synchronous + * 101=xsync byte synchronous + * 12..10 encoding + * 09 CRC enable + * 08 CRC32 + * 07 1=RTS driver control + * 06 preamble enable + * 05..04 preamble length + * 03 share open/close flag + * 02 reset + * 01 enable + * 00 auto-CTS enable + */ + val = BIT2; + + switch(info->params.mode) { + case MGSL_MODE_XSYNC: + val |= BIT15 + BIT13; + break; + case MGSL_MODE_MONOSYNC: val |= BIT14 + BIT13; break; + case MGSL_MODE_BISYNC: val |= BIT15; break; + case MGSL_MODE_RAW: val |= BIT13; break; + } + if (info->if_mode & MGSL_INTERFACE_RTS_EN) + val |= BIT7; + + switch(info->params.encoding) + { + case HDLC_ENCODING_NRZB: val |= BIT10; break; + case HDLC_ENCODING_NRZI_MARK: val |= BIT11; break; + case HDLC_ENCODING_NRZI: val |= BIT11 + BIT10; break; + case HDLC_ENCODING_BIPHASE_MARK: val |= BIT12; break; + case HDLC_ENCODING_BIPHASE_SPACE: val |= BIT12 + BIT10; break; + case HDLC_ENCODING_BIPHASE_LEVEL: val |= BIT12 + BIT11; break; + case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: val |= BIT12 + BIT11 + BIT10; break; + } + + switch (info->params.crc_type & HDLC_CRC_MASK) + { + case HDLC_CRC_16_CCITT: val |= BIT9; break; + case HDLC_CRC_32_CCITT: val |= BIT9 + BIT8; break; + } + + if (info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE) + val |= BIT6; + + switch (info->params.preamble_length) + { + case HDLC_PREAMBLE_LENGTH_16BITS: val |= BIT5; break; + case HDLC_PREAMBLE_LENGTH_32BITS: val |= BIT4; break; + case HDLC_PREAMBLE_LENGTH_64BITS: val |= BIT5 + BIT4; break; 
+ } + + if (info->params.flags & HDLC_FLAG_AUTO_CTS) + val |= BIT0; + + wr_reg16(info, TCR, val); + + /* TPR (transmit preamble) */ + + switch (info->params.preamble) + { + case HDLC_PREAMBLE_PATTERN_FLAGS: val = 0x7e; break; + case HDLC_PREAMBLE_PATTERN_ONES: val = 0xff; break; + case HDLC_PREAMBLE_PATTERN_ZEROS: val = 0x00; break; + case HDLC_PREAMBLE_PATTERN_10: val = 0x55; break; + case HDLC_PREAMBLE_PATTERN_01: val = 0xaa; break; + default: val = 0x7e; break; + } + wr_reg8(info, TPR, (unsigned char)val); + + /* RCR (rx control) + * + * 15..13 mode + * 000=HDLC/SDLC + * 001=raw bit synchronous + * 010=asynchronous/isochronous + * 011=monosync byte synchronous + * 100=bisync byte synchronous + * 101=xsync byte synchronous + * 12..10 encoding + * 09 CRC enable + * 08 CRC32 + * 07..03 reserved, must be 0 + * 02 reset + * 01 enable + * 00 auto-DCD enable + */ + val = 0; + + switch(info->params.mode) { + case MGSL_MODE_XSYNC: + val |= BIT15 + BIT13; + break; + case MGSL_MODE_MONOSYNC: val |= BIT14 + BIT13; break; + case MGSL_MODE_BISYNC: val |= BIT15; break; + case MGSL_MODE_RAW: val |= BIT13; break; + } + + switch(info->params.encoding) + { + case HDLC_ENCODING_NRZB: val |= BIT10; break; + case HDLC_ENCODING_NRZI_MARK: val |= BIT11; break; + case HDLC_ENCODING_NRZI: val |= BIT11 + BIT10; break; + case HDLC_ENCODING_BIPHASE_MARK: val |= BIT12; break; + case HDLC_ENCODING_BIPHASE_SPACE: val |= BIT12 + BIT10; break; + case HDLC_ENCODING_BIPHASE_LEVEL: val |= BIT12 + BIT11; break; + case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: val |= BIT12 + BIT11 + BIT10; break; + } + + switch (info->params.crc_type & HDLC_CRC_MASK) + { + case HDLC_CRC_16_CCITT: val |= BIT9; break; + case HDLC_CRC_32_CCITT: val |= BIT9 + BIT8; break; + } + + if (info->params.flags & HDLC_FLAG_AUTO_DCD) + val |= BIT0; + + wr_reg16(info, RCR, val); + + /* CCR (clock control) + * + * 07..05 tx clock source + * 04..02 rx clock source + * 01 auxclk enable + * 00 BRG enable + */ + val = 0; + + if (info->params.flags & HDLC_FLAG_TXC_BRG) + { + // when RxC source is DPLL, BRG generates 16X DPLL + // reference clock, so take TxC from BRG/16 to get + // transmit clock at actual data rate + if (info->params.flags & HDLC_FLAG_RXC_DPLL) + val |= BIT6 + BIT5; /* 011, txclk = BRG/16 */ + else + val |= BIT6; /* 010, txclk = BRG */ + } + else if (info->params.flags & HDLC_FLAG_TXC_DPLL) + val |= BIT7; /* 100, txclk = DPLL Input */ + else if (info->params.flags & HDLC_FLAG_TXC_RXCPIN) + val |= BIT5; /* 001, txclk = RXC Input */ + + if (info->params.flags & HDLC_FLAG_RXC_BRG) + val |= BIT3; /* 010, rxclk = BRG */ + else if (info->params.flags & HDLC_FLAG_RXC_DPLL) + val |= BIT4; /* 100, rxclk = DPLL */ + else if (info->params.flags & HDLC_FLAG_RXC_TXCPIN) + val |= BIT2; /* 001, rxclk = TXC Input */ + + if (info->params.clock_speed) + val |= BIT1 + BIT0; + + wr_reg8(info, CCR, (unsigned char)val); + + if (info->params.flags & (HDLC_FLAG_TXC_DPLL + HDLC_FLAG_RXC_DPLL)) + { + // program DPLL mode + switch(info->params.encoding) + { + case HDLC_ENCODING_BIPHASE_MARK: + case HDLC_ENCODING_BIPHASE_SPACE: + val = BIT7; break; + case HDLC_ENCODING_BIPHASE_LEVEL: + case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: + val = BIT7 + BIT6; break; + default: val = BIT6; // NRZ encodings + } + wr_reg16(info, RCR, (unsigned short)(rd_reg16(info, RCR) | val)); + + // DPLL requires a 16X reference clock from BRG + set_rate(info, info->params.clock_speed * 16); + } + else + set_rate(info, info->params.clock_speed); + + tx_set_idle(info); + + msc_set_vcr(info); + + /* SCR (serial 
control) + * + * 15 1=tx req on FIFO half empty + * 14 1=rx req on FIFO half full + * 13 tx data IRQ enable + * 12 tx idle IRQ enable + * 11 underrun IRQ enable + * 10 rx data IRQ enable + * 09 rx idle IRQ enable + * 08 overrun IRQ enable + * 07 DSR IRQ enable + * 06 CTS IRQ enable + * 05 DCD IRQ enable + * 04 RI IRQ enable + * 03 reserved, must be zero + * 02 1=txd->rxd internal loopback enable + * 01 reserved, must be zero + * 00 1=master IRQ enable + */ + wr_reg16(info, SCR, BIT15 + BIT14 + BIT0); + + if (info->params.loopback) + enable_loopback(info); +} + +/* + * set transmit idle mode + */ +static void tx_set_idle(struct slgt_info *info) +{ + unsigned char val; + unsigned short tcr; + + /* if preamble enabled (tcr[6] == 1) then tx idle size = 8 bits + * else tcr[5:4] = tx idle size: 00 = 8 bits, 01 = 16 bits + */ + tcr = rd_reg16(info, TCR); + if (info->idle_mode & HDLC_TXIDLE_CUSTOM_16) { + /* disable preamble, set idle size to 16 bits */ + tcr = (tcr & ~(BIT6 + BIT5)) | BIT4; + /* MSB of 16 bit idle specified in tx preamble register (TPR) */ + wr_reg8(info, TPR, (unsigned char)((info->idle_mode >> 8) & 0xff)); + } else if (!(tcr & BIT6)) { + /* preamble is disabled, set idle size to 8 bits */ + tcr &= ~(BIT5 + BIT4); + } + wr_reg16(info, TCR, tcr); + + if (info->idle_mode & (HDLC_TXIDLE_CUSTOM_8 | HDLC_TXIDLE_CUSTOM_16)) { + /* LSB of custom tx idle specified in tx idle register */ + val = (unsigned char)(info->idle_mode & 0xff); + } else { + /* standard 8 bit idle patterns */ + switch(info->idle_mode) + { + case HDLC_TXIDLE_FLAGS: val = 0x7e; break; + case HDLC_TXIDLE_ALT_ZEROS_ONES: + case HDLC_TXIDLE_ALT_MARK_SPACE: val = 0xaa; break; + case HDLC_TXIDLE_ZEROS: + case HDLC_TXIDLE_SPACE: val = 0x00; break; + default: val = 0xff; + } + } + + wr_reg8(info, TIR, val); +} + +/* + * get state of V24 status (input) signals + */ +static void get_signals(struct slgt_info *info) +{ + unsigned short status = rd_reg16(info, SSR); + + /* clear all serial signals except DTR and RTS */ + info->signals &= SerialSignal_DTR + SerialSignal_RTS; + + if (status & BIT3) + info->signals |= SerialSignal_DSR; + if (status & BIT2) + info->signals |= SerialSignal_CTS; + if (status & BIT1) + info->signals |= SerialSignal_DCD; + if (status & BIT0) + info->signals |= SerialSignal_RI; +} + +/* + * set V.24 Control Register based on current configuration + */ +static void msc_set_vcr(struct slgt_info *info) +{ + unsigned char val = 0; + + /* VCR (V.24 control) + * + * 07..04 serial IF select + * 03 DTR + * 02 RTS + * 01 LL + * 00 RL + */ + + switch(info->if_mode & MGSL_INTERFACE_MASK) + { + case MGSL_INTERFACE_RS232: + val |= BIT5; /* 0010 */ + break; + case MGSL_INTERFACE_V35: + val |= BIT7 + BIT6 + BIT5; /* 1110 */ + break; + case MGSL_INTERFACE_RS422: + val |= BIT6; /* 0100 */ + break; + } + + if (info->if_mode & MGSL_INTERFACE_MSB_FIRST) + val |= BIT4; + if (info->signals & SerialSignal_DTR) + val |= BIT3; + if (info->signals & SerialSignal_RTS) + val |= BIT2; + if (info->if_mode & MGSL_INTERFACE_LL) + val |= BIT1; + if (info->if_mode & MGSL_INTERFACE_RL) + val |= BIT0; + wr_reg8(info, VCR, val); +} + +/* + * set state of V24 control (output) signals + */ +static void set_signals(struct slgt_info *info) +{ + unsigned char val = rd_reg8(info, VCR); + if (info->signals & SerialSignal_DTR) + val |= BIT3; + else + val &= ~BIT3; + if (info->signals & SerialSignal_RTS) + val |= BIT2; + else + val &= ~BIT2; + wr_reg8(info, VCR, val); +} + +/* + * free range of receive DMA buffers (i to last) + */ +static void 
free_rbufs(struct slgt_info *info, unsigned int i, unsigned int last) +{ + int done = 0; + + while(!done) { + /* reset current buffer for reuse */ + info->rbufs[i].status = 0; + set_desc_count(info->rbufs[i], info->rbuf_fill_level); + if (i == last) + done = 1; + if (++i == info->rbuf_count) + i = 0; + } + info->rbuf_current = i; +} + +/* + * mark all receive DMA buffers as free + */ +static void reset_rbufs(struct slgt_info *info) +{ + free_rbufs(info, 0, info->rbuf_count - 1); + info->rbuf_fill_index = 0; + info->rbuf_fill_count = 0; +} + +/* + * pass receive HDLC frame to upper layer + * + * return true if frame available, otherwise false + */ +static bool rx_get_frame(struct slgt_info *info) +{ + unsigned int start, end; + unsigned short status; + unsigned int framesize = 0; + unsigned long flags; + struct tty_struct *tty = info->port.tty; + unsigned char addr_field = 0xff; + unsigned int crc_size = 0; + + switch (info->params.crc_type & HDLC_CRC_MASK) { + case HDLC_CRC_16_CCITT: crc_size = 2; break; + case HDLC_CRC_32_CCITT: crc_size = 4; break; + } + +check_again: + + framesize = 0; + addr_field = 0xff; + start = end = info->rbuf_current; + + for (;;) { + if (!desc_complete(info->rbufs[end])) + goto cleanup; + + if (framesize == 0 && info->params.addr_filter != 0xff) + addr_field = info->rbufs[end].buf[0]; + + framesize += desc_count(info->rbufs[end]); + + if (desc_eof(info->rbufs[end])) + break; + + if (++end == info->rbuf_count) + end = 0; + + if (end == info->rbuf_current) { + if (info->rx_enabled){ + spin_lock_irqsave(&info->lock,flags); + rx_start(info); + spin_unlock_irqrestore(&info->lock,flags); + } + goto cleanup; + } + } + + /* status + * + * 15 buffer complete + * 14..06 reserved + * 05..04 residue + * 02 eof (end of frame) + * 01 CRC error + * 00 abort + */ + status = desc_status(info->rbufs[end]); + + /* ignore CRC bit if not using CRC (bit is undefined) */ + if ((info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_NONE) + status &= ~BIT1; + + if (framesize == 0 || + (addr_field != 0xff && addr_field != info->params.addr_filter)) { + free_rbufs(info, start, end); + goto check_again; + } + + if (framesize < (2 + crc_size) || status & BIT0) { + info->icount.rxshort++; + framesize = 0; + } else if (status & BIT1) { + info->icount.rxcrc++; + if (!(info->params.crc_type & HDLC_CRC_RETURN_EX)) + framesize = 0; + } + +#if SYNCLINK_GENERIC_HDLC + if (framesize == 0) { + info->netdev->stats.rx_errors++; + info->netdev->stats.rx_frame_errors++; + } +#endif + + DBGBH(("%s rx frame status=%04X size=%d\n", + info->device_name, status, framesize)); + DBGDATA(info, info->rbufs[start].buf, min_t(int, framesize, info->rbuf_fill_level), "rx"); + + if (framesize) { + if (!(info->params.crc_type & HDLC_CRC_RETURN_EX)) { + framesize -= crc_size; + crc_size = 0; + } + + if (framesize > info->max_frame_size + crc_size) + info->icount.rxlong++; + else { + /* copy dma buffer(s) to contiguous temp buffer */ + int copy_count = framesize; + int i = start; + unsigned char *p = info->tmp_rbuf; + info->tmp_rbuf_count = framesize; + + info->icount.rxok++; + + while(copy_count) { + int partial_count = min_t(int, copy_count, info->rbuf_fill_level); + memcpy(p, info->rbufs[i].buf, partial_count); + p += partial_count; + copy_count -= partial_count; + if (++i == info->rbuf_count) + i = 0; + } + + if (info->params.crc_type & HDLC_CRC_RETURN_EX) { + *p = (status & BIT1) ? 
RX_CRC_ERROR : RX_OK; + framesize++; + } + +#if SYNCLINK_GENERIC_HDLC + if (info->netcount) + hdlcdev_rx(info,info->tmp_rbuf, framesize); + else +#endif + ldisc_receive_buf(tty, info->tmp_rbuf, info->flag_buf, framesize); + } + } + free_rbufs(info, start, end); + return true; + +cleanup: + return false; +} + +/* + * pass receive buffer (RAW synchronous mode) to tty layer + * return true if buffer available, otherwise false + */ +static bool rx_get_buf(struct slgt_info *info) +{ + unsigned int i = info->rbuf_current; + unsigned int count; + + if (!desc_complete(info->rbufs[i])) + return false; + count = desc_count(info->rbufs[i]); + switch(info->params.mode) { + case MGSL_MODE_MONOSYNC: + case MGSL_MODE_BISYNC: + case MGSL_MODE_XSYNC: + /* ignore residue in byte synchronous modes */ + if (desc_residue(info->rbufs[i])) + count--; + break; + } + DBGDATA(info, info->rbufs[i].buf, count, "rx"); + DBGINFO(("rx_get_buf size=%d\n", count)); + if (count) + ldisc_receive_buf(info->port.tty, info->rbufs[i].buf, + info->flag_buf, count); + free_rbufs(info, i, i); + return true; +} + +static void reset_tbufs(struct slgt_info *info) +{ + unsigned int i; + info->tbuf_current = 0; + for (i=0 ; i < info->tbuf_count ; i++) { + info->tbufs[i].status = 0; + info->tbufs[i].count = 0; + } +} + +/* + * return number of free transmit DMA buffers + */ +static unsigned int free_tbuf_count(struct slgt_info *info) +{ + unsigned int count = 0; + unsigned int i = info->tbuf_current; + + do + { + if (desc_count(info->tbufs[i])) + break; /* buffer in use */ + ++count; + if (++i == info->tbuf_count) + i=0; + } while (i != info->tbuf_current); + + /* if tx DMA active, last zero count buffer is in use */ + if (count && (rd_reg32(info, TDCSR) & BIT0)) + --count; + + return count; +} + +/* + * return number of bytes in unsent transmit DMA buffers + * and the serial controller tx FIFO + */ +static unsigned int tbuf_bytes(struct slgt_info *info) +{ + unsigned int total_count = 0; + unsigned int i = info->tbuf_current; + unsigned int reg_value; + unsigned int count; + unsigned int active_buf_count = 0; + + /* + * Add descriptor counts for all tx DMA buffers. + * If count is zero (cleared by DMA controller after read), + * the buffer is complete or is actively being read from. + * + * Record buf_count of last buffer with zero count starting + * from current ring position. buf_count is mirror + * copy of count and is not cleared by serial controller. + * If DMA controller is active, that buffer is actively + * being read so add to total. 
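+	 *
+	 * Worked example (values are illustrative only): starting at
+	 * tbuf_current with a zero count buffer whose buf_count is 256
+	 * (still being read by the DMA controller), followed by unread
+	 * buffers with counts 256 and 128, 16 bytes in the tx FIFO and
+	 * the transmitter active:
+	 *
+	 *   256 + 128 (descriptor counts) + 256 (active buffer)
+	 *     + 16 (FIFO) + 1 (shift register) = 657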
+ */ + do { + count = desc_count(info->tbufs[i]); + if (count) + total_count += count; + else if (!total_count) + active_buf_count = info->tbufs[i].buf_count; + if (++i == info->tbuf_count) + i = 0; + } while (i != info->tbuf_current); + + /* read tx DMA status register */ + reg_value = rd_reg32(info, TDCSR); + + /* if tx DMA active, last zero count buffer is in use */ + if (reg_value & BIT0) + total_count += active_buf_count; + + /* add tx FIFO count = reg_value[15..8] */ + total_count += (reg_value >> 8) & 0xff; + + /* if transmitter active add one byte for shift register */ + if (info->tx_active) + total_count++; + + return total_count; +} + +/* + * load data into transmit DMA buffer ring and start transmitter if needed + * return true if data accepted, otherwise false (buffers full) + */ +static bool tx_load(struct slgt_info *info, const char *buf, unsigned int size) +{ + unsigned short count; + unsigned int i; + struct slgt_desc *d; + + /* check required buffer space */ + if (DIV_ROUND_UP(size, DMABUFSIZE) > free_tbuf_count(info)) + return false; + + DBGDATA(info, buf, size, "tx"); + + /* + * copy data to one or more DMA buffers in circular ring + * tbuf_start = first buffer for this data + * tbuf_current = next free buffer + * + * Copy all data before making data visible to DMA controller by + * setting descriptor count of the first buffer. + * This prevents an active DMA controller from reading the first DMA + * buffers of a frame and stopping before the final buffers are filled. + */ + + info->tbuf_start = i = info->tbuf_current; + + while (size) { + d = &info->tbufs[i]; + + count = (unsigned short)((size > DMABUFSIZE) ? DMABUFSIZE : size); + memcpy(d->buf, buf, count); + + size -= count; + buf += count; + + /* + * set EOF bit for last buffer of HDLC frame or + * for every buffer in raw mode + */ + if ((!size && info->params.mode == MGSL_MODE_HDLC) || + info->params.mode == MGSL_MODE_RAW) + set_desc_eof(*d, 1); + else + set_desc_eof(*d, 0); + + /* set descriptor count for all but first buffer */ + if (i != info->tbuf_start) + set_desc_count(*d, count); + d->buf_count = count; + + if (++i == info->tbuf_count) + i = 0; + } + + info->tbuf_current = i; + + /* set first buffer count to make new data visible to DMA controller */ + d = &info->tbufs[info->tbuf_start]; + set_desc_count(*d, d->buf_count); + + /* start transmitter if needed and update transmit timeout */ + if (!info->tx_active) + tx_start(info); + update_tx_timer(info); + + return true; +} + +static int register_test(struct slgt_info *info) +{ + static unsigned short patterns[] = + {0x0000, 0xffff, 0xaaaa, 0x5555, 0x6969, 0x9696}; + static unsigned int count = ARRAY_SIZE(patterns); + unsigned int i; + int rc = 0; + + for (i=0 ; i < count ; i++) { + wr_reg16(info, TIR, patterns[i]); + wr_reg16(info, BDR, patterns[(i+1)%count]); + if ((rd_reg16(info, TIR) != patterns[i]) || + (rd_reg16(info, BDR) != patterns[(i+1)%count])) { + rc = -ENODEV; + break; + } + } + info->gpio_present = (rd_reg32(info, JCR) & BIT5) ? 1 : 0; + info->init_error = rc ? 
DiagStatus_AddressFailure : 0;
+	return rc;
+}
+
+static int irq_test(struct slgt_info *info)
+{
+	unsigned long timeout;
+	unsigned long flags;
+	struct tty_struct *oldtty = info->port.tty;
+	u32 speed = info->params.data_rate;
+
+	info->params.data_rate = 921600;
+	info->port.tty = NULL;
+
+	spin_lock_irqsave(&info->lock, flags);
+	async_mode(info);
+	slgt_irq_on(info, IRQ_TXIDLE);
+
+	/* enable transmitter */
+	wr_reg16(info, TCR,
+		(unsigned short)(rd_reg16(info, TCR) | BIT1));
+
+	/* write one byte and wait for tx idle */
+	wr_reg16(info, TDR, 0);
+
+	/* assume failure */
+	info->init_error = DiagStatus_IrqFailure;
+	info->irq_occurred = false;
+
+	spin_unlock_irqrestore(&info->lock, flags);
+
+	timeout=100;
+	while(timeout-- && !info->irq_occurred)
+		msleep_interruptible(10);
+
+	spin_lock_irqsave(&info->lock,flags);
+	reset_port(info);
+	spin_unlock_irqrestore(&info->lock,flags);
+
+	info->params.data_rate = speed;
+	info->port.tty = oldtty;
+
+	info->init_error = info->irq_occurred ? 0 : DiagStatus_IrqFailure;
+	return info->irq_occurred ? 0 : -ENODEV;
+}
+
+static int loopback_test_rx(struct slgt_info *info)
+{
+	unsigned char *src, *dest;
+	int count;
+
+	if (desc_complete(info->rbufs[0])) {
+		count = desc_count(info->rbufs[0]);
+		src = info->rbufs[0].buf;
+		dest = info->tmp_rbuf;
+
+		for( ; count ; count-=2, src+=2) {
+			/* src=data byte (src+1)=status byte */
+			if (!(*(src+1) & (BIT9 + BIT8))) {
+				*dest = *src;
+				dest++;
+				info->tmp_rbuf_count++;
+			}
+		}
+		DBGDATA(info, info->tmp_rbuf, info->tmp_rbuf_count, "rx");
+		return 1;
+	}
+	return 0;
+}
+
+static int loopback_test(struct slgt_info *info)
+{
+#define TESTFRAMESIZE 20
+
+	unsigned long timeout;
+	u16 count = TESTFRAMESIZE;
+	unsigned char buf[TESTFRAMESIZE];
+	int rc = -ENODEV;
+	unsigned long flags;
+
+	struct tty_struct *oldtty = info->port.tty;
+	MGSL_PARAMS params;
+
+	memcpy(&params, &info->params, sizeof(params));
+
+	info->params.mode = MGSL_MODE_ASYNC;
+	info->params.data_rate = 921600;
+	info->params.loopback = 1;
+	info->port.tty = NULL;
+
+	/* build and send transmit frame */
+	for (count = 0; count < TESTFRAMESIZE; ++count)
+		buf[count] = (unsigned char)count;
+
+	info->tmp_rbuf_count = 0;
+	memset(info->tmp_rbuf, 0, TESTFRAMESIZE);
+
+	/* program hardware for async mode and enable receiver */
+	spin_lock_irqsave(&info->lock,flags);
+	async_mode(info);
+	rx_start(info);
+	tx_load(info, buf, count);
+	spin_unlock_irqrestore(&info->lock, flags);
+
+	/* wait for receive complete */
+	for (timeout = 100; timeout; --timeout) {
+		msleep_interruptible(10);
+		if (loopback_test_rx(info)) {
+			rc = 0;
+			break;
+		}
+	}
+
+	/* verify received frame length and contents */
+	if (!rc && (info->tmp_rbuf_count != count ||
+		memcmp(buf, info->tmp_rbuf, count))) {
+		rc = -ENODEV;
+	}
+
+	spin_lock_irqsave(&info->lock,flags);
+	reset_adapter(info);
+	spin_unlock_irqrestore(&info->lock,flags);
+
+	memcpy(&info->params, &params, sizeof(info->params));
+	info->port.tty = oldtty;
+
+	info->init_error = rc ?
DiagStatus_DmaFailure : 0; + return rc; +} + +static int adapter_test(struct slgt_info *info) +{ + DBGINFO(("testing %s\n", info->device_name)); + if (register_test(info) < 0) { + printk("register test failure %s addr=%08X\n", + info->device_name, info->phys_reg_addr); + } else if (irq_test(info) < 0) { + printk("IRQ test failure %s IRQ=%d\n", + info->device_name, info->irq_level); + } else if (loopback_test(info) < 0) { + printk("loopback test failure %s\n", info->device_name); + } + return info->init_error; +} + +/* + * transmit timeout handler + */ +static void tx_timeout(unsigned long context) +{ + struct slgt_info *info = (struct slgt_info*)context; + unsigned long flags; + + DBGINFO(("%s tx_timeout\n", info->device_name)); + if(info->tx_active && info->params.mode == MGSL_MODE_HDLC) { + info->icount.txtimeout++; + } + spin_lock_irqsave(&info->lock,flags); + tx_stop(info); + spin_unlock_irqrestore(&info->lock,flags); + +#if SYNCLINK_GENERIC_HDLC + if (info->netcount) + hdlcdev_tx_done(info); + else +#endif + bh_transmit(info); +} + +/* + * receive buffer polling timer + */ +static void rx_timeout(unsigned long context) +{ + struct slgt_info *info = (struct slgt_info*)context; + unsigned long flags; + + DBGINFO(("%s rx_timeout\n", info->device_name)); + spin_lock_irqsave(&info->lock, flags); + info->pending_bh |= BH_RECEIVE; + spin_unlock_irqrestore(&info->lock, flags); + bh_handler(&info->task); +} + diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c new file mode 100644 index 00000000000..32734369447 --- /dev/null +++ b/drivers/tty/synclinkmp.c @@ -0,0 +1,5600 @@ +/* + * $Id: synclinkmp.c,v 4.38 2005/07/15 13:29:44 paulkf Exp $ + * + * Device driver for Microgate SyncLink Multiport + * high speed multiprotocol serial adapter. + * + * written by Paul Fulghum for Microgate Corporation + * paulkf@microgate.com + * + * Microgate and SyncLink are trademarks of Microgate Corporation + * + * Derived from serial.c written by Theodore Ts'o and Linus Torvalds + * This code is released under the GNU General Public License (GPL) + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#define VERSION(ver,rel,seq) (((ver)<<16) | ((rel)<<8) | (seq)) +#if defined(__i386__) +# define BREAKPOINT() asm(" int $3"); +#else +# define BREAKPOINT() { } +#endif + +#define MAX_DEVICES 12 + +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/signal.h> +#include <linux/sched.h> +#include <linux/timer.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/tty.h> +#include <linux/tty_flip.h> +#include <linux/serial.h> +#include <linux/major.h> +#include <linux/string.h> +#include <linux/fcntl.h> +#include <linux/ptrace.h> +#include <linux/ioport.h> +#include <linux/mm.h> +#include <linux/seq_file.h> +#include <linux/slab.h> +#include <linux/netdevice.h> +#include <linux/vmalloc.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/ioctl.h> + +#include <asm/system.h> +#include <asm/io.h> +#include <asm/irq.h> +#include <asm/dma.h> +#include <linux/bitops.h> +#include <asm/types.h> +#include <linux/termios.h> +#include <linux/workqueue.h> +#include <linux/hdlc.h> +#include <linux/synclink.h> + +#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINKMP_MODULE)) +#define SYNCLINK_GENERIC_HDLC 1 +#else +#define SYNCLINK_GENERIC_HDLC 0 +#endif + +#define GET_USER(error,value,addr) error = get_user(value,addr) +#define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0 +#define PUT_USER(error,value,addr) error = put_user(value,addr) +#define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0 + +#include <asm/uaccess.h> + +static MGSL_PARAMS default_params = { + MGSL_MODE_HDLC, /* unsigned long mode */ + 0, /* unsigned char loopback; */ + HDLC_FLAG_UNDERRUN_ABORT15, /* unsigned short flags; */ + HDLC_ENCODING_NRZI_SPACE, /* unsigned char encoding; */ + 0, /* unsigned long clock_speed; */ + 0xff, /* unsigned char addr_filter; */ + HDLC_CRC_16_CCITT, /* unsigned short crc_type; */ + HDLC_PREAMBLE_LENGTH_8BITS, /* unsigned char preamble_length; */ + HDLC_PREAMBLE_PATTERN_NONE, /* unsigned char preamble; */ + 9600, /* unsigned long data_rate; */ + 8, /* unsigned char data_bits; */ + 1, /* unsigned char stop_bits; */ + ASYNC_PARITY_NONE /* unsigned char parity; */ +}; + +/* size in bytes of DMA data buffers */ +#define SCABUFSIZE 1024 +#define SCA_MEM_SIZE 0x40000 +#define SCA_BASE_SIZE 512 +#define SCA_REG_SIZE 16 +#define SCA_MAX_PORTS 4 +#define SCAMAXDESC 128 + +#define BUFFERLISTSIZE 4096 + +/* SCA-I style DMA buffer descriptor */ +typedef struct _SCADESC +{ + u16 next; /* lower l6 bits of next descriptor addr */ + u16 buf_ptr; /* lower 16 bits of buffer addr */ + u8 buf_base; /* upper 8 bits of buffer addr */ + u8 pad1; + u16 length; /* length of buffer */ + u8 status; /* status of buffer */ + u8 pad2; +} SCADESC, *PSCADESC; + +typedef struct _SCADESC_EX +{ + /* device driver bookkeeping section */ + char *virt_addr; /* virtual address of data buffer */ + u16 phys_entry; /* lower 16-bits of physical address of this descriptor */ +} SCADESC_EX, *PSCADESC_EX; + +/* The queue of BH actions to be performed */ + +#define BH_RECEIVE 1 +#define BH_TRANSMIT 2 +#define BH_STATUS 4 + +#define IO_PIN_SHUTDOWN_LIMIT 100 + +struct _input_signal_events { + int ri_up; + int ri_down; + int dsr_up; + int dsr_down; + int dcd_up; + int dcd_down; + int cts_up; + int cts_down; +}; + +/* + * Device instance data structure + */ +typedef struct _synclinkmp_info { + void *if_ptr; /* General purpose pointer (used by SPPP) */ + int magic; + struct tty_port port; 
+ int line; + unsigned short close_delay; + unsigned short closing_wait; /* time to wait before closing */ + + struct mgsl_icount icount; + + int timeout; + int x_char; /* xon/xoff character */ + u16 read_status_mask1; /* break detection (SR1 indications) */ + u16 read_status_mask2; /* parity/framing/overun (SR2 indications) */ + unsigned char ignore_status_mask1; /* break detection (SR1 indications) */ + unsigned char ignore_status_mask2; /* parity/framing/overun (SR2 indications) */ + unsigned char *tx_buf; + int tx_put; + int tx_get; + int tx_count; + + wait_queue_head_t status_event_wait_q; + wait_queue_head_t event_wait_q; + struct timer_list tx_timer; /* HDLC transmit timeout timer */ + struct _synclinkmp_info *next_device; /* device list link */ + struct timer_list status_timer; /* input signal status check timer */ + + spinlock_t lock; /* spinlock for synchronizing with ISR */ + struct work_struct task; /* task structure for scheduling bh */ + + u32 max_frame_size; /* as set by device config */ + + u32 pending_bh; + + bool bh_running; /* Protection from multiple */ + int isr_overflow; + bool bh_requested; + + int dcd_chkcount; /* check counts to prevent */ + int cts_chkcount; /* too many IRQs if a signal */ + int dsr_chkcount; /* is floating */ + int ri_chkcount; + + char *buffer_list; /* virtual address of Rx & Tx buffer lists */ + unsigned long buffer_list_phys; + + unsigned int rx_buf_count; /* count of total allocated Rx buffers */ + SCADESC *rx_buf_list; /* list of receive buffer entries */ + SCADESC_EX rx_buf_list_ex[SCAMAXDESC]; /* list of receive buffer entries */ + unsigned int current_rx_buf; + + unsigned int tx_buf_count; /* count of total allocated Tx buffers */ + SCADESC *tx_buf_list; /* list of transmit buffer entries */ + SCADESC_EX tx_buf_list_ex[SCAMAXDESC]; /* list of transmit buffer entries */ + unsigned int last_tx_buf; + + unsigned char *tmp_rx_buf; + unsigned int tmp_rx_buf_count; + + bool rx_enabled; + bool rx_overflow; + + bool tx_enabled; + bool tx_active; + u32 idle_mode; + + unsigned char ie0_value; + unsigned char ie1_value; + unsigned char ie2_value; + unsigned char ctrlreg_value; + unsigned char old_signals; + + char device_name[25]; /* device instance name */ + + int port_count; + int adapter_num; + int port_num; + + struct _synclinkmp_info *port_array[SCA_MAX_PORTS]; + + unsigned int bus_type; /* expansion bus type (ISA,EISA,PCI) */ + + unsigned int irq_level; /* interrupt level */ + unsigned long irq_flags; + bool irq_requested; /* true if IRQ requested */ + + MGSL_PARAMS params; /* communications parameters */ + + unsigned char serial_signals; /* current serial signal states */ + + bool irq_occurred; /* for diagnostics use */ + unsigned int init_error; /* Initialization startup error */ + + u32 last_mem_alloc; + unsigned char* memory_base; /* shared memory address (PCI only) */ + u32 phys_memory_base; + int shared_mem_requested; + + unsigned char* sca_base; /* HD64570 SCA Memory address */ + u32 phys_sca_base; + u32 sca_offset; + bool sca_base_requested; + + unsigned char* lcr_base; /* local config registers (PCI only) */ + u32 phys_lcr_base; + u32 lcr_offset; + int lcr_mem_requested; + + unsigned char* statctrl_base; /* status/control register memory */ + u32 phys_statctrl_base; + u32 statctrl_offset; + bool sca_statctrl_requested; + + u32 misc_ctrl_value; + char flag_buf[MAX_ASYNC_BUFFER_SIZE]; + char char_buf[MAX_ASYNC_BUFFER_SIZE]; + bool drop_rts_on_tx_done; + + struct _input_signal_events input_signal_events; + + /* SPPP/Cisco HDLC device 
parts */ + int netcount; + spinlock_t netlock; + +#if SYNCLINK_GENERIC_HDLC + struct net_device *netdev; +#endif + +} SLMP_INFO; + +#define MGSL_MAGIC 0x5401 + +/* + * define serial signal status change macros + */ +#define MISCSTATUS_DCD_LATCHED (SerialSignal_DCD<<8) /* indicates change in DCD */ +#define MISCSTATUS_RI_LATCHED (SerialSignal_RI<<8) /* indicates change in RI */ +#define MISCSTATUS_CTS_LATCHED (SerialSignal_CTS<<8) /* indicates change in CTS */ +#define MISCSTATUS_DSR_LATCHED (SerialSignal_DSR<<8) /* change in DSR */ + +/* Common Register macros */ +#define LPR 0x00 +#define PABR0 0x02 +#define PABR1 0x03 +#define WCRL 0x04 +#define WCRM 0x05 +#define WCRH 0x06 +#define DPCR 0x08 +#define DMER 0x09 +#define ISR0 0x10 +#define ISR1 0x11 +#define ISR2 0x12 +#define IER0 0x14 +#define IER1 0x15 +#define IER2 0x16 +#define ITCR 0x18 +#define INTVR 0x1a +#define IMVR 0x1c + +/* MSCI Register macros */ +#define TRB 0x20 +#define TRBL 0x20 +#define TRBH 0x21 +#define SR0 0x22 +#define SR1 0x23 +#define SR2 0x24 +#define SR3 0x25 +#define FST 0x26 +#define IE0 0x28 +#define IE1 0x29 +#define IE2 0x2a +#define FIE 0x2b +#define CMD 0x2c +#define MD0 0x2e +#define MD1 0x2f +#define MD2 0x30 +#define CTL 0x31 +#define SA0 0x32 +#define SA1 0x33 +#define IDL 0x34 +#define TMC 0x35 +#define RXS 0x36 +#define TXS 0x37 +#define TRC0 0x38 +#define TRC1 0x39 +#define RRC 0x3a +#define CST0 0x3c +#define CST1 0x3d + +/* Timer Register Macros */ +#define TCNT 0x60 +#define TCNTL 0x60 +#define TCNTH 0x61 +#define TCONR 0x62 +#define TCONRL 0x62 +#define TCONRH 0x63 +#define TMCS 0x64 +#define TEPR 0x65 + +/* DMA Controller Register macros */ +#define DARL 0x80 +#define DARH 0x81 +#define DARB 0x82 +#define BAR 0x80 +#define BARL 0x80 +#define BARH 0x81 +#define BARB 0x82 +#define SAR 0x84 +#define SARL 0x84 +#define SARH 0x85 +#define SARB 0x86 +#define CPB 0x86 +#define CDA 0x88 +#define CDAL 0x88 +#define CDAH 0x89 +#define EDA 0x8a +#define EDAL 0x8a +#define EDAH 0x8b +#define BFL 0x8c +#define BFLL 0x8c +#define BFLH 0x8d +#define BCR 0x8e +#define BCRL 0x8e +#define BCRH 0x8f +#define DSR 0x90 +#define DMR 0x91 +#define FCT 0x93 +#define DIR 0x94 +#define DCMD 0x95 + +/* combine with timer or DMA register address */ +#define TIMER0 0x00 +#define TIMER1 0x08 +#define TIMER2 0x10 +#define TIMER3 0x18 +#define RXDMA 0x00 +#define TXDMA 0x20 + +/* SCA Command Codes */ +#define NOOP 0x00 +#define TXRESET 0x01 +#define TXENABLE 0x02 +#define TXDISABLE 0x03 +#define TXCRCINIT 0x04 +#define TXCRCEXCL 0x05 +#define TXEOM 0x06 +#define TXABORT 0x07 +#define MPON 0x08 +#define TXBUFCLR 0x09 +#define RXRESET 0x11 +#define RXENABLE 0x12 +#define RXDISABLE 0x13 +#define RXCRCINIT 0x14 +#define RXREJECT 0x15 +#define SEARCHMP 0x16 +#define RXCRCEXCL 0x17 +#define RXCRCCALC 0x18 +#define CHRESET 0x21 +#define HUNT 0x31 + +/* DMA command codes */ +#define SWABORT 0x01 +#define FEICLEAR 0x02 + +/* IE0 */ +#define TXINTE BIT7 +#define RXINTE BIT6 +#define TXRDYE BIT1 +#define RXRDYE BIT0 + +/* IE1 & SR1 */ +#define UDRN BIT7 +#define IDLE BIT6 +#define SYNCD BIT4 +#define FLGD BIT4 +#define CCTS BIT3 +#define CDCD BIT2 +#define BRKD BIT1 +#define ABTD BIT1 +#define GAPD BIT1 +#define BRKE BIT0 +#define IDLD BIT0 + +/* IE2 & SR2 */ +#define EOM BIT7 +#define PMP BIT6 +#define SHRT BIT6 +#define PE BIT5 +#define ABT BIT5 +#define FRME BIT4 +#define RBIT BIT4 +#define OVRN BIT3 +#define CRCE BIT2 + + +/* + * Global linked list of SyncLink devices + */ +static SLMP_INFO *synclinkmp_device_list = NULL; +static 
int synclinkmp_adapter_count = -1; +static int synclinkmp_device_count = 0; + +/* + * Set this param to non-zero to load eax with the + * .text section address and breakpoint on module load. + * This is useful for use with gdb and add-symbol-file command. + */ +static int break_on_load = 0; + +/* + * Driver major number, defaults to zero to get auto + * assigned major number. May be forced as module parameter. + */ +static int ttymajor = 0; + +/* + * Array of user specified options for ISA adapters. + */ +static int debug_level = 0; +static int maxframe[MAX_DEVICES] = {0,}; + +module_param(break_on_load, bool, 0); +module_param(ttymajor, int, 0); +module_param(debug_level, int, 0); +module_param_array(maxframe, int, NULL, 0); + +static char *driver_name = "SyncLink MultiPort driver"; +static char *driver_version = "$Revision: 4.38 $"; + +static int synclinkmp_init_one(struct pci_dev *dev,const struct pci_device_id *ent); +static void synclinkmp_remove_one(struct pci_dev *dev); + +static struct pci_device_id synclinkmp_pci_tbl[] = { + { PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_SCA, PCI_ANY_ID, PCI_ANY_ID, }, + { 0, }, /* terminate list */ +}; +MODULE_DEVICE_TABLE(pci, synclinkmp_pci_tbl); + +MODULE_LICENSE("GPL"); + +static struct pci_driver synclinkmp_pci_driver = { + .name = "synclinkmp", + .id_table = synclinkmp_pci_tbl, + .probe = synclinkmp_init_one, + .remove = __devexit_p(synclinkmp_remove_one), +}; + + +static struct tty_driver *serial_driver; + +/* number of characters left in xmit buffer before we ask for more */ +#define WAKEUP_CHARS 256 + + +/* tty callbacks */ + +static int open(struct tty_struct *tty, struct file * filp); +static void close(struct tty_struct *tty, struct file * filp); +static void hangup(struct tty_struct *tty); +static void set_termios(struct tty_struct *tty, struct ktermios *old_termios); + +static int write(struct tty_struct *tty, const unsigned char *buf, int count); +static int put_char(struct tty_struct *tty, unsigned char ch); +static void send_xchar(struct tty_struct *tty, char ch); +static void wait_until_sent(struct tty_struct *tty, int timeout); +static int write_room(struct tty_struct *tty); +static void flush_chars(struct tty_struct *tty); +static void flush_buffer(struct tty_struct *tty); +static void tx_hold(struct tty_struct *tty); +static void tx_release(struct tty_struct *tty); + +static int ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg); +static int chars_in_buffer(struct tty_struct *tty); +static void throttle(struct tty_struct * tty); +static void unthrottle(struct tty_struct * tty); +static int set_break(struct tty_struct *tty, int break_state); + +#if SYNCLINK_GENERIC_HDLC +#define dev_to_port(D) (dev_to_hdlc(D)->priv) +static void hdlcdev_tx_done(SLMP_INFO *info); +static void hdlcdev_rx(SLMP_INFO *info, char *buf, int size); +static int hdlcdev_init(SLMP_INFO *info); +static void hdlcdev_exit(SLMP_INFO *info); +#endif + +/* ioctl handlers */ + +static int get_stats(SLMP_INFO *info, struct mgsl_icount __user *user_icount); +static int get_params(SLMP_INFO *info, MGSL_PARAMS __user *params); +static int set_params(SLMP_INFO *info, MGSL_PARAMS __user *params); +static int get_txidle(SLMP_INFO *info, int __user *idle_mode); +static int set_txidle(SLMP_INFO *info, int idle_mode); +static int tx_enable(SLMP_INFO *info, int enable); +static int tx_abort(SLMP_INFO *info); +static int rx_enable(SLMP_INFO *info, int enable); +static int modem_input_wait(SLMP_INFO *info,int arg); +static int wait_mgsl_event(SLMP_INFO 
*info, int __user *mask_ptr); +static int tiocmget(struct tty_struct *tty); +static int tiocmset(struct tty_struct *tty, + unsigned int set, unsigned int clear); +static int set_break(struct tty_struct *tty, int break_state); + +static void add_device(SLMP_INFO *info); +static void device_init(int adapter_num, struct pci_dev *pdev); +static int claim_resources(SLMP_INFO *info); +static void release_resources(SLMP_INFO *info); + +static int startup(SLMP_INFO *info); +static int block_til_ready(struct tty_struct *tty, struct file * filp,SLMP_INFO *info); +static int carrier_raised(struct tty_port *port); +static void shutdown(SLMP_INFO *info); +static void program_hw(SLMP_INFO *info); +static void change_params(SLMP_INFO *info); + +static bool init_adapter(SLMP_INFO *info); +static bool register_test(SLMP_INFO *info); +static bool irq_test(SLMP_INFO *info); +static bool loopback_test(SLMP_INFO *info); +static int adapter_test(SLMP_INFO *info); +static bool memory_test(SLMP_INFO *info); + +static void reset_adapter(SLMP_INFO *info); +static void reset_port(SLMP_INFO *info); +static void async_mode(SLMP_INFO *info); +static void hdlc_mode(SLMP_INFO *info); + +static void rx_stop(SLMP_INFO *info); +static void rx_start(SLMP_INFO *info); +static void rx_reset_buffers(SLMP_INFO *info); +static void rx_free_frame_buffers(SLMP_INFO *info, unsigned int first, unsigned int last); +static bool rx_get_frame(SLMP_INFO *info); + +static void tx_start(SLMP_INFO *info); +static void tx_stop(SLMP_INFO *info); +static void tx_load_fifo(SLMP_INFO *info); +static void tx_set_idle(SLMP_INFO *info); +static void tx_load_dma_buffer(SLMP_INFO *info, const char *buf, unsigned int count); + +static void get_signals(SLMP_INFO *info); +static void set_signals(SLMP_INFO *info); +static void enable_loopback(SLMP_INFO *info, int enable); +static void set_rate(SLMP_INFO *info, u32 data_rate); + +static int bh_action(SLMP_INFO *info); +static void bh_handler(struct work_struct *work); +static void bh_receive(SLMP_INFO *info); +static void bh_transmit(SLMP_INFO *info); +static void bh_status(SLMP_INFO *info); +static void isr_timer(SLMP_INFO *info); +static void isr_rxint(SLMP_INFO *info); +static void isr_rxrdy(SLMP_INFO *info); +static void isr_txint(SLMP_INFO *info); +static void isr_txrdy(SLMP_INFO *info); +static void isr_rxdmaok(SLMP_INFO *info); +static void isr_rxdmaerror(SLMP_INFO *info); +static void isr_txdmaok(SLMP_INFO *info); +static void isr_txdmaerror(SLMP_INFO *info); +static void isr_io_pin(SLMP_INFO *info, u16 status); + +static int alloc_dma_bufs(SLMP_INFO *info); +static void free_dma_bufs(SLMP_INFO *info); +static int alloc_buf_list(SLMP_INFO *info); +static int alloc_frame_bufs(SLMP_INFO *info, SCADESC *list, SCADESC_EX *list_ex,int count); +static int alloc_tmp_rx_buf(SLMP_INFO *info); +static void free_tmp_rx_buf(SLMP_INFO *info); + +static void load_pci_memory(SLMP_INFO *info, char* dest, const char* src, unsigned short count); +static void trace_block(SLMP_INFO *info, const char* data, int count, int xmit); +static void tx_timeout(unsigned long context); +static void status_timeout(unsigned long context); + +static unsigned char read_reg(SLMP_INFO *info, unsigned char addr); +static void write_reg(SLMP_INFO *info, unsigned char addr, unsigned char val); +static u16 read_reg16(SLMP_INFO *info, unsigned char addr); +static void write_reg16(SLMP_INFO *info, unsigned char addr, u16 val); +static unsigned char read_status_reg(SLMP_INFO * info); +static void write_control_reg(SLMP_INFO * info); + + 
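+/*
+ * Illustrative userspace use of the ioctl interface behind the
+ * get_params()/set_params() handlers declared above. This is a sketch
+ * only; the device node name below is an assumption and depends on how
+ * the tty driver is registered.
+ *
+ *	MGSL_PARAMS params;
+ *	int fd = open("/dev/ttySLM0", O_RDWR | O_NOCTTY);
+ *	ioctl(fd, MGSL_IOCGPARAMS, &params);
+ *	params.mode = MGSL_MODE_HDLC;
+ *	params.crc_type = HDLC_CRC_16_CCITT;
+ *	ioctl(fd, MGSL_IOCSPARAMS, &params);
+ */
+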
+static unsigned char rx_active_fifo_level = 16; // rx request FIFO activation level in bytes +static unsigned char tx_active_fifo_level = 16; // tx request FIFO activation level in bytes +static unsigned char tx_negate_fifo_level = 32; // tx request FIFO negation level in bytes + +static u32 misc_ctrl_value = 0x007e4040; +static u32 lcr1_brdr_value = 0x00800028; + +static u32 read_ahead_count = 8; + +/* DPCR, DMA Priority Control + * + * 07..05 Not used, must be 0 + * 04 BRC, bus release condition: 0=all transfers complete + * 1=release after 1 xfer on all channels + * 03 CCC, channel change condition: 0=every cycle + * 1=after each channel completes all xfers + * 02..00 PR<2..0>, priority 100=round robin + * + * 00000100 = 0x00 + */ +static unsigned char dma_priority = 0x04; + +// Number of bytes that can be written to shared RAM +// in a single write operation +static u32 sca_pci_load_interval = 64; + +/* + * 1st function defined in .text section. Calling this function in + * init_module() followed by a breakpoint allows a remote debugger + * (gdb) to get the .text address for the add-symbol-file command. + * This allows remote debugging of dynamically loadable modules. + */ +static void* synclinkmp_get_text_ptr(void); +static void* synclinkmp_get_text_ptr(void) {return synclinkmp_get_text_ptr;} + +static inline int sanity_check(SLMP_INFO *info, + char *name, const char *routine) +{ +#ifdef SANITY_CHECK + static const char *badmagic = + "Warning: bad magic number for synclinkmp_struct (%s) in %s\n"; + static const char *badinfo = + "Warning: null synclinkmp_struct for (%s) in %s\n"; + + if (!info) { + printk(badinfo, name, routine); + return 1; + } + if (info->magic != MGSL_MAGIC) { + printk(badmagic, name, routine); + return 1; + } +#else + if (!info) + return 1; +#endif + return 0; +} + +/** + * line discipline callback wrappers + * + * The wrappers maintain line discipline references + * while calling into the line discipline. + * + * ldisc_receive_buf - pass receive data to line discipline + */ + +static void ldisc_receive_buf(struct tty_struct *tty, + const __u8 *data, char *flags, int count) +{ + struct tty_ldisc *ld; + if (!tty) + return; + ld = tty_ldisc_ref(tty); + if (ld) { + if (ld->ops->receive_buf) + ld->ops->receive_buf(tty, data, flags, count); + tty_ldisc_deref(ld); + } +} + +/* tty callbacks */ + +/* Called when a port is opened. Init and enable port. + */ +static int open(struct tty_struct *tty, struct file *filp) +{ + SLMP_INFO *info; + int retval, line; + unsigned long flags; + + line = tty->index; + if ((line < 0) || (line >= synclinkmp_device_count)) { + printk("%s(%d): open with invalid line #%d.\n", + __FILE__,__LINE__,line); + return -ENODEV; + } + + info = synclinkmp_device_list; + while(info && info->line != line) + info = info->next_device; + if (sanity_check(info, tty->name, "open")) + return -ENODEV; + if ( info->init_error ) { + printk("%s(%d):%s device is not allocated, init error=%d\n", + __FILE__,__LINE__,info->device_name,info->init_error); + return -ENODEV; + } + + tty->driver_data = info; + info->port.tty = tty; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s open(), old ref count = %d\n", + __FILE__,__LINE__,tty->driver->name, info->port.count); + + /* If port is closing, signal caller to try again */ + if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){ + if (info->port.flags & ASYNC_CLOSING) + interruptible_sleep_on(&info->port.close_wait); + retval = ((info->port.flags & ASYNC_HUP_NOTIFY) ? 
+ -EAGAIN : -ERESTARTSYS); + goto cleanup; + } + + info->port.tty->low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0; + + spin_lock_irqsave(&info->netlock, flags); + if (info->netcount) { + retval = -EBUSY; + spin_unlock_irqrestore(&info->netlock, flags); + goto cleanup; + } + info->port.count++; + spin_unlock_irqrestore(&info->netlock, flags); + + if (info->port.count == 1) { + /* 1st open on this device, init hardware */ + retval = startup(info); + if (retval < 0) + goto cleanup; + } + + retval = block_til_ready(tty, filp, info); + if (retval) { + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s block_til_ready() returned %d\n", + __FILE__,__LINE__, info->device_name, retval); + goto cleanup; + } + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s open() success\n", + __FILE__,__LINE__, info->device_name); + retval = 0; + +cleanup: + if (retval) { + if (tty->count == 1) + info->port.tty = NULL; /* tty layer will release tty struct */ + if(info->port.count) + info->port.count--; + } + + return retval; +} + +/* Called when port is closed. Wait for remaining data to be + * sent. Disable port and free resources. + */ +static void close(struct tty_struct *tty, struct file *filp) +{ + SLMP_INFO * info = tty->driver_data; + + if (sanity_check(info, tty->name, "close")) + return; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s close() entry, count=%d\n", + __FILE__,__LINE__, info->device_name, info->port.count); + + if (tty_port_close_start(&info->port, tty, filp) == 0) + goto cleanup; + + mutex_lock(&info->port.mutex); + if (info->port.flags & ASYNC_INITIALIZED) + wait_until_sent(tty, info->timeout); + + flush_buffer(tty); + tty_ldisc_flush(tty); + shutdown(info); + mutex_unlock(&info->port.mutex); + + tty_port_close_end(&info->port, tty); + info->port.tty = NULL; +cleanup: + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__, + tty->driver->name, info->port.count); +} + +/* Called by tty_hangup() when a hangup is signaled. + * This is the same as closing all open descriptors for the port. 
+ */ +static void hangup(struct tty_struct *tty) +{ + SLMP_INFO *info = tty->driver_data; + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s hangup()\n", + __FILE__,__LINE__, info->device_name ); + + if (sanity_check(info, tty->name, "hangup")) + return; + + mutex_lock(&info->port.mutex); + flush_buffer(tty); + shutdown(info); + + spin_lock_irqsave(&info->port.lock, flags); + info->port.count = 0; + info->port.flags &= ~ASYNC_NORMAL_ACTIVE; + info->port.tty = NULL; + spin_unlock_irqrestore(&info->port.lock, flags); + mutex_unlock(&info->port.mutex); + + wake_up_interruptible(&info->port.open_wait); +} + +/* Set new termios settings + */ +static void set_termios(struct tty_struct *tty, struct ktermios *old_termios) +{ + SLMP_INFO *info = tty->driver_data; + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s set_termios()\n", __FILE__,__LINE__, + tty->driver->name ); + + change_params(info); + + /* Handle transition to B0 status */ + if (old_termios->c_cflag & CBAUD && + !(tty->termios->c_cflag & CBAUD)) { + info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR); + spin_lock_irqsave(&info->lock,flags); + set_signals(info); + spin_unlock_irqrestore(&info->lock,flags); + } + + /* Handle transition away from B0 status */ + if (!(old_termios->c_cflag & CBAUD) && + tty->termios->c_cflag & CBAUD) { + info->serial_signals |= SerialSignal_DTR; + if (!(tty->termios->c_cflag & CRTSCTS) || + !test_bit(TTY_THROTTLED, &tty->flags)) { + info->serial_signals |= SerialSignal_RTS; + } + spin_lock_irqsave(&info->lock,flags); + set_signals(info); + spin_unlock_irqrestore(&info->lock,flags); + } + + /* Handle turning off CRTSCTS */ + if (old_termios->c_cflag & CRTSCTS && + !(tty->termios->c_cflag & CRTSCTS)) { + tty->hw_stopped = 0; + tx_release(tty); + } +} + +/* Send a block of data + * + * Arguments: + * + * tty pointer to tty information structure + * buf pointer to buffer containing send data + * count size of send data in bytes + * + * Return Value: number of characters written + */ +static int write(struct tty_struct *tty, + const unsigned char *buf, int count) +{ + int c, ret = 0; + SLMP_INFO *info = tty->driver_data; + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s write() count=%d\n", + __FILE__,__LINE__,info->device_name,count); + + if (sanity_check(info, tty->name, "write")) + goto cleanup; + + if (!info->tx_buf) + goto cleanup; + + if (info->params.mode == MGSL_MODE_HDLC) { + if (count > info->max_frame_size) { + ret = -EIO; + goto cleanup; + } + if (info->tx_active) + goto cleanup; + if (info->tx_count) { + /* send accumulated data from send_char() calls */ + /* as frame and wait before accepting more data. 
*/ + tx_load_dma_buffer(info, info->tx_buf, info->tx_count); + goto start; + } + ret = info->tx_count = count; + tx_load_dma_buffer(info, buf, count); + goto start; + } + + for (;;) { + c = min_t(int, count, + min(info->max_frame_size - info->tx_count - 1, + info->max_frame_size - info->tx_put)); + if (c <= 0) + break; + + memcpy(info->tx_buf + info->tx_put, buf, c); + + spin_lock_irqsave(&info->lock,flags); + info->tx_put += c; + if (info->tx_put >= info->max_frame_size) + info->tx_put -= info->max_frame_size; + info->tx_count += c; + spin_unlock_irqrestore(&info->lock,flags); + + buf += c; + count -= c; + ret += c; + } + + if (info->params.mode == MGSL_MODE_HDLC) { + if (count) { + ret = info->tx_count = 0; + goto cleanup; + } + tx_load_dma_buffer(info, info->tx_buf, info->tx_count); + } +start: + if (info->tx_count && !tty->stopped && !tty->hw_stopped) { + spin_lock_irqsave(&info->lock,flags); + if (!info->tx_active) + tx_start(info); + spin_unlock_irqrestore(&info->lock,flags); + } + +cleanup: + if (debug_level >= DEBUG_LEVEL_INFO) + printk( "%s(%d):%s write() returning=%d\n", + __FILE__,__LINE__,info->device_name,ret); + return ret; +} + +/* Add a character to the transmit buffer. + */ +static int put_char(struct tty_struct *tty, unsigned char ch) +{ + SLMP_INFO *info = tty->driver_data; + unsigned long flags; + int ret = 0; + + if ( debug_level >= DEBUG_LEVEL_INFO ) { + printk( "%s(%d):%s put_char(%d)\n", + __FILE__,__LINE__,info->device_name,ch); + } + + if (sanity_check(info, tty->name, "put_char")) + return 0; + + if (!info->tx_buf) + return 0; + + spin_lock_irqsave(&info->lock,flags); + + if ( (info->params.mode != MGSL_MODE_HDLC) || + !info->tx_active ) { + + if (info->tx_count < info->max_frame_size - 1) { + info->tx_buf[info->tx_put++] = ch; + if (info->tx_put >= info->max_frame_size) + info->tx_put -= info->max_frame_size; + info->tx_count++; + ret = 1; + } + } + + spin_unlock_irqrestore(&info->lock,flags); + return ret; +} + +/* Send a high-priority XON/XOFF character + */ +static void send_xchar(struct tty_struct *tty, char ch) +{ + SLMP_INFO *info = tty->driver_data; + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s send_xchar(%d)\n", + __FILE__,__LINE__, info->device_name, ch ); + + if (sanity_check(info, tty->name, "send_xchar")) + return; + + info->x_char = ch; + if (ch) { + /* Make sure transmit interrupts are on */ + spin_lock_irqsave(&info->lock,flags); + if (!info->tx_enabled) + tx_start(info); + spin_unlock_irqrestore(&info->lock,flags); + } +} + +/* Wait until the transmitter is empty. + */ +static void wait_until_sent(struct tty_struct *tty, int timeout) +{ + SLMP_INFO * info = tty->driver_data; + unsigned long orig_jiffies, char_time; + + if (!info ) + return; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s wait_until_sent() entry\n", + __FILE__,__LINE__, info->device_name ); + + if (sanity_check(info, tty->name, "wait_until_sent")) + return; + + if (!test_bit(ASYNCB_INITIALIZED, &info->port.flags)) + goto exit; + + orig_jiffies = jiffies; + + /* Set check interval to 1/5 of estimated time to + * send a character, and make it at least 1. The check + * interval should also be less than the timeout. + * Note: use tight timings here to satisfy the NIST-PCTS. 
+ */ + + if ( info->params.data_rate ) { + char_time = info->timeout/(32 * 5); + if (!char_time) + char_time++; + } else + char_time = 1; + + if (timeout) + char_time = min_t(unsigned long, char_time, timeout); + + if ( info->params.mode == MGSL_MODE_HDLC ) { + while (info->tx_active) { + msleep_interruptible(jiffies_to_msecs(char_time)); + if (signal_pending(current)) + break; + if (timeout && time_after(jiffies, orig_jiffies + timeout)) + break; + } + } else { + /* + * TODO: determine if there is something similar to USC16C32 + * TXSTATUS_ALL_SENT status + */ + while ( info->tx_active && info->tx_enabled) { + msleep_interruptible(jiffies_to_msecs(char_time)); + if (signal_pending(current)) + break; + if (timeout && time_after(jiffies, orig_jiffies + timeout)) + break; + } + } + +exit: + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s wait_until_sent() exit\n", + __FILE__,__LINE__, info->device_name ); +} + +/* Return the count of free bytes in transmit buffer + */ +static int write_room(struct tty_struct *tty) +{ + SLMP_INFO *info = tty->driver_data; + int ret; + + if (sanity_check(info, tty->name, "write_room")) + return 0; + + if (info->params.mode == MGSL_MODE_HDLC) { + ret = (info->tx_active) ? 0 : HDLC_MAX_FRAME_SIZE; + } else { + ret = info->max_frame_size - info->tx_count - 1; + if (ret < 0) + ret = 0; + } + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s write_room()=%d\n", + __FILE__, __LINE__, info->device_name, ret); + + return ret; +} + +/* enable transmitter and send remaining buffered characters + */ +static void flush_chars(struct tty_struct *tty) +{ + SLMP_INFO *info = tty->driver_data; + unsigned long flags; + + if ( debug_level >= DEBUG_LEVEL_INFO ) + printk( "%s(%d):%s flush_chars() entry tx_count=%d\n", + __FILE__,__LINE__,info->device_name,info->tx_count); + + if (sanity_check(info, tty->name, "flush_chars")) + return; + + if (info->tx_count <= 0 || tty->stopped || tty->hw_stopped || + !info->tx_buf) + return; + + if ( debug_level >= DEBUG_LEVEL_INFO ) + printk( "%s(%d):%s flush_chars() entry, starting transmitter\n", + __FILE__,__LINE__,info->device_name ); + + spin_lock_irqsave(&info->lock,flags); + + if (!info->tx_active) { + if ( (info->params.mode == MGSL_MODE_HDLC) && + info->tx_count ) { + /* operating in synchronous (frame oriented) mode */ + /* copy data from circular tx_buf to */ + /* transmit DMA buffer. 
*/ + tx_load_dma_buffer(info, + info->tx_buf,info->tx_count); + } + tx_start(info); + } + + spin_unlock_irqrestore(&info->lock,flags); +} + +/* Discard all data in the send buffer + */ +static void flush_buffer(struct tty_struct *tty) +{ + SLMP_INFO *info = tty->driver_data; + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s flush_buffer() entry\n", + __FILE__,__LINE__, info->device_name ); + + if (sanity_check(info, tty->name, "flush_buffer")) + return; + + spin_lock_irqsave(&info->lock,flags); + info->tx_count = info->tx_put = info->tx_get = 0; + del_timer(&info->tx_timer); + spin_unlock_irqrestore(&info->lock,flags); + + tty_wakeup(tty); +} + +/* throttle (stop) transmitter + */ +static void tx_hold(struct tty_struct *tty) +{ + SLMP_INFO *info = tty->driver_data; + unsigned long flags; + + if (sanity_check(info, tty->name, "tx_hold")) + return; + + if ( debug_level >= DEBUG_LEVEL_INFO ) + printk("%s(%d):%s tx_hold()\n", + __FILE__,__LINE__,info->device_name); + + spin_lock_irqsave(&info->lock,flags); + if (info->tx_enabled) + tx_stop(info); + spin_unlock_irqrestore(&info->lock,flags); +} + +/* release (start) transmitter + */ +static void tx_release(struct tty_struct *tty) +{ + SLMP_INFO *info = tty->driver_data; + unsigned long flags; + + if (sanity_check(info, tty->name, "tx_release")) + return; + + if ( debug_level >= DEBUG_LEVEL_INFO ) + printk("%s(%d):%s tx_release()\n", + __FILE__,__LINE__,info->device_name); + + spin_lock_irqsave(&info->lock,flags); + if (!info->tx_enabled) + tx_start(info); + spin_unlock_irqrestore(&info->lock,flags); +} + +/* Service an IOCTL request + * + * Arguments: + * + * tty pointer to tty instance data + * cmd IOCTL command code + * arg command argument/context + * + * Return Value: 0 if success, otherwise error code + */ +static int ioctl(struct tty_struct *tty, + unsigned int cmd, unsigned long arg) +{ + SLMP_INFO *info = tty->driver_data; + void __user *argp = (void __user *)arg; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s ioctl() cmd=%08X\n", __FILE__,__LINE__, + info->device_name, cmd ); + + if (sanity_check(info, tty->name, "ioctl")) + return -ENODEV; + + if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) && + (cmd != TIOCMIWAIT)) { + if (tty->flags & (1 << TTY_IO_ERROR)) + return -EIO; + } + + switch (cmd) { + case MGSL_IOCGPARAMS: + return get_params(info, argp); + case MGSL_IOCSPARAMS: + return set_params(info, argp); + case MGSL_IOCGTXIDLE: + return get_txidle(info, argp); + case MGSL_IOCSTXIDLE: + return set_txidle(info, (int)arg); + case MGSL_IOCTXENABLE: + return tx_enable(info, (int)arg); + case MGSL_IOCRXENABLE: + return rx_enable(info, (int)arg); + case MGSL_IOCTXABORT: + return tx_abort(info); + case MGSL_IOCGSTATS: + return get_stats(info, argp); + case MGSL_IOCWAITEVENT: + return wait_mgsl_event(info, argp); + case MGSL_IOCLOOPTXDONE: + return 0; // TODO: Not supported, need to document + /* Wait for modem input (DCD,RI,DSR,CTS) change + * as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS) + */ + case TIOCMIWAIT: + return modem_input_wait(info,(int)arg); + + /* + * Get counter of input serial line interrupts (DCD,RI,DSR,CTS) + * Return: write counters to the user passed counter struct + * NB: both 1->0 and 0->1 transitions are counted except for + * RI where only 0->1 is counted. 
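+	 *
+	 * These counters are reported through get_icount() below; the tty
+	 * core calls it for TIOCGICOUNT, so no TIOCGICOUNT case is needed
+	 * in this switch.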
+ */
+	default:
+		return -ENOIOCTLCMD;
+	}
+	return 0;
+}
+
+static int get_icount(struct tty_struct *tty,
+				struct serial_icounter_struct *icount)
+{
+	SLMP_INFO *info = tty->driver_data;
+	struct mgsl_icount cnow;	/* kernel counter temps */
+	unsigned long flags;
+
+	spin_lock_irqsave(&info->lock,flags);
+	cnow = info->icount;
+	spin_unlock_irqrestore(&info->lock,flags);
+
+	icount->cts = cnow.cts;
+	icount->dsr = cnow.dsr;
+	icount->rng = cnow.rng;
+	icount->dcd = cnow.dcd;
+	icount->rx = cnow.rx;
+	icount->tx = cnow.tx;
+	icount->frame = cnow.frame;
+	icount->overrun = cnow.overrun;
+	icount->parity = cnow.parity;
+	icount->brk = cnow.brk;
+	icount->buf_overrun = cnow.buf_overrun;
+
+	return 0;
+}
+
+/*
+ * /proc fs routines....
+ */
+
+static inline void line_info(struct seq_file *m, SLMP_INFO *info)
+{
+	char stat_buf[30];
+	unsigned long flags;
+
+	seq_printf(m, "%s: SCABase=%08x Mem=%08X StatusControl=%08x LCR=%08X\n"
+		       "\tIRQ=%d MaxFrameSize=%u\n",
+		info->device_name,
+		info->phys_sca_base,
+		info->phys_memory_base,
+		info->phys_statctrl_base,
+		info->phys_lcr_base,
+		info->irq_level,
+		info->max_frame_size );
+
+	/* output current serial signal states */
+	spin_lock_irqsave(&info->lock,flags);
+	get_signals(info);
+	spin_unlock_irqrestore(&info->lock,flags);
+
+	stat_buf[0] = 0;
+	stat_buf[1] = 0;
+	if (info->serial_signals & SerialSignal_RTS)
+		strcat(stat_buf, "|RTS");
+	if (info->serial_signals & SerialSignal_CTS)
+		strcat(stat_buf, "|CTS");
+	if (info->serial_signals & SerialSignal_DTR)
+		strcat(stat_buf, "|DTR");
+	if (info->serial_signals & SerialSignal_DSR)
+		strcat(stat_buf, "|DSR");
+	if (info->serial_signals & SerialSignal_DCD)
+		strcat(stat_buf, "|CD");
+	if (info->serial_signals & SerialSignal_RI)
+		strcat(stat_buf, "|RI");
+
+	if (info->params.mode == MGSL_MODE_HDLC) {
+		seq_printf(m, "\tHDLC txok:%d rxok:%d",
+			      info->icount.txok, info->icount.rxok);
+		if (info->icount.txunder)
+			seq_printf(m, " txunder:%d", info->icount.txunder);
+		if (info->icount.txabort)
+			seq_printf(m, " txabort:%d", info->icount.txabort);
+		if (info->icount.rxshort)
+			seq_printf(m, " rxshort:%d", info->icount.rxshort);
+		if (info->icount.rxlong)
+			seq_printf(m, " rxlong:%d", info->icount.rxlong);
+		if (info->icount.rxover)
+			seq_printf(m, " rxover:%d", info->icount.rxover);
+		if (info->icount.rxcrc)
+			seq_printf(m, " rxcrc:%d", info->icount.rxcrc);
+	} else {
+		seq_printf(m, "\tASYNC tx:%d rx:%d",
+			      info->icount.tx, info->icount.rx);
+		if (info->icount.frame)
+			seq_printf(m, " fe:%d", info->icount.frame);
+		if (info->icount.parity)
+			seq_printf(m, " pe:%d", info->icount.parity);
+		if (info->icount.brk)
+			seq_printf(m, " brk:%d", info->icount.brk);
+		if (info->icount.overrun)
+			seq_printf(m, " oe:%d", info->icount.overrun);
+	}
+
+	/* Append serial signal status to end */
+	seq_printf(m, " %s\n", stat_buf+1);
+
+	seq_printf(m, "\ttxactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
+	 info->tx_active,info->bh_requested,info->bh_running,
+	 info->pending_bh);
+}
+
+/* Called to print information about devices
+ */
+static int synclinkmp_proc_show(struct seq_file *m, void *v)
+{
+	SLMP_INFO *info;
+
+	seq_printf(m, "synclinkmp driver:%s\n", driver_version);
+
+	info = synclinkmp_device_list;
+	while( info ) {
+		line_info(m, info);
+		info = info->next_device;
+	}
+	return 0;
+}
+
+static int synclinkmp_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, synclinkmp_proc_show, NULL);
+}
+
+static const struct file_operations synclinkmp_proc_fops = {
+	.owner		= THIS_MODULE,
+	
.open = synclinkmp_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/* Return the count of bytes in transmit buffer + */ +static int chars_in_buffer(struct tty_struct *tty) +{ + SLMP_INFO *info = tty->driver_data; + + if (sanity_check(info, tty->name, "chars_in_buffer")) + return 0; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s chars_in_buffer()=%d\n", + __FILE__, __LINE__, info->device_name, info->tx_count); + + return info->tx_count; +} + +/* Signal remote device to throttle send data (our receive data) + */ +static void throttle(struct tty_struct * tty) +{ + SLMP_INFO *info = tty->driver_data; + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s throttle() entry\n", + __FILE__,__LINE__, info->device_name ); + + if (sanity_check(info, tty->name, "throttle")) + return; + + if (I_IXOFF(tty)) + send_xchar(tty, STOP_CHAR(tty)); + + if (tty->termios->c_cflag & CRTSCTS) { + spin_lock_irqsave(&info->lock,flags); + info->serial_signals &= ~SerialSignal_RTS; + set_signals(info); + spin_unlock_irqrestore(&info->lock,flags); + } +} + +/* Signal remote device to stop throttling send data (our receive data) + */ +static void unthrottle(struct tty_struct * tty) +{ + SLMP_INFO *info = tty->driver_data; + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s unthrottle() entry\n", + __FILE__,__LINE__, info->device_name ); + + if (sanity_check(info, tty->name, "unthrottle")) + return; + + if (I_IXOFF(tty)) { + if (info->x_char) + info->x_char = 0; + else + send_xchar(tty, START_CHAR(tty)); + } + + if (tty->termios->c_cflag & CRTSCTS) { + spin_lock_irqsave(&info->lock,flags); + info->serial_signals |= SerialSignal_RTS; + set_signals(info); + spin_unlock_irqrestore(&info->lock,flags); + } +} + +/* set or clear transmit break condition + * break_state -1=set break condition, 0=clear + */ +static int set_break(struct tty_struct *tty, int break_state) +{ + unsigned char RegValue; + SLMP_INFO * info = tty->driver_data; + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s set_break(%d)\n", + __FILE__,__LINE__, info->device_name, break_state); + + if (sanity_check(info, tty->name, "set_break")) + return -EINVAL; + + spin_lock_irqsave(&info->lock,flags); + RegValue = read_reg(info, CTL); + if (break_state == -1) + RegValue |= BIT3; + else + RegValue &= ~BIT3; + write_reg(info, CTL, RegValue); + spin_unlock_irqrestore(&info->lock,flags); + return 0; +} + +#if SYNCLINK_GENERIC_HDLC + +/** + * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.) 
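+ * (protocol selection is typically driven from userspace with a tool
+ * such as sethdlc; the encoding and parity arguments are the ENCODING_
+ * and PARITY_ constants handled in the switch statements below)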
+ * set encoding and frame check sequence (FCS) options + * + * dev pointer to network device structure + * encoding serial encoding setting + * parity FCS setting + * + * returns 0 if success, otherwise error code + */ +static int hdlcdev_attach(struct net_device *dev, unsigned short encoding, + unsigned short parity) +{ + SLMP_INFO *info = dev_to_port(dev); + unsigned char new_encoding; + unsigned short new_crctype; + + /* return error if TTY interface open */ + if (info->port.count) + return -EBUSY; + + switch (encoding) + { + case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break; + case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break; + case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break; + case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break; + case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break; + default: return -EINVAL; + } + + switch (parity) + { + case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break; + case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break; + case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break; + default: return -EINVAL; + } + + info->params.encoding = new_encoding; + info->params.crc_type = new_crctype; + + /* if network interface up, reprogram hardware */ + if (info->netcount) + program_hw(info); + + return 0; +} + +/** + * called by generic HDLC layer to send frame + * + * skb socket buffer containing HDLC frame + * dev pointer to network device structure + */ +static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + SLMP_INFO *info = dev_to_port(dev); + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name); + + /* stop sending until this frame completes */ + netif_stop_queue(dev); + + /* copy data to device buffers */ + info->tx_count = skb->len; + tx_load_dma_buffer(info, skb->data, skb->len); + + /* update network statistics */ + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + + /* done with socket buffer, so free it */ + dev_kfree_skb(skb); + + /* save start time for transmit timeout detection */ + dev->trans_start = jiffies; + + /* start hardware transmitter if necessary */ + spin_lock_irqsave(&info->lock,flags); + if (!info->tx_active) + tx_start(info); + spin_unlock_irqrestore(&info->lock,flags); + + return NETDEV_TX_OK; +} + +/** + * called by network layer when interface enabled + * claim resources and initialize hardware + * + * dev pointer to network device structure + * + * returns 0 if success, otherwise error code + */ +static int hdlcdev_open(struct net_device *dev) +{ + SLMP_INFO *info = dev_to_port(dev); + int rc; + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name); + + /* generic HDLC layer open processing */ + if ((rc = hdlc_open(dev))) + return rc; + + /* arbitrate between network and tty opens */ + spin_lock_irqsave(&info->netlock, flags); + if (info->port.count != 0 || info->netcount != 0) { + printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name); + spin_unlock_irqrestore(&info->netlock, flags); + return -EBUSY; + } + info->netcount=1; + spin_unlock_irqrestore(&info->netlock, flags); + + /* claim resources and init adapter */ + if ((rc = startup(info)) != 0) { + spin_lock_irqsave(&info->netlock, flags); + info->netcount=0; + spin_unlock_irqrestore(&info->netlock, flags); + return rc; + } + + /* assert DTR and RTS, apply hardware settings */ + 
info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR; + program_hw(info); + + /* enable network layer transmit */ + dev->trans_start = jiffies; + netif_start_queue(dev); + + /* inform generic HDLC layer of current DCD status */ + spin_lock_irqsave(&info->lock, flags); + get_signals(info); + spin_unlock_irqrestore(&info->lock, flags); + if (info->serial_signals & SerialSignal_DCD) + netif_carrier_on(dev); + else + netif_carrier_off(dev); + return 0; +} + +/** + * called by network layer when interface is disabled + * shutdown hardware and release resources + * + * dev pointer to network device structure + * + * returns 0 if success, otherwise error code + */ +static int hdlcdev_close(struct net_device *dev) +{ + SLMP_INFO *info = dev_to_port(dev); + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name); + + netif_stop_queue(dev); + + /* shutdown adapter and release resources */ + shutdown(info); + + hdlc_close(dev); + + spin_lock_irqsave(&info->netlock, flags); + info->netcount=0; + spin_unlock_irqrestore(&info->netlock, flags); + + return 0; +} + +/** + * called by network layer to process IOCTL call to network device + * + * dev pointer to network device structure + * ifr pointer to network interface request structure + * cmd IOCTL command code + * + * returns 0 if success, otherwise error code + */ +static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + const size_t size = sizeof(sync_serial_settings); + sync_serial_settings new_line; + sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync; + SLMP_INFO *info = dev_to_port(dev); + unsigned int flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name); + + /* return error if TTY interface open */ + if (info->port.count) + return -EBUSY; + + if (cmd != SIOCWANDEV) + return hdlc_ioctl(dev, ifr, cmd); + + switch(ifr->ifr_settings.type) { + case IF_GET_IFACE: /* return current sync_serial_settings */ + + ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL; + if (ifr->ifr_settings.size < size) { + ifr->ifr_settings.size = size; /* data size wanted */ + return -ENOBUFS; + } + + flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL | + HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN | + HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL | + HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); + + switch (flags){ + case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break; + case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break; + case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_TXINT; break; + case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break; + default: new_line.clock_type = CLOCK_DEFAULT; + } + + new_line.clock_rate = info->params.clock_speed; + new_line.loopback = info->params.loopback ? 
1:0; + + if (copy_to_user(line, &new_line, size)) + return -EFAULT; + return 0; + + case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */ + + if(!capable(CAP_NET_ADMIN)) + return -EPERM; + if (copy_from_user(&new_line, line, size)) + return -EFAULT; + + switch (new_line.clock_type) + { + case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break; + case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break; + case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break; + case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break; + case CLOCK_DEFAULT: flags = info->params.flags & + (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL | + HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN | + HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL | + HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break; + default: return -EINVAL; + } + + if (new_line.loopback != 0 && new_line.loopback != 1) + return -EINVAL; + + info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL | + HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN | + HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL | + HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); + info->params.flags |= flags; + + info->params.loopback = new_line.loopback; + + if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG)) + info->params.clock_speed = new_line.clock_rate; + else + info->params.clock_speed = 0; + + /* if network interface up, reprogram hardware */ + if (info->netcount) + program_hw(info); + return 0; + + default: + return hdlc_ioctl(dev, ifr, cmd); + } +} + +/** + * called by network layer when transmit timeout is detected + * + * dev pointer to network device structure + */ +static void hdlcdev_tx_timeout(struct net_device *dev) +{ + SLMP_INFO *info = dev_to_port(dev); + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("hdlcdev_tx_timeout(%s)\n",dev->name); + + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; + + spin_lock_irqsave(&info->lock,flags); + tx_stop(info); + spin_unlock_irqrestore(&info->lock,flags); + + netif_wake_queue(dev); +} + +/** + * called by device driver when transmit completes + * reenable network layer transmit if stopped + * + * info pointer to device instance information + */ +static void hdlcdev_tx_done(SLMP_INFO *info) +{ + if (netif_queue_stopped(info->netdev)) + netif_wake_queue(info->netdev); +} + +/** + * called by device driver when frame received + * pass frame to network layer + * + * info pointer to device instance information + * buf pointer to buffer contianing frame data + * size count of data bytes in buf + */ +static void hdlcdev_rx(SLMP_INFO *info, char *buf, int size) +{ + struct sk_buff *skb = dev_alloc_skb(size); + struct net_device *dev = info->netdev; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("hdlcdev_rx(%s)\n",dev->name); + + if (skb == NULL) { + printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n", + dev->name); + dev->stats.rx_dropped++; + return; + } + + memcpy(skb_put(skb, size), buf, size); + + skb->protocol = hdlc_type_trans(skb, dev); + + dev->stats.rx_packets++; + dev->stats.rx_bytes += size; + + netif_rx(skb); +} + +static const struct net_device_ops hdlcdev_ops = { + .ndo_open = hdlcdev_open, + .ndo_stop = hdlcdev_close, + .ndo_change_mtu = hdlc_change_mtu, + .ndo_start_xmit = hdlc_start_xmit, + .ndo_do_ioctl = hdlcdev_ioctl, + .ndo_tx_timeout = hdlcdev_tx_timeout, +}; + +/** + * called by device driver when adding device instance + * do generic HDLC initialization + * + * info pointer to device instance information + * + * 
returns 0 if success, otherwise error code + */ +static int hdlcdev_init(SLMP_INFO *info) +{ + int rc; + struct net_device *dev; + hdlc_device *hdlc; + + /* allocate and initialize network and HDLC layer objects */ + + if (!(dev = alloc_hdlcdev(info))) { + printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__); + return -ENOMEM; + } + + /* for network layer reporting purposes only */ + dev->mem_start = info->phys_sca_base; + dev->mem_end = info->phys_sca_base + SCA_BASE_SIZE - 1; + dev->irq = info->irq_level; + + /* network layer callbacks and settings */ + dev->netdev_ops = &hdlcdev_ops; + dev->watchdog_timeo = 10 * HZ; + dev->tx_queue_len = 50; + + /* generic HDLC layer callbacks and settings */ + hdlc = dev_to_hdlc(dev); + hdlc->attach = hdlcdev_attach; + hdlc->xmit = hdlcdev_xmit; + + /* register objects with HDLC layer */ + if ((rc = register_hdlc_device(dev))) { + printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__); + free_netdev(dev); + return rc; + } + + info->netdev = dev; + return 0; +} + +/** + * called by device driver when removing device instance + * do generic HDLC cleanup + * + * info pointer to device instance information + */ +static void hdlcdev_exit(SLMP_INFO *info) +{ + unregister_hdlc_device(info->netdev); + free_netdev(info->netdev); + info->netdev = NULL; +} + +#endif /* CONFIG_HDLC */ + + +/* Return next bottom half action to perform. + * Return Value: BH action code or 0 if nothing to do. + */ +static int bh_action(SLMP_INFO *info) +{ + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&info->lock,flags); + + if (info->pending_bh & BH_RECEIVE) { + info->pending_bh &= ~BH_RECEIVE; + rc = BH_RECEIVE; + } else if (info->pending_bh & BH_TRANSMIT) { + info->pending_bh &= ~BH_TRANSMIT; + rc = BH_TRANSMIT; + } else if (info->pending_bh & BH_STATUS) { + info->pending_bh &= ~BH_STATUS; + rc = BH_STATUS; + } + + if (!rc) { + /* Mark BH routine as complete */ + info->bh_running = false; + info->bh_requested = false; + } + + spin_unlock_irqrestore(&info->lock,flags); + + return rc; +} + +/* Perform bottom half processing of work items queued by ISR. 
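+ * Runs in process context: the ISR queues this handler with schedule_work(),
+ * and it repeatedly calls bh_action() to fetch and dispatch work items
+ * until none remain.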
+ */ +static void bh_handler(struct work_struct *work) +{ + SLMP_INFO *info = container_of(work, SLMP_INFO, task); + int action; + + if (!info) + return; + + if ( debug_level >= DEBUG_LEVEL_BH ) + printk( "%s(%d):%s bh_handler() entry\n", + __FILE__,__LINE__,info->device_name); + + info->bh_running = true; + + while((action = bh_action(info)) != 0) { + + /* Process work item */ + if ( debug_level >= DEBUG_LEVEL_BH ) + printk( "%s(%d):%s bh_handler() work item action=%d\n", + __FILE__,__LINE__,info->device_name, action); + + switch (action) { + + case BH_RECEIVE: + bh_receive(info); + break; + case BH_TRANSMIT: + bh_transmit(info); + break; + case BH_STATUS: + bh_status(info); + break; + default: + /* unknown work item ID */ + printk("%s(%d):%s Unknown work item ID=%08X!\n", + __FILE__,__LINE__,info->device_name,action); + break; + } + } + + if ( debug_level >= DEBUG_LEVEL_BH ) + printk( "%s(%d):%s bh_handler() exit\n", + __FILE__,__LINE__,info->device_name); +} + +static void bh_receive(SLMP_INFO *info) +{ + if ( debug_level >= DEBUG_LEVEL_BH ) + printk( "%s(%d):%s bh_receive()\n", + __FILE__,__LINE__,info->device_name); + + while( rx_get_frame(info) ); +} + +static void bh_transmit(SLMP_INFO *info) +{ + struct tty_struct *tty = info->port.tty; + + if ( debug_level >= DEBUG_LEVEL_BH ) + printk( "%s(%d):%s bh_transmit() entry\n", + __FILE__,__LINE__,info->device_name); + + if (tty) + tty_wakeup(tty); +} + +static void bh_status(SLMP_INFO *info) +{ + if ( debug_level >= DEBUG_LEVEL_BH ) + printk( "%s(%d):%s bh_status() entry\n", + __FILE__,__LINE__,info->device_name); + + info->ri_chkcount = 0; + info->dsr_chkcount = 0; + info->dcd_chkcount = 0; + info->cts_chkcount = 0; +} + +static void isr_timer(SLMP_INFO * info) +{ + unsigned char timer = (info->port_num & 1) ? 
TIMER2 : TIMER0; + + /* IER2<7..4> = timer<3..0> interrupt enables (0=disabled) */ + write_reg(info, IER2, 0); + + /* TMCS, Timer Control/Status Register + * + * 07 CMF, Compare match flag (read only) 1=match + * 06 ECMI, CMF Interrupt Enable: 0=disabled + * 05 Reserved, must be 0 + * 04 TME, Timer Enable + * 03..00 Reserved, must be 0 + * + * 0000 0000 + */ + write_reg(info, (unsigned char)(timer + TMCS), 0); + + info->irq_occurred = true; + + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk("%s(%d):%s isr_timer()\n", + __FILE__,__LINE__,info->device_name); +} + +static void isr_rxint(SLMP_INFO * info) +{ + struct tty_struct *tty = info->port.tty; + struct mgsl_icount *icount = &info->icount; + unsigned char status = read_reg(info, SR1) & info->ie1_value & (FLGD + IDLD + CDCD + BRKD); + unsigned char status2 = read_reg(info, SR2) & info->ie2_value & OVRN; + + /* clear status bits */ + if (status) + write_reg(info, SR1, status); + + if (status2) + write_reg(info, SR2, status2); + + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk("%s(%d):%s isr_rxint status=%02X %02x\n", + __FILE__,__LINE__,info->device_name,status,status2); + + if (info->params.mode == MGSL_MODE_ASYNC) { + if (status & BRKD) { + icount->brk++; + + /* process break detection if tty control + * is not set to ignore it + */ + if ( tty ) { + if (!(status & info->ignore_status_mask1)) { + if (info->read_status_mask1 & BRKD) { + tty_insert_flip_char(tty, 0, TTY_BREAK); + if (info->port.flags & ASYNC_SAK) + do_SAK(tty); + } + } + } + } + } + else { + if (status & (FLGD|IDLD)) { + if (status & FLGD) + info->icount.exithunt++; + else if (status & IDLD) + info->icount.rxidle++; + wake_up_interruptible(&info->event_wait_q); + } + } + + if (status & CDCD) { + /* simulate a common modem status change interrupt + * for our handler + */ + get_signals( info ); + isr_io_pin(info, + MISCSTATUS_DCD_LATCHED|(info->serial_signals&SerialSignal_DCD)); + } +} + +/* + * handle async rx data interrupts + */ +static void isr_rxrdy(SLMP_INFO * info) +{ + u16 status; + unsigned char DataByte; + struct tty_struct *tty = info->port.tty; + struct mgsl_icount *icount = &info->icount; + + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk("%s(%d):%s isr_rxrdy\n", + __FILE__,__LINE__,info->device_name); + + while((status = read_reg(info,CST0)) & BIT0) + { + int flag = 0; + bool over = false; + DataByte = read_reg(info,TRB); + + icount->rx++; + + if ( status & (PE + FRME + OVRN) ) { + printk("%s(%d):%s rxerr=%04X\n", + __FILE__,__LINE__,info->device_name,status); + + /* update error statistics */ + if (status & PE) + icount->parity++; + else if (status & FRME) + icount->frame++; + else if (status & OVRN) + icount->overrun++; + + /* discard char if tty control flags say so */ + if (status & info->ignore_status_mask2) + continue; + + status &= info->read_status_mask2; + + if ( tty ) { + if (status & PE) + flag = TTY_PARITY; + else if (status & FRME) + flag = TTY_FRAME; + if (status & OVRN) { + /* Overrun is special, since it's + * reported immediately, and doesn't + * affect the current character + */ + over = true; + } + } + } /* end of if (error) */ + + if ( tty ) { + tty_insert_flip_char(tty, DataByte, flag); + if (over) + tty_insert_flip_char(tty, 0, TTY_OVERRUN); + } + } + + if ( debug_level >= DEBUG_LEVEL_ISR ) { + printk("%s(%d):%s rx=%d brk=%d parity=%d frame=%d overrun=%d\n", + __FILE__,__LINE__,info->device_name, + icount->rx,icount->brk,icount->parity, + icount->frame,icount->overrun); + } + + if ( tty ) + tty_flip_buffer_push(tty); +} + +static void 
isr_txeom(SLMP_INFO * info, unsigned char status) +{ + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk("%s(%d):%s isr_txeom status=%02x\n", + __FILE__,__LINE__,info->device_name,status); + + write_reg(info, TXDMA + DIR, 0x00); /* disable Tx DMA IRQs */ + write_reg(info, TXDMA + DSR, 0xc0); /* clear IRQs and disable DMA */ + write_reg(info, TXDMA + DCMD, SWABORT); /* reset/init DMA channel */ + + if (status & UDRN) { + write_reg(info, CMD, TXRESET); + write_reg(info, CMD, TXENABLE); + } else + write_reg(info, CMD, TXBUFCLR); + + /* disable and clear tx interrupts */ + info->ie0_value &= ~TXRDYE; + info->ie1_value &= ~(IDLE + UDRN); + write_reg16(info, IE0, (unsigned short)((info->ie1_value << 8) + info->ie0_value)); + write_reg(info, SR1, (unsigned char)(UDRN + IDLE)); + + if ( info->tx_active ) { + if (info->params.mode != MGSL_MODE_ASYNC) { + if (status & UDRN) + info->icount.txunder++; + else if (status & IDLE) + info->icount.txok++; + } + + info->tx_active = false; + info->tx_count = info->tx_put = info->tx_get = 0; + + del_timer(&info->tx_timer); + + if (info->params.mode != MGSL_MODE_ASYNC && info->drop_rts_on_tx_done ) { + info->serial_signals &= ~SerialSignal_RTS; + info->drop_rts_on_tx_done = false; + set_signals(info); + } + +#if SYNCLINK_GENERIC_HDLC + if (info->netcount) + hdlcdev_tx_done(info); + else +#endif + { + if (info->port.tty && (info->port.tty->stopped || info->port.tty->hw_stopped)) { + tx_stop(info); + return; + } + info->pending_bh |= BH_TRANSMIT; + } + } +} + + +/* + * handle tx status interrupts + */ +static void isr_txint(SLMP_INFO * info) +{ + unsigned char status = read_reg(info, SR1) & info->ie1_value & (UDRN + IDLE + CCTS); + + /* clear status bits */ + write_reg(info, SR1, status); + + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk("%s(%d):%s isr_txint status=%02x\n", + __FILE__,__LINE__,info->device_name,status); + + if (status & (UDRN + IDLE)) + isr_txeom(info, status); + + if (status & CCTS) { + /* simulate a common modem status change interrupt + * for our handler + */ + get_signals( info ); + isr_io_pin(info, + MISCSTATUS_CTS_LATCHED|(info->serial_signals&SerialSignal_CTS)); + + } +} + +/* + * handle async tx data interrupts + */ +static void isr_txrdy(SLMP_INFO * info) +{ + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk("%s(%d):%s isr_txrdy() tx_count=%d\n", + __FILE__,__LINE__,info->device_name,info->tx_count); + + if (info->params.mode != MGSL_MODE_ASYNC) { + /* disable TXRDY IRQ, enable IDLE IRQ */ + info->ie0_value &= ~TXRDYE; + info->ie1_value |= IDLE; + write_reg16(info, IE0, (unsigned short)((info->ie1_value << 8) + info->ie0_value)); + return; + } + + if (info->port.tty && (info->port.tty->stopped || info->port.tty->hw_stopped)) { + tx_stop(info); + return; + } + + if ( info->tx_count ) + tx_load_fifo( info ); + else { + info->tx_active = false; + info->ie0_value &= ~TXRDYE; + write_reg(info, IE0, info->ie0_value); + } + + if (info->tx_count < WAKEUP_CHARS) + info->pending_bh |= BH_TRANSMIT; +} + +static void isr_rxdmaok(SLMP_INFO * info) +{ + /* BIT7 = EOT (end of transfer) + * BIT6 = EOM (end of message/frame) + */ + unsigned char status = read_reg(info,RXDMA + DSR) & 0xc0; + + /* clear IRQ (BIT0 must be 1 to prevent clearing DE bit) */ + write_reg(info, RXDMA + DSR, (unsigned char)(status | 1)); + + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk("%s(%d):%s isr_rxdmaok(), status=%02x\n", + __FILE__,__LINE__,info->device_name,status); + + info->pending_bh |= BH_RECEIVE; +} + +static void isr_rxdmaerror(SLMP_INFO * info) +{ + /* BIT5 = BOF 
(buffer overflow) + * BIT4 = COF (counter overflow) + */ + unsigned char status = read_reg(info,RXDMA + DSR) & 0x30; + + /* clear IRQ (BIT0 must be 1 to prevent clearing DE bit) */ + write_reg(info, RXDMA + DSR, (unsigned char)(status | 1)); + + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk("%s(%d):%s isr_rxdmaerror(), status=%02x\n", + __FILE__,__LINE__,info->device_name,status); + + info->rx_overflow = true; + info->pending_bh |= BH_RECEIVE; +} + +static void isr_txdmaok(SLMP_INFO * info) +{ + unsigned char status_reg1 = read_reg(info, SR1); + + write_reg(info, TXDMA + DIR, 0x00); /* disable Tx DMA IRQs */ + write_reg(info, TXDMA + DSR, 0xc0); /* clear IRQs and disable DMA */ + write_reg(info, TXDMA + DCMD, SWABORT); /* reset/init DMA channel */ + + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk("%s(%d):%s isr_txdmaok(), status=%02x\n", + __FILE__,__LINE__,info->device_name,status_reg1); + + /* program TXRDY as FIFO empty flag, enable TXRDY IRQ */ + write_reg16(info, TRC0, 0); + info->ie0_value |= TXRDYE; + write_reg(info, IE0, info->ie0_value); +} + +static void isr_txdmaerror(SLMP_INFO * info) +{ + /* BIT5 = BOF (buffer overflow) + * BIT4 = COF (counter overflow) + */ + unsigned char status = read_reg(info,TXDMA + DSR) & 0x30; + + /* clear IRQ (BIT0 must be 1 to prevent clearing DE bit) */ + write_reg(info, TXDMA + DSR, (unsigned char)(status | 1)); + + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk("%s(%d):%s isr_txdmaerror(), status=%02x\n", + __FILE__,__LINE__,info->device_name,status); +} + +/* handle input serial signal changes + */ +static void isr_io_pin( SLMP_INFO *info, u16 status ) +{ + struct mgsl_icount *icount; + + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk("%s(%d):isr_io_pin status=%04X\n", + __FILE__,__LINE__,status); + + if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED | + MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) { + icount = &info->icount; + /* update input line counters */ + if (status & MISCSTATUS_RI_LATCHED) { + icount->rng++; + if ( status & SerialSignal_RI ) + info->input_signal_events.ri_up++; + else + info->input_signal_events.ri_down++; + } + if (status & MISCSTATUS_DSR_LATCHED) { + icount->dsr++; + if ( status & SerialSignal_DSR ) + info->input_signal_events.dsr_up++; + else + info->input_signal_events.dsr_down++; + } + if (status & MISCSTATUS_DCD_LATCHED) { + if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) { + info->ie1_value &= ~CDCD; + write_reg(info, IE1, info->ie1_value); + } + icount->dcd++; + if (status & SerialSignal_DCD) { + info->input_signal_events.dcd_up++; + } else + info->input_signal_events.dcd_down++; +#if SYNCLINK_GENERIC_HDLC + if (info->netcount) { + if (status & SerialSignal_DCD) + netif_carrier_on(info->netdev); + else + netif_carrier_off(info->netdev); + } +#endif + } + if (status & MISCSTATUS_CTS_LATCHED) + { + if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) { + info->ie1_value &= ~CCTS; + write_reg(info, IE1, info->ie1_value); + } + icount->cts++; + if ( status & SerialSignal_CTS ) + info->input_signal_events.cts_up++; + else + info->input_signal_events.cts_down++; + } + wake_up_interruptible(&info->status_event_wait_q); + wake_up_interruptible(&info->event_wait_q); + + if ( (info->port.flags & ASYNC_CHECK_CD) && + (status & MISCSTATUS_DCD_LATCHED) ) { + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk("%s CD now %s...", info->device_name, + (status & SerialSignal_DCD) ? 
"on" : "off"); + if (status & SerialSignal_DCD) + wake_up_interruptible(&info->port.open_wait); + else { + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk("doing serial hangup..."); + if (info->port.tty) + tty_hangup(info->port.tty); + } + } + + if ( (info->port.flags & ASYNC_CTS_FLOW) && + (status & MISCSTATUS_CTS_LATCHED) ) { + if ( info->port.tty ) { + if (info->port.tty->hw_stopped) { + if (status & SerialSignal_CTS) { + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk("CTS tx start..."); + info->port.tty->hw_stopped = 0; + tx_start(info); + info->pending_bh |= BH_TRANSMIT; + return; + } + } else { + if (!(status & SerialSignal_CTS)) { + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk("CTS tx stop..."); + info->port.tty->hw_stopped = 1; + tx_stop(info); + } + } + } + } + } + + info->pending_bh |= BH_STATUS; +} + +/* Interrupt service routine entry point. + * + * Arguments: + * irq interrupt number that caused interrupt + * dev_id device ID supplied during interrupt registration + * regs interrupted processor context + */ +static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id) +{ + SLMP_INFO *info = dev_id; + unsigned char status, status0, status1=0; + unsigned char dmastatus, dmastatus0, dmastatus1=0; + unsigned char timerstatus0, timerstatus1=0; + unsigned char shift; + unsigned int i; + unsigned short tmp; + + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk(KERN_DEBUG "%s(%d): synclinkmp_interrupt(%d)entry.\n", + __FILE__, __LINE__, info->irq_level); + + spin_lock(&info->lock); + + for(;;) { + + /* get status for SCA0 (ports 0-1) */ + tmp = read_reg16(info, ISR0); /* get ISR0 and ISR1 in one read */ + status0 = (unsigned char)tmp; + dmastatus0 = (unsigned char)(tmp>>8); + timerstatus0 = read_reg(info, ISR2); + + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk(KERN_DEBUG "%s(%d):%s status0=%02x, dmastatus0=%02x, timerstatus0=%02x\n", + __FILE__, __LINE__, info->device_name, + status0, dmastatus0, timerstatus0); + + if (info->port_count == 4) { + /* get status for SCA1 (ports 2-3) */ + tmp = read_reg16(info->port_array[2], ISR0); + status1 = (unsigned char)tmp; + dmastatus1 = (unsigned char)(tmp>>8); + timerstatus1 = read_reg(info->port_array[2], ISR2); + + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk("%s(%d):%s status1=%02x, dmastatus1=%02x, timerstatus1=%02x\n", + __FILE__,__LINE__,info->device_name, + status1,dmastatus1,timerstatus1); + } + + if (!status0 && !dmastatus0 && !timerstatus0 && + !status1 && !dmastatus1 && !timerstatus1) + break; + + for(i=0; i < info->port_count ; i++) { + if (info->port_array[i] == NULL) + continue; + if (i < 2) { + status = status0; + dmastatus = dmastatus0; + } else { + status = status1; + dmastatus = dmastatus1; + } + + shift = i & 1 ? 
4 :0; + + if (status & BIT0 << shift) + isr_rxrdy(info->port_array[i]); + if (status & BIT1 << shift) + isr_txrdy(info->port_array[i]); + if (status & BIT2 << shift) + isr_rxint(info->port_array[i]); + if (status & BIT3 << shift) + isr_txint(info->port_array[i]); + + if (dmastatus & BIT0 << shift) + isr_rxdmaerror(info->port_array[i]); + if (dmastatus & BIT1 << shift) + isr_rxdmaok(info->port_array[i]); + if (dmastatus & BIT2 << shift) + isr_txdmaerror(info->port_array[i]); + if (dmastatus & BIT3 << shift) + isr_txdmaok(info->port_array[i]); + } + + if (timerstatus0 & (BIT5 | BIT4)) + isr_timer(info->port_array[0]); + if (timerstatus0 & (BIT7 | BIT6)) + isr_timer(info->port_array[1]); + if (timerstatus1 & (BIT5 | BIT4)) + isr_timer(info->port_array[2]); + if (timerstatus1 & (BIT7 | BIT6)) + isr_timer(info->port_array[3]); + } + + for(i=0; i < info->port_count ; i++) { + SLMP_INFO * port = info->port_array[i]; + + /* Request bottom half processing if there's something + * for it to do and the bh is not already running. + * + * Note: startup adapter diags require interrupts. + * do not request bottom half processing if the + * device is not open in a normal mode. + */ + if ( port && (port->port.count || port->netcount) && + port->pending_bh && !port->bh_running && + !port->bh_requested ) { + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk("%s(%d):%s queueing bh task.\n", + __FILE__,__LINE__,port->device_name); + schedule_work(&port->task); + port->bh_requested = true; + } + } + + spin_unlock(&info->lock); + + if ( debug_level >= DEBUG_LEVEL_ISR ) + printk(KERN_DEBUG "%s(%d):synclinkmp_interrupt(%d)exit.\n", + __FILE__, __LINE__, info->irq_level); + return IRQ_HANDLED; +} + +/* Initialize and start device. + */ +static int startup(SLMP_INFO * info) +{ + if ( debug_level >= DEBUG_LEVEL_INFO ) + printk("%s(%d):%s tx_releaseup()\n",__FILE__,__LINE__,info->device_name); + + if (info->port.flags & ASYNC_INITIALIZED) + return 0; + + if (!info->tx_buf) { + info->tx_buf = kmalloc(info->max_frame_size, GFP_KERNEL); + if (!info->tx_buf) { + printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n", + __FILE__,__LINE__,info->device_name); + return -ENOMEM; + } + } + + info->pending_bh = 0; + + memset(&info->icount, 0, sizeof(info->icount)); + + /* program hardware for current parameters */ + reset_port(info); + + change_params(info); + + mod_timer(&info->status_timer, jiffies + msecs_to_jiffies(10)); + + if (info->port.tty) + clear_bit(TTY_IO_ERROR, &info->port.tty->flags); + + info->port.flags |= ASYNC_INITIALIZED; + + return 0; +} + +/* Called by close() and hangup() to shutdown hardware + */ +static void shutdown(SLMP_INFO * info) +{ + unsigned long flags; + + if (!(info->port.flags & ASYNC_INITIALIZED)) + return; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s synclinkmp_shutdown()\n", + __FILE__,__LINE__, info->device_name ); + + /* clear status wait queue because status changes */ + /* can't happen after shutting down the hardware */ + wake_up_interruptible(&info->status_event_wait_q); + wake_up_interruptible(&info->event_wait_q); + + del_timer(&info->tx_timer); + del_timer(&info->status_timer); + + kfree(info->tx_buf); + info->tx_buf = NULL; + + spin_lock_irqsave(&info->lock,flags); + + reset_port(info); + + if (!info->port.tty || info->port.tty->termios->c_cflag & HUPCL) { + info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS); + set_signals(info); + } + + spin_unlock_irqrestore(&info->lock,flags); + + if (info->port.tty) + set_bit(TTY_IO_ERROR, &info->port.tty->flags); + 
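+ /* mark port as uninitialized so the next open runs startup() again */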
+ info->port.flags &= ~ASYNC_INITIALIZED; +} + +static void program_hw(SLMP_INFO *info) +{ + unsigned long flags; + + spin_lock_irqsave(&info->lock,flags); + + rx_stop(info); + tx_stop(info); + + info->tx_count = info->tx_put = info->tx_get = 0; + + if (info->params.mode == MGSL_MODE_HDLC || info->netcount) + hdlc_mode(info); + else + async_mode(info); + + set_signals(info); + + info->dcd_chkcount = 0; + info->cts_chkcount = 0; + info->ri_chkcount = 0; + info->dsr_chkcount = 0; + + info->ie1_value |= (CDCD|CCTS); + write_reg(info, IE1, info->ie1_value); + + get_signals(info); + + if (info->netcount || (info->port.tty && info->port.tty->termios->c_cflag & CREAD) ) + rx_start(info); + + spin_unlock_irqrestore(&info->lock,flags); +} + +/* Reconfigure adapter based on new parameters + */ +static void change_params(SLMP_INFO *info) +{ + unsigned cflag; + int bits_per_char; + + if (!info->port.tty || !info->port.tty->termios) + return; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s change_params()\n", + __FILE__,__LINE__, info->device_name ); + + cflag = info->port.tty->termios->c_cflag; + + /* if B0 rate (hangup) specified then negate DTR and RTS */ + /* otherwise assert DTR and RTS */ + if (cflag & CBAUD) + info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR; + else + info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR); + + /* byte size and parity */ + + switch (cflag & CSIZE) { + case CS5: info->params.data_bits = 5; break; + case CS6: info->params.data_bits = 6; break; + case CS7: info->params.data_bits = 7; break; + case CS8: info->params.data_bits = 8; break; + /* Never happens, but GCC is too dumb to figure it out */ + default: info->params.data_bits = 7; break; + } + + if (cflag & CSTOPB) + info->params.stop_bits = 2; + else + info->params.stop_bits = 1; + + info->params.parity = ASYNC_PARITY_NONE; + if (cflag & PARENB) { + if (cflag & PARODD) + info->params.parity = ASYNC_PARITY_ODD; + else + info->params.parity = ASYNC_PARITY_EVEN; +#ifdef CMSPAR + if (cflag & CMSPAR) + info->params.parity = ASYNC_PARITY_SPACE; +#endif + } + + /* calculate number of jiffies to transmit a full + * FIFO (32 bytes) at specified data rate + */ + bits_per_char = info->params.data_bits + + info->params.stop_bits + 1; + + /* if port data rate is set to 460800 or less then + * allow tty settings to override, otherwise keep the + * current data rate. + */ + if (info->params.data_rate <= 460800) { + info->params.data_rate = tty_get_baud_rate(info->port.tty); + } + + if ( info->params.data_rate ) { + info->timeout = (32*HZ*bits_per_char) / + info->params.data_rate; + } + info->timeout += HZ/50; /* Add .02 seconds of slop */ + + if (cflag & CRTSCTS) + info->port.flags |= ASYNC_CTS_FLOW; + else + info->port.flags &= ~ASYNC_CTS_FLOW; + + if (cflag & CLOCAL) + info->port.flags &= ~ASYNC_CHECK_CD; + else + info->port.flags |= ASYNC_CHECK_CD; + + /* process tty input control flags */ + + info->read_status_mask2 = OVRN; + if (I_INPCK(info->port.tty)) + info->read_status_mask2 |= PE | FRME; + if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty)) + info->read_status_mask1 |= BRKD; + if (I_IGNPAR(info->port.tty)) + info->ignore_status_mask2 |= PE | FRME; + if (I_IGNBRK(info->port.tty)) { + info->ignore_status_mask1 |= BRKD; + /* If ignoring parity and break indicators, ignore + * overruns too. (For real raw support). 
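+ * OVRN is masked via ignore_status_mask2, which isr_rxrdy() checks
+ * for each received character.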
+ */ + if (I_IGNPAR(info->port.tty)) + info->ignore_status_mask2 |= OVRN; + } + + program_hw(info); +} + +static int get_stats(SLMP_INFO * info, struct mgsl_icount __user *user_icount) +{ + int err; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s get_params()\n", + __FILE__,__LINE__, info->device_name); + + if (!user_icount) { + memset(&info->icount, 0, sizeof(info->icount)); + } else { + mutex_lock(&info->port.mutex); + COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount)); + mutex_unlock(&info->port.mutex); + if (err) + return -EFAULT; + } + + return 0; +} + +static int get_params(SLMP_INFO * info, MGSL_PARAMS __user *user_params) +{ + int err; + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s get_params()\n", + __FILE__,__LINE__, info->device_name); + + mutex_lock(&info->port.mutex); + COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS)); + mutex_unlock(&info->port.mutex); + if (err) { + if ( debug_level >= DEBUG_LEVEL_INFO ) + printk( "%s(%d):%s get_params() user buffer copy failed\n", + __FILE__,__LINE__,info->device_name); + return -EFAULT; + } + + return 0; +} + +static int set_params(SLMP_INFO * info, MGSL_PARAMS __user *new_params) +{ + unsigned long flags; + MGSL_PARAMS tmp_params; + int err; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s set_params\n", + __FILE__,__LINE__,info->device_name ); + COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS)); + if (err) { + if ( debug_level >= DEBUG_LEVEL_INFO ) + printk( "%s(%d):%s set_params() user buffer copy failed\n", + __FILE__,__LINE__,info->device_name); + return -EFAULT; + } + + mutex_lock(&info->port.mutex); + spin_lock_irqsave(&info->lock,flags); + memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS)); + spin_unlock_irqrestore(&info->lock,flags); + + change_params(info); + mutex_unlock(&info->port.mutex); + + return 0; +} + +static int get_txidle(SLMP_INFO * info, int __user *idle_mode) +{ + int err; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s get_txidle()=%d\n", + __FILE__,__LINE__, info->device_name, info->idle_mode); + + COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int)); + if (err) { + if ( debug_level >= DEBUG_LEVEL_INFO ) + printk( "%s(%d):%s get_txidle() user buffer copy failed\n", + __FILE__,__LINE__,info->device_name); + return -EFAULT; + } + + return 0; +} + +static int set_txidle(SLMP_INFO * info, int idle_mode) +{ + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s set_txidle(%d)\n", + __FILE__,__LINE__,info->device_name, idle_mode ); + + spin_lock_irqsave(&info->lock,flags); + info->idle_mode = idle_mode; + tx_set_idle( info ); + spin_unlock_irqrestore(&info->lock,flags); + return 0; +} + +static int tx_enable(SLMP_INFO * info, int enable) +{ + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s tx_enable(%d)\n", + __FILE__,__LINE__,info->device_name, enable); + + spin_lock_irqsave(&info->lock,flags); + if ( enable ) { + if ( !info->tx_enabled ) { + tx_start(info); + } + } else { + if ( info->tx_enabled ) + tx_stop(info); + } + spin_unlock_irqrestore(&info->lock,flags); + return 0; +} + +/* abort send HDLC frame + */ +static int tx_abort(SLMP_INFO * info) +{ + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s tx_abort()\n", + __FILE__,__LINE__,info->device_name); + + spin_lock_irqsave(&info->lock,flags); + if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC ) { + info->ie1_value &= ~UDRN; + info->ie1_value |= 
IDLE; + write_reg(info, IE1, info->ie1_value); /* disable tx status interrupts */ + write_reg(info, SR1, (unsigned char)(IDLE + UDRN)); /* clear pending */ + + write_reg(info, TXDMA + DSR, 0); /* disable DMA channel */ + write_reg(info, TXDMA + DCMD, SWABORT); /* reset/init DMA channel */ + + write_reg(info, CMD, TXABORT); + } + spin_unlock_irqrestore(&info->lock,flags); + return 0; +} + +static int rx_enable(SLMP_INFO * info, int enable) +{ + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s rx_enable(%d)\n", + __FILE__,__LINE__,info->device_name,enable); + + spin_lock_irqsave(&info->lock,flags); + if ( enable ) { + if ( !info->rx_enabled ) + rx_start(info); + } else { + if ( info->rx_enabled ) + rx_stop(info); + } + spin_unlock_irqrestore(&info->lock,flags); + return 0; +} + +/* wait for specified event to occur + */ +static int wait_mgsl_event(SLMP_INFO * info, int __user *mask_ptr) +{ + unsigned long flags; + int s; + int rc=0; + struct mgsl_icount cprev, cnow; + int events; + int mask; + struct _input_signal_events oldsigs, newsigs; + DECLARE_WAITQUEUE(wait, current); + + COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int)); + if (rc) { + return -EFAULT; + } + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s wait_mgsl_event(%d)\n", + __FILE__,__LINE__,info->device_name,mask); + + spin_lock_irqsave(&info->lock,flags); + + /* return immediately if state matches requested events */ + get_signals(info); + s = info->serial_signals; + + events = mask & + ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) + + ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) + + ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) + + ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) ); + if (events) { + spin_unlock_irqrestore(&info->lock,flags); + goto exit; + } + + /* save current irq counts */ + cprev = info->icount; + oldsigs = info->input_signal_events; + + /* enable hunt and idle irqs if needed */ + if (mask & (MgslEvent_ExitHuntMode+MgslEvent_IdleReceived)) { + unsigned char oldval = info->ie1_value; + unsigned char newval = oldval + + (mask & MgslEvent_ExitHuntMode ? FLGD:0) + + (mask & MgslEvent_IdleReceived ? IDLD:0); + if ( oldval != newval ) { + info->ie1_value = newval; + write_reg(info, IE1, info->ie1_value); + } + } + + set_current_state(TASK_INTERRUPTIBLE); + add_wait_queue(&info->event_wait_q, &wait); + + spin_unlock_irqrestore(&info->lock,flags); + + for(;;) { + schedule(); + if (signal_pending(current)) { + rc = -ERESTARTSYS; + break; + } + + /* get current irq counts */ + spin_lock_irqsave(&info->lock,flags); + cnow = info->icount; + newsigs = info->input_signal_events; + set_current_state(TASK_INTERRUPTIBLE); + spin_unlock_irqrestore(&info->lock,flags); + + /* if no change, wait aborted for some reason */ + if (newsigs.dsr_up == oldsigs.dsr_up && + newsigs.dsr_down == oldsigs.dsr_down && + newsigs.dcd_up == oldsigs.dcd_up && + newsigs.dcd_down == oldsigs.dcd_down && + newsigs.cts_up == oldsigs.cts_up && + newsigs.cts_down == oldsigs.cts_down && + newsigs.ri_up == oldsigs.ri_up && + newsigs.ri_down == oldsigs.ri_down && + cnow.exithunt == cprev.exithunt && + cnow.rxidle == cprev.rxidle) { + rc = -EIO; + break; + } + + events = mask & + ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) + + (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) + + (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) + + (newsigs.dcd_down != oldsigs.dcd_down ? 
MgslEvent_DcdInactive:0) + + (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) + + (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) + + (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) + + (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) + + (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) + + (cnow.rxidle != cprev.rxidle ? MgslEvent_IdleReceived:0) ); + if (events) + break; + + cprev = cnow; + oldsigs = newsigs; + } + + remove_wait_queue(&info->event_wait_q, &wait); + set_current_state(TASK_RUNNING); + + + if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) { + spin_lock_irqsave(&info->lock,flags); + if (!waitqueue_active(&info->event_wait_q)) { + /* disable enable exit hunt mode/idle rcvd IRQs */ + info->ie1_value &= ~(FLGD|IDLD); + write_reg(info, IE1, info->ie1_value); + } + spin_unlock_irqrestore(&info->lock,flags); + } +exit: + if ( rc == 0 ) + PUT_USER(rc, events, mask_ptr); + + return rc; +} + +static int modem_input_wait(SLMP_INFO *info,int arg) +{ + unsigned long flags; + int rc; + struct mgsl_icount cprev, cnow; + DECLARE_WAITQUEUE(wait, current); + + /* save current irq counts */ + spin_lock_irqsave(&info->lock,flags); + cprev = info->icount; + add_wait_queue(&info->status_event_wait_q, &wait); + set_current_state(TASK_INTERRUPTIBLE); + spin_unlock_irqrestore(&info->lock,flags); + + for(;;) { + schedule(); + if (signal_pending(current)) { + rc = -ERESTARTSYS; + break; + } + + /* get new irq counts */ + spin_lock_irqsave(&info->lock,flags); + cnow = info->icount; + set_current_state(TASK_INTERRUPTIBLE); + spin_unlock_irqrestore(&info->lock,flags); + + /* if no change, wait aborted for some reason */ + if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && + cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) { + rc = -EIO; + break; + } + + /* check for change in caller specified modem input */ + if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) || + (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) || + (arg & TIOCM_CD && cnow.dcd != cprev.dcd) || + (arg & TIOCM_CTS && cnow.cts != cprev.cts)) { + rc = 0; + break; + } + + cprev = cnow; + } + remove_wait_queue(&info->status_event_wait_q, &wait); + set_current_state(TASK_RUNNING); + return rc; +} + +/* return the state of the serial control and status signals + */ +static int tiocmget(struct tty_struct *tty) +{ + SLMP_INFO *info = tty->driver_data; + unsigned int result; + unsigned long flags; + + spin_lock_irqsave(&info->lock,flags); + get_signals(info); + spin_unlock_irqrestore(&info->lock,flags); + + result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) + + ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) + + ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) + + ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) + + ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) + + ((info->serial_signals & SerialSignal_CTS) ? 
TIOCM_CTS:0); + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s tiocmget() value=%08X\n", + __FILE__,__LINE__, info->device_name, result ); + return result; +} + +/* set modem control signals (DTR/RTS) + */ +static int tiocmset(struct tty_struct *tty, + unsigned int set, unsigned int clear) +{ + SLMP_INFO *info = tty->driver_data; + unsigned long flags; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s tiocmset(%x,%x)\n", + __FILE__,__LINE__,info->device_name, set, clear); + + if (set & TIOCM_RTS) + info->serial_signals |= SerialSignal_RTS; + if (set & TIOCM_DTR) + info->serial_signals |= SerialSignal_DTR; + if (clear & TIOCM_RTS) + info->serial_signals &= ~SerialSignal_RTS; + if (clear & TIOCM_DTR) + info->serial_signals &= ~SerialSignal_DTR; + + spin_lock_irqsave(&info->lock,flags); + set_signals(info); + spin_unlock_irqrestore(&info->lock,flags); + + return 0; +} + +static int carrier_raised(struct tty_port *port) +{ + SLMP_INFO *info = container_of(port, SLMP_INFO, port); + unsigned long flags; + + spin_lock_irqsave(&info->lock,flags); + get_signals(info); + spin_unlock_irqrestore(&info->lock,flags); + + return (info->serial_signals & SerialSignal_DCD) ? 1 : 0; +} + +static void dtr_rts(struct tty_port *port, int on) +{ + SLMP_INFO *info = container_of(port, SLMP_INFO, port); + unsigned long flags; + + spin_lock_irqsave(&info->lock,flags); + if (on) + info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR; + else + info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR); + set_signals(info); + spin_unlock_irqrestore(&info->lock,flags); +} + +/* Block the current process until the specified port is ready to open. + */ +static int block_til_ready(struct tty_struct *tty, struct file *filp, + SLMP_INFO *info) +{ + DECLARE_WAITQUEUE(wait, current); + int retval; + bool do_clocal = false; + bool extra_count = false; + unsigned long flags; + int cd; + struct tty_port *port = &info->port; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s block_til_ready()\n", + __FILE__,__LINE__, tty->driver->name ); + + if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){ + /* nonblock mode is set or port is not enabled */ + /* just verify that callout device is not active */ + port->flags |= ASYNC_NORMAL_ACTIVE; + return 0; + } + + if (tty->termios->c_cflag & CLOCAL) + do_clocal = true; + + /* Wait for carrier detect and the line to become + * free (i.e., not in use by the callout). While we are in + * this loop, port->count is dropped by one, so that + * close() knows when to free things. We restore it upon + * exit, either normal or abnormal. + */ + + retval = 0; + add_wait_queue(&port->open_wait, &wait); + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s block_til_ready() before block, count=%d\n", + __FILE__,__LINE__, tty->driver->name, port->count ); + + spin_lock_irqsave(&info->lock, flags); + if (!tty_hung_up_p(filp)) { + extra_count = true; + port->count--; + } + spin_unlock_irqrestore(&info->lock, flags); + port->blocked_open++; + + while (1) { + if (tty->termios->c_cflag & CBAUD) + tty_port_raise_dtr_rts(port); + + set_current_state(TASK_INTERRUPTIBLE); + + if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)){ + retval = (port->flags & ASYNC_HUP_NOTIFY) ? 
+ -EAGAIN : -ERESTARTSYS; + break; + } + + cd = tty_port_carrier_raised(port); + + if (!(port->flags & ASYNC_CLOSING) && (do_clocal || cd)) + break; + + if (signal_pending(current)) { + retval = -ERESTARTSYS; + break; + } + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s block_til_ready() count=%d\n", + __FILE__,__LINE__, tty->driver->name, port->count ); + + tty_unlock(); + schedule(); + tty_lock(); + } + + set_current_state(TASK_RUNNING); + remove_wait_queue(&port->open_wait, &wait); + + if (extra_count) + port->count++; + port->blocked_open--; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s block_til_ready() after, count=%d\n", + __FILE__,__LINE__, tty->driver->name, port->count ); + + if (!retval) + port->flags |= ASYNC_NORMAL_ACTIVE; + + return retval; +} + +static int alloc_dma_bufs(SLMP_INFO *info) +{ + unsigned short BuffersPerFrame; + unsigned short BufferCount; + + // Force allocation to start at 64K boundary for each port. + // This is necessary because *all* buffer descriptors for a port + // *must* be in the same 64K block. All descriptors on a port + // share a common 'base' address (upper 8 bits of 24 bits) programmed + // into the CBP register. + info->port_array[0]->last_mem_alloc = (SCA_MEM_SIZE/4) * info->port_num; + + /* Calculate the number of DMA buffers necessary to hold the */ + /* largest allowable frame size. Note: If the max frame size is */ + /* not an even multiple of the DMA buffer size then we need to */ + /* round the buffer count per frame up one. */ + + BuffersPerFrame = (unsigned short)(info->max_frame_size/SCABUFSIZE); + if ( info->max_frame_size % SCABUFSIZE ) + BuffersPerFrame++; + + /* calculate total number of data buffers (SCABUFSIZE) possible + * in one ports memory (SCA_MEM_SIZE/4) after allocating memory + * for the descriptor list (BUFFERLISTSIZE). + */ + BufferCount = (SCA_MEM_SIZE/4 - BUFFERLISTSIZE)/SCABUFSIZE; + + /* limit number of buffers to maximum amount of descriptors */ + if (BufferCount > BUFFERLISTSIZE/sizeof(SCADESC)) + BufferCount = BUFFERLISTSIZE/sizeof(SCADESC); + + /* use enough buffers to transmit one max size frame */ + info->tx_buf_count = BuffersPerFrame + 1; + + /* never use more than half the available buffers for transmit */ + if (info->tx_buf_count > (BufferCount/2)) + info->tx_buf_count = BufferCount/2; + + if (info->tx_buf_count > SCAMAXDESC) + info->tx_buf_count = SCAMAXDESC; + + /* use remaining buffers for receive */ + info->rx_buf_count = BufferCount - info->tx_buf_count; + + if (info->rx_buf_count > SCAMAXDESC) + info->rx_buf_count = SCAMAXDESC; + + if ( debug_level >= DEBUG_LEVEL_INFO ) + printk("%s(%d):%s Allocating %d TX and %d RX DMA buffers.\n", + __FILE__,__LINE__, info->device_name, + info->tx_buf_count,info->rx_buf_count); + + if ( alloc_buf_list( info ) < 0 || + alloc_frame_bufs(info, + info->rx_buf_list, + info->rx_buf_list_ex, + info->rx_buf_count) < 0 || + alloc_frame_bufs(info, + info->tx_buf_list, + info->tx_buf_list_ex, + info->tx_buf_count) < 0 || + alloc_tmp_rx_buf(info) < 0 ) { + printk("%s(%d):%s Can't allocate DMA buffer memory\n", + __FILE__,__LINE__, info->device_name); + return -ENOMEM; + } + + rx_reset_buffers( info ); + + return 0; +} + +/* Allocate DMA buffers for the transmit and receive descriptor lists. 
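+ * Both lists are built in the adapter shared memory region reserved
+ * for this port (see alloc_dma_bufs()).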
+ */
+static int alloc_buf_list(SLMP_INFO *info)
+{
+ unsigned int i;
+
+ /* build list in adapter shared memory */
+ info->buffer_list = info->memory_base + info->port_array[0]->last_mem_alloc;
+ info->buffer_list_phys = info->port_array[0]->last_mem_alloc;
+ info->port_array[0]->last_mem_alloc += BUFFERLISTSIZE;
+
+ memset(info->buffer_list, 0, BUFFERLISTSIZE);
+
+ /* Save virtual address pointers to the receive and */
+ /* transmit buffer lists. (Receive 1st). These pointers will */
+ /* be used by the processor to access the lists. */
+ info->rx_buf_list = (SCADESC *)info->buffer_list;
+
+ info->tx_buf_list = (SCADESC *)info->buffer_list;
+ info->tx_buf_list += info->rx_buf_count;
+
+ /* Build links for circular buffer entry lists (tx and rx)
+ *
+ * Note: links are physical addresses read by the SCA device
+ * to determine the next buffer entry to use.
+ */
+
+ for ( i = 0; i < info->rx_buf_count; i++ ) {
+ /* calculate and store physical address of this buffer entry */
+ info->rx_buf_list_ex[i].phys_entry =
+ info->buffer_list_phys + (i * sizeof(SCABUFSIZE));
+
+ /* calculate and store physical address of */
+ /* next entry in circular list of entries */
+ info->rx_buf_list[i].next = info->buffer_list_phys;
+ if ( i < info->rx_buf_count - 1 )
+ info->rx_buf_list[i].next += (i + 1) * sizeof(SCADESC);
+
+ info->rx_buf_list[i].length = SCABUFSIZE;
+ }
+
+ for ( i = 0; i < info->tx_buf_count; i++ ) {
+ /* calculate and store physical address of this buffer entry */
+ info->tx_buf_list_ex[i].phys_entry = info->buffer_list_phys +
+ ((info->rx_buf_count + i) * sizeof(SCADESC));
+
+ /* calculate and store physical address of */
+ /* next entry in circular list of entries */
+
+ info->tx_buf_list[i].next = info->buffer_list_phys +
+ info->rx_buf_count * sizeof(SCADESC);
+
+ if ( i < info->tx_buf_count - 1 )
+ info->tx_buf_list[i].next += (i + 1) * sizeof(SCADESC);
+ }
+
+ return 0;
+}
+
+/* Allocate the frame DMA buffers used by the specified buffer list.
+ */
+static int alloc_frame_bufs(SLMP_INFO *info, SCADESC *buf_list,SCADESC_EX *buf_list_ex,int count)
+{
+ int i;
+ unsigned long phys_addr;
+
+ for ( i = 0; i < count; i++ ) {
+ buf_list_ex[i].virt_addr = info->memory_base + info->port_array[0]->last_mem_alloc;
+ phys_addr = info->port_array[0]->last_mem_alloc;
+ info->port_array[0]->last_mem_alloc += SCABUFSIZE;
+
+ buf_list[i].buf_ptr = (unsigned short)phys_addr;
+ buf_list[i].buf_base = (unsigned char)(phys_addr >> 16);
+ }
+
+ return 0;
+}
+
+static void free_dma_bufs(SLMP_INFO *info)
+{
+ info->buffer_list = NULL;
+ info->rx_buf_list = NULL;
+ info->tx_buf_list = NULL;
+}
+
+/* allocate buffer large enough to hold max_frame_size.
+ * This buffer is used to pass an assembled frame to the line discipline.
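+ * A received frame may span several receive DMA buffers, so a contiguous
+ * copy is assembled in this buffer before being passed up.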
+ */ +static int alloc_tmp_rx_buf(SLMP_INFO *info) +{ + info->tmp_rx_buf = kmalloc(info->max_frame_size, GFP_KERNEL); + if (info->tmp_rx_buf == NULL) + return -ENOMEM; + return 0; +} + +static void free_tmp_rx_buf(SLMP_INFO *info) +{ + kfree(info->tmp_rx_buf); + info->tmp_rx_buf = NULL; +} + +static int claim_resources(SLMP_INFO *info) +{ + if (request_mem_region(info->phys_memory_base,SCA_MEM_SIZE,"synclinkmp") == NULL) { + printk( "%s(%d):%s mem addr conflict, Addr=%08X\n", + __FILE__,__LINE__,info->device_name, info->phys_memory_base); + info->init_error = DiagStatus_AddressConflict; + goto errout; + } + else + info->shared_mem_requested = true; + + if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclinkmp") == NULL) { + printk( "%s(%d):%s lcr mem addr conflict, Addr=%08X\n", + __FILE__,__LINE__,info->device_name, info->phys_lcr_base); + info->init_error = DiagStatus_AddressConflict; + goto errout; + } + else + info->lcr_mem_requested = true; + + if (request_mem_region(info->phys_sca_base + info->sca_offset,SCA_BASE_SIZE,"synclinkmp") == NULL) { + printk( "%s(%d):%s sca mem addr conflict, Addr=%08X\n", + __FILE__,__LINE__,info->device_name, info->phys_sca_base); + info->init_error = DiagStatus_AddressConflict; + goto errout; + } + else + info->sca_base_requested = true; + + if (request_mem_region(info->phys_statctrl_base + info->statctrl_offset,SCA_REG_SIZE,"synclinkmp") == NULL) { + printk( "%s(%d):%s stat/ctrl mem addr conflict, Addr=%08X\n", + __FILE__,__LINE__,info->device_name, info->phys_statctrl_base); + info->init_error = DiagStatus_AddressConflict; + goto errout; + } + else + info->sca_statctrl_requested = true; + + info->memory_base = ioremap_nocache(info->phys_memory_base, + SCA_MEM_SIZE); + if (!info->memory_base) { + printk( "%s(%d):%s Cant map shared memory, MemAddr=%08X\n", + __FILE__,__LINE__,info->device_name, info->phys_memory_base ); + info->init_error = DiagStatus_CantAssignPciResources; + goto errout; + } + + info->lcr_base = ioremap_nocache(info->phys_lcr_base, PAGE_SIZE); + if (!info->lcr_base) { + printk( "%s(%d):%s Cant map LCR memory, MemAddr=%08X\n", + __FILE__,__LINE__,info->device_name, info->phys_lcr_base ); + info->init_error = DiagStatus_CantAssignPciResources; + goto errout; + } + info->lcr_base += info->lcr_offset; + + info->sca_base = ioremap_nocache(info->phys_sca_base, PAGE_SIZE); + if (!info->sca_base) { + printk( "%s(%d):%s Cant map SCA memory, MemAddr=%08X\n", + __FILE__,__LINE__,info->device_name, info->phys_sca_base ); + info->init_error = DiagStatus_CantAssignPciResources; + goto errout; + } + info->sca_base += info->sca_offset; + + info->statctrl_base = ioremap_nocache(info->phys_statctrl_base, + PAGE_SIZE); + if (!info->statctrl_base) { + printk( "%s(%d):%s Cant map SCA Status/Control memory, MemAddr=%08X\n", + __FILE__,__LINE__,info->device_name, info->phys_statctrl_base ); + info->init_error = DiagStatus_CantAssignPciResources; + goto errout; + } + info->statctrl_base += info->statctrl_offset; + + if ( !memory_test(info) ) { + printk( "%s(%d):Shared Memory Test failed for device %s MemAddr=%08X\n", + __FILE__,__LINE__,info->device_name, info->phys_memory_base ); + info->init_error = DiagStatus_MemoryError; + goto errout; + } + + return 0; + +errout: + release_resources( info ); + return -ENODEV; +} + +static void release_resources(SLMP_INFO *info) +{ + if ( debug_level >= DEBUG_LEVEL_INFO ) + printk( "%s(%d):%s release_resources() entry\n", + __FILE__,__LINE__,info->device_name ); + + if ( info->irq_requested ) { + 
free_irq(info->irq_level, info); + info->irq_requested = false; + } + + if ( info->shared_mem_requested ) { + release_mem_region(info->phys_memory_base,SCA_MEM_SIZE); + info->shared_mem_requested = false; + } + if ( info->lcr_mem_requested ) { + release_mem_region(info->phys_lcr_base + info->lcr_offset,128); + info->lcr_mem_requested = false; + } + if ( info->sca_base_requested ) { + release_mem_region(info->phys_sca_base + info->sca_offset,SCA_BASE_SIZE); + info->sca_base_requested = false; + } + if ( info->sca_statctrl_requested ) { + release_mem_region(info->phys_statctrl_base + info->statctrl_offset,SCA_REG_SIZE); + info->sca_statctrl_requested = false; + } + + if (info->memory_base){ + iounmap(info->memory_base); + info->memory_base = NULL; + } + + if (info->sca_base) { + iounmap(info->sca_base - info->sca_offset); + info->sca_base=NULL; + } + + if (info->statctrl_base) { + iounmap(info->statctrl_base - info->statctrl_offset); + info->statctrl_base=NULL; + } + + if (info->lcr_base){ + iounmap(info->lcr_base - info->lcr_offset); + info->lcr_base = NULL; + } + + if ( debug_level >= DEBUG_LEVEL_INFO ) + printk( "%s(%d):%s release_resources() exit\n", + __FILE__,__LINE__,info->device_name ); +} + +/* Add the specified device instance data structure to the + * global linked list of devices and increment the device count. + */ +static void add_device(SLMP_INFO *info) +{ + info->next_device = NULL; + info->line = synclinkmp_device_count; + sprintf(info->device_name,"ttySLM%dp%d",info->adapter_num,info->port_num); + + if (info->line < MAX_DEVICES) { + if (maxframe[info->line]) + info->max_frame_size = maxframe[info->line]; + } + + synclinkmp_device_count++; + + if ( !synclinkmp_device_list ) + synclinkmp_device_list = info; + else { + SLMP_INFO *current_dev = synclinkmp_device_list; + while( current_dev->next_device ) + current_dev = current_dev->next_device; + current_dev->next_device = info; + } + + if ( info->max_frame_size < 4096 ) + info->max_frame_size = 4096; + else if ( info->max_frame_size > 65535 ) + info->max_frame_size = 65535; + + printk( "SyncLink MultiPort %s: " + "Mem=(%08x %08X %08x %08X) IRQ=%d MaxFrameSize=%u\n", + info->device_name, + info->phys_sca_base, + info->phys_memory_base, + info->phys_statctrl_base, + info->phys_lcr_base, + info->irq_level, + info->max_frame_size ); + +#if SYNCLINK_GENERIC_HDLC + hdlcdev_init(info); +#endif +} + +static const struct tty_port_operations port_ops = { + .carrier_raised = carrier_raised, + .dtr_rts = dtr_rts, +}; + +/* Allocate and initialize a device instance structure + * + * Return Value: pointer to SLMP_INFO if success, otherwise NULL + */ +static SLMP_INFO *alloc_dev(int adapter_num, int port_num, struct pci_dev *pdev) +{ + SLMP_INFO *info; + + info = kzalloc(sizeof(SLMP_INFO), + GFP_KERNEL); + + if (!info) { + printk("%s(%d) Error can't allocate device instance data for adapter %d, port %d\n", + __FILE__,__LINE__, adapter_num, port_num); + } else { + tty_port_init(&info->port); + info->port.ops = &port_ops; + info->magic = MGSL_MAGIC; + INIT_WORK(&info->task, bh_handler); + info->max_frame_size = 4096; + info->port.close_delay = 5*HZ/10; + info->port.closing_wait = 30*HZ; + init_waitqueue_head(&info->status_event_wait_q); + init_waitqueue_head(&info->event_wait_q); + spin_lock_init(&info->netlock); + memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS)); + info->idle_mode = HDLC_TXIDLE_FLAGS; + info->adapter_num = adapter_num; + info->port_num = port_num; + + /* Copy configuration info to device instance data */ + 
info->irq_level = pdev->irq; + info->phys_lcr_base = pci_resource_start(pdev,0); + info->phys_sca_base = pci_resource_start(pdev,2); + info->phys_memory_base = pci_resource_start(pdev,3); + info->phys_statctrl_base = pci_resource_start(pdev,4); + + /* Because veremap only works on page boundaries we must map + * a larger area than is actually implemented for the LCR + * memory range. We map a full page starting at the page boundary. + */ + info->lcr_offset = info->phys_lcr_base & (PAGE_SIZE-1); + info->phys_lcr_base &= ~(PAGE_SIZE-1); + + info->sca_offset = info->phys_sca_base & (PAGE_SIZE-1); + info->phys_sca_base &= ~(PAGE_SIZE-1); + + info->statctrl_offset = info->phys_statctrl_base & (PAGE_SIZE-1); + info->phys_statctrl_base &= ~(PAGE_SIZE-1); + + info->bus_type = MGSL_BUS_TYPE_PCI; + info->irq_flags = IRQF_SHARED; + + setup_timer(&info->tx_timer, tx_timeout, (unsigned long)info); + setup_timer(&info->status_timer, status_timeout, + (unsigned long)info); + + /* Store the PCI9050 misc control register value because a flaw + * in the PCI9050 prevents LCR registers from being read if + * BIOS assigns an LCR base address with bit 7 set. + * + * Only the misc control register is accessed for which only + * write access is needed, so set an initial value and change + * bits to the device instance data as we write the value + * to the actual misc control register. + */ + info->misc_ctrl_value = 0x087e4546; + + /* initial port state is unknown - if startup errors + * occur, init_error will be set to indicate the + * problem. Once the port is fully initialized, + * this value will be set to 0 to indicate the + * port is available. + */ + info->init_error = -1; + } + + return info; +} + +static void device_init(int adapter_num, struct pci_dev *pdev) +{ + SLMP_INFO *port_array[SCA_MAX_PORTS]; + int port; + + /* allocate device instances for up to SCA_MAX_PORTS devices */ + for ( port = 0; port < SCA_MAX_PORTS; ++port ) { + port_array[port] = alloc_dev(adapter_num,port,pdev); + if( port_array[port] == NULL ) { + for ( --port; port >= 0; --port ) + kfree(port_array[port]); + return; + } + } + + /* give copy of port_array to all ports and add to device list */ + for ( port = 0; port < SCA_MAX_PORTS; ++port ) { + memcpy(port_array[port]->port_array,port_array,sizeof(port_array)); + add_device( port_array[port] ); + spin_lock_init(&port_array[port]->lock); + } + + /* Allocate and claim adapter resources */ + if ( !claim_resources(port_array[0]) ) { + + alloc_dma_bufs(port_array[0]); + + /* copy resource information from first port to others */ + for ( port = 1; port < SCA_MAX_PORTS; ++port ) { + port_array[port]->lock = port_array[0]->lock; + port_array[port]->irq_level = port_array[0]->irq_level; + port_array[port]->memory_base = port_array[0]->memory_base; + port_array[port]->sca_base = port_array[0]->sca_base; + port_array[port]->statctrl_base = port_array[0]->statctrl_base; + port_array[port]->lcr_base = port_array[0]->lcr_base; + alloc_dma_bufs(port_array[port]); + } + + if ( request_irq(port_array[0]->irq_level, + synclinkmp_interrupt, + port_array[0]->irq_flags, + port_array[0]->device_name, + port_array[0]) < 0 ) { + printk( "%s(%d):%s Cant request interrupt, IRQ=%d\n", + __FILE__,__LINE__, + port_array[0]->device_name, + port_array[0]->irq_level ); + } + else { + port_array[0]->irq_requested = true; + adapter_test(port_array[0]); + } + } +} + +static const struct tty_operations ops = { + .open = open, + .close = close, + .write = write, + .put_char = put_char, + .flush_chars = flush_chars, + 
.write_room = write_room, + .chars_in_buffer = chars_in_buffer, + .flush_buffer = flush_buffer, + .ioctl = ioctl, + .throttle = throttle, + .unthrottle = unthrottle, + .send_xchar = send_xchar, + .break_ctl = set_break, + .wait_until_sent = wait_until_sent, + .set_termios = set_termios, + .stop = tx_hold, + .start = tx_release, + .hangup = hangup, + .tiocmget = tiocmget, + .tiocmset = tiocmset, + .get_icount = get_icount, + .proc_fops = &synclinkmp_proc_fops, +}; + + +static void synclinkmp_cleanup(void) +{ + int rc; + SLMP_INFO *info; + SLMP_INFO *tmp; + + printk("Unloading %s %s\n", driver_name, driver_version); + + if (serial_driver) { + if ((rc = tty_unregister_driver(serial_driver))) + printk("%s(%d) failed to unregister tty driver err=%d\n", + __FILE__,__LINE__,rc); + put_tty_driver(serial_driver); + } + + /* reset devices */ + info = synclinkmp_device_list; + while(info) { + reset_port(info); + info = info->next_device; + } + + /* release devices */ + info = synclinkmp_device_list; + while(info) { +#if SYNCLINK_GENERIC_HDLC + hdlcdev_exit(info); +#endif + free_dma_bufs(info); + free_tmp_rx_buf(info); + if ( info->port_num == 0 ) { + if (info->sca_base) + write_reg(info, LPR, 1); /* set low power mode */ + release_resources(info); + } + tmp = info; + info = info->next_device; + kfree(tmp); + } + + pci_unregister_driver(&synclinkmp_pci_driver); +} + +/* Driver initialization entry point. + */ + +static int __init synclinkmp_init(void) +{ + int rc; + + if (break_on_load) { + synclinkmp_get_text_ptr(); + BREAKPOINT(); + } + + printk("%s %s\n", driver_name, driver_version); + + if ((rc = pci_register_driver(&synclinkmp_pci_driver)) < 0) { + printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc); + return rc; + } + + serial_driver = alloc_tty_driver(128); + if (!serial_driver) { + rc = -ENOMEM; + goto error; + } + + /* Initialize the tty_driver structure */ + + serial_driver->owner = THIS_MODULE; + serial_driver->driver_name = "synclinkmp"; + serial_driver->name = "ttySLM"; + serial_driver->major = ttymajor; + serial_driver->minor_start = 64; + serial_driver->type = TTY_DRIVER_TYPE_SERIAL; + serial_driver->subtype = SERIAL_TYPE_NORMAL; + serial_driver->init_termios = tty_std_termios; + serial_driver->init_termios.c_cflag = + B9600 | CS8 | CREAD | HUPCL | CLOCAL; + serial_driver->init_termios.c_ispeed = 9600; + serial_driver->init_termios.c_ospeed = 9600; + serial_driver->flags = TTY_DRIVER_REAL_RAW; + tty_set_operations(serial_driver, &ops); + if ((rc = tty_register_driver(serial_driver)) < 0) { + printk("%s(%d):Couldn't register serial driver\n", + __FILE__,__LINE__); + put_tty_driver(serial_driver); + serial_driver = NULL; + goto error; + } + + printk("%s %s, tty major#%d\n", + driver_name, driver_version, + serial_driver->major); + + return 0; + +error: + synclinkmp_cleanup(); + return rc; +} + +static void __exit synclinkmp_exit(void) +{ + synclinkmp_cleanup(); +} + +module_init(synclinkmp_init); +module_exit(synclinkmp_exit); + +/* Set the port for internal loopback mode. + * The TxCLK and RxCLK signals are generated from the BRG and + * the TxD is looped back to the RxD internally. 
+ */ +static void enable_loopback(SLMP_INFO *info, int enable) +{ + if (enable) { + /* MD2 (Mode Register 2) + * 01..00 CNCT<1..0> Channel Connection 11=Local Loopback + */ + write_reg(info, MD2, (unsigned char)(read_reg(info, MD2) | (BIT1 + BIT0))); + + /* degate external TxC clock source */ + info->port_array[0]->ctrlreg_value |= (BIT0 << (info->port_num * 2)); + write_control_reg(info); + + /* RXS/TXS (Rx/Tx clock source) + * 07 Reserved, must be 0 + * 06..04 Clock Source, 100=BRG + * 03..00 Clock Divisor, 0000=1 + */ + write_reg(info, RXS, 0x40); + write_reg(info, TXS, 0x40); + + } else { + /* MD2 (Mode Register 2) + * 01..00 CNCT<1..0> Channel connection, 0=normal + */ + write_reg(info, MD2, (unsigned char)(read_reg(info, MD2) & ~(BIT1 + BIT0))); + + /* RXS/TXS (Rx/Tx clock source) + * 07 Reserved, must be 0 + * 06..04 Clock Source, 000=RxC/TxC Pin + * 03..00 Clock Divisor, 0000=1 + */ + write_reg(info, RXS, 0x00); + write_reg(info, TXS, 0x00); + } + + /* set LinkSpeed if available, otherwise default to 2Mbps */ + if (info->params.clock_speed) + set_rate(info, info->params.clock_speed); + else + set_rate(info, 3686400); +} + +/* Set the baud rate register to the desired speed + * + * data_rate data rate of clock in bits per second + * A data rate of 0 disables the AUX clock. + */ +static void set_rate( SLMP_INFO *info, u32 data_rate ) +{ + u32 TMCValue; + unsigned char BRValue; + u32 Divisor=0; + + /* fBRG = fCLK/(TMC * 2^BR) + */ + if (data_rate != 0) { + Divisor = 14745600/data_rate; + if (!Divisor) + Divisor = 1; + + TMCValue = Divisor; + + BRValue = 0; + if (TMCValue != 1 && TMCValue != 2) { + /* BRValue of 0 provides 50/50 duty cycle *only* when + * TMCValue is 1 or 2. BRValue of 1 to 9 always provides + * 50/50 duty cycle. + */ + BRValue = 1; + TMCValue >>= 1; + } + + /* while TMCValue is too big for TMC register, divide + * by 2 and increment BR exponent. 
+ */ + for(; TMCValue > 256 && BRValue < 10; BRValue++) + TMCValue >>= 1; + + write_reg(info, TXS, + (unsigned char)((read_reg(info, TXS) & 0xf0) | BRValue)); + write_reg(info, RXS, + (unsigned char)((read_reg(info, RXS) & 0xf0) | BRValue)); + write_reg(info, TMC, (unsigned char)TMCValue); + } + else { + write_reg(info, TXS,0); + write_reg(info, RXS,0); + write_reg(info, TMC, 0); + } +} + +/* Disable receiver + */ +static void rx_stop(SLMP_INFO *info) +{ + if (debug_level >= DEBUG_LEVEL_ISR) + printk("%s(%d):%s rx_stop()\n", + __FILE__,__LINE__, info->device_name ); + + write_reg(info, CMD, RXRESET); + + info->ie0_value &= ~RXRDYE; + write_reg(info, IE0, info->ie0_value); /* disable Rx data interrupts */ + + write_reg(info, RXDMA + DSR, 0); /* disable Rx DMA */ + write_reg(info, RXDMA + DCMD, SWABORT); /* reset/init Rx DMA */ + write_reg(info, RXDMA + DIR, 0); /* disable Rx DMA interrupts */ + + info->rx_enabled = false; + info->rx_overflow = false; +} + +/* enable the receiver + */ +static void rx_start(SLMP_INFO *info) +{ + int i; + + if (debug_level >= DEBUG_LEVEL_ISR) + printk("%s(%d):%s rx_start()\n", + __FILE__,__LINE__, info->device_name ); + + write_reg(info, CMD, RXRESET); + + if ( info->params.mode == MGSL_MODE_HDLC ) { + /* HDLC, disabe IRQ on rxdata */ + info->ie0_value &= ~RXRDYE; + write_reg(info, IE0, info->ie0_value); + + /* Reset all Rx DMA buffers and program rx dma */ + write_reg(info, RXDMA + DSR, 0); /* disable Rx DMA */ + write_reg(info, RXDMA + DCMD, SWABORT); /* reset/init Rx DMA */ + + for (i = 0; i < info->rx_buf_count; i++) { + info->rx_buf_list[i].status = 0xff; + + // throttle to 4 shared memory writes at a time to prevent + // hogging local bus (keep latency time for DMA requests low). + if (!(i % 4)) + read_status_reg(info); + } + info->current_rx_buf = 0; + + /* set current/1st descriptor address */ + write_reg16(info, RXDMA + CDA, + info->rx_buf_list_ex[0].phys_entry); + + /* set new last rx descriptor address */ + write_reg16(info, RXDMA + EDA, + info->rx_buf_list_ex[info->rx_buf_count - 1].phys_entry); + + /* set buffer length (shared by all rx dma data buffers) */ + write_reg16(info, RXDMA + BFL, SCABUFSIZE); + + write_reg(info, RXDMA + DIR, 0x60); /* enable Rx DMA interrupts (EOM/BOF) */ + write_reg(info, RXDMA + DSR, 0xf2); /* clear Rx DMA IRQs, enable Rx DMA */ + } else { + /* async, enable IRQ on rxdata */ + info->ie0_value |= RXRDYE; + write_reg(info, IE0, info->ie0_value); + } + + write_reg(info, CMD, RXENABLE); + + info->rx_overflow = false; + info->rx_enabled = true; +} + +/* Enable the transmitter and send a transmit frame if + * one is loaded in the DMA buffers. + */ +static void tx_start(SLMP_INFO *info) +{ + if (debug_level >= DEBUG_LEVEL_ISR) + printk("%s(%d):%s tx_start() tx_count=%d\n", + __FILE__,__LINE__, info->device_name,info->tx_count ); + + if (!info->tx_enabled ) { + write_reg(info, CMD, TXRESET); + write_reg(info, CMD, TXENABLE); + info->tx_enabled = true; + } + + if ( info->tx_count ) { + + /* If auto RTS enabled and RTS is inactive, then assert */ + /* RTS and set a flag indicating that the driver should */ + /* negate RTS when the transmission completes. 
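For reference, the arithmetic in set_rate() above can be checked in isolation. The sketch below is a hypothetical user-space rework of the same computation, not driver code; it assumes only what is visible above: the 14.7456 MHz BRG input clock and the 8-bit TMC register, with fBRG = fCLK/(TMC * 2^BR). (For async operation, async_mode() further down calls set_rate() with 16x the data rate, since MD1 selects the 1/16 sampling clock.)

/* brg_calc.c - hypothetical user-space rework of the set_rate() math above */
#include <stdio.h>

#define FCLK 14745600u                          /* BRG input clock used by the driver */

static void brg_calc(unsigned int data_rate)
{
        unsigned int tmc = FCLK / data_rate;    /* fBRG = fCLK / (TMC * 2^BR) */
        unsigned int br = 0;

        if (tmc == 0)
                tmc = 1;

        /* BR=0 gives a 50/50 duty cycle only for TMC of 1 or 2 */
        if (tmc != 1 && tmc != 2) {
                br = 1;
                tmc >>= 1;
        }

        /* scale TMC into the 8-bit register range, as the driver's loop does */
        for (; tmc > 256 && br < 10; br++)
                tmc >>= 1;

        printf("%u bps -> TMC=%u BR=%u (actual %u bps)\n",
               data_rate, tmc, br, FCLK / (tmc << br));
}

int main(void)
{
        brg_calc(9600);                         /* TMC=192 BR=3, exactly 9600  */
        brg_calc(38400);                        /* TMC=192 BR=1, exactly 38400 */
        return 0;
}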
*/ + + info->drop_rts_on_tx_done = false; + + if (info->params.mode != MGSL_MODE_ASYNC) { + + if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) { + get_signals( info ); + if ( !(info->serial_signals & SerialSignal_RTS) ) { + info->serial_signals |= SerialSignal_RTS; + set_signals( info ); + info->drop_rts_on_tx_done = true; + } + } + + write_reg16(info, TRC0, + (unsigned short)(((tx_negate_fifo_level-1)<<8) + tx_active_fifo_level)); + + write_reg(info, TXDMA + DSR, 0); /* disable DMA channel */ + write_reg(info, TXDMA + DCMD, SWABORT); /* reset/init DMA channel */ + + /* set TX CDA (current descriptor address) */ + write_reg16(info, TXDMA + CDA, + info->tx_buf_list_ex[0].phys_entry); + + /* set TX EDA (last descriptor address) */ + write_reg16(info, TXDMA + EDA, + info->tx_buf_list_ex[info->last_tx_buf].phys_entry); + + /* enable underrun IRQ */ + info->ie1_value &= ~IDLE; + info->ie1_value |= UDRN; + write_reg(info, IE1, info->ie1_value); + write_reg(info, SR1, (unsigned char)(IDLE + UDRN)); + + write_reg(info, TXDMA + DIR, 0x40); /* enable Tx DMA interrupts (EOM) */ + write_reg(info, TXDMA + DSR, 0xf2); /* clear Tx DMA IRQs, enable Tx DMA */ + + mod_timer(&info->tx_timer, jiffies + + msecs_to_jiffies(5000)); + } + else { + tx_load_fifo(info); + /* async, enable IRQ on txdata */ + info->ie0_value |= TXRDYE; + write_reg(info, IE0, info->ie0_value); + } + + info->tx_active = true; + } +} + +/* stop the transmitter and DMA + */ +static void tx_stop( SLMP_INFO *info ) +{ + if (debug_level >= DEBUG_LEVEL_ISR) + printk("%s(%d):%s tx_stop()\n", + __FILE__,__LINE__, info->device_name ); + + del_timer(&info->tx_timer); + + write_reg(info, TXDMA + DSR, 0); /* disable DMA channel */ + write_reg(info, TXDMA + DCMD, SWABORT); /* reset/init DMA channel */ + + write_reg(info, CMD, TXRESET); + + info->ie1_value &= ~(UDRN + IDLE); + write_reg(info, IE1, info->ie1_value); /* disable tx status interrupts */ + write_reg(info, SR1, (unsigned char)(IDLE + UDRN)); /* clear pending */ + + info->ie0_value &= ~TXRDYE; + write_reg(info, IE0, info->ie0_value); /* disable tx data interrupts */ + + info->tx_enabled = false; + info->tx_active = false; +} + +/* Fill the transmit FIFO until the FIFO is full or + * there is no more data to load. 
+ */ +static void tx_load_fifo(SLMP_INFO *info) +{ + u8 TwoBytes[2]; + + /* do nothing is now tx data available and no XON/XOFF pending */ + + if ( !info->tx_count && !info->x_char ) + return; + + /* load the Transmit FIFO until FIFOs full or all data sent */ + + while( info->tx_count && (read_reg(info,SR0) & BIT1) ) { + + /* there is more space in the transmit FIFO and */ + /* there is more data in transmit buffer */ + + if ( (info->tx_count > 1) && !info->x_char ) { + /* write 16-bits */ + TwoBytes[0] = info->tx_buf[info->tx_get++]; + if (info->tx_get >= info->max_frame_size) + info->tx_get -= info->max_frame_size; + TwoBytes[1] = info->tx_buf[info->tx_get++]; + if (info->tx_get >= info->max_frame_size) + info->tx_get -= info->max_frame_size; + + write_reg16(info, TRB, *((u16 *)TwoBytes)); + + info->tx_count -= 2; + info->icount.tx += 2; + } else { + /* only 1 byte left to transmit or 1 FIFO slot left */ + + if (info->x_char) { + /* transmit pending high priority char */ + write_reg(info, TRB, info->x_char); + info->x_char = 0; + } else { + write_reg(info, TRB, info->tx_buf[info->tx_get++]); + if (info->tx_get >= info->max_frame_size) + info->tx_get -= info->max_frame_size; + info->tx_count--; + } + info->icount.tx++; + } + } +} + +/* Reset a port to a known state + */ +static void reset_port(SLMP_INFO *info) +{ + if (info->sca_base) { + + tx_stop(info); + rx_stop(info); + + info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS); + set_signals(info); + + /* disable all port interrupts */ + info->ie0_value = 0; + info->ie1_value = 0; + info->ie2_value = 0; + write_reg(info, IE0, info->ie0_value); + write_reg(info, IE1, info->ie1_value); + write_reg(info, IE2, info->ie2_value); + + write_reg(info, CMD, CHRESET); + } +} + +/* Reset all the ports to a known state. + */ +static void reset_adapter(SLMP_INFO *info) +{ + int i; + + for ( i=0; i < SCA_MAX_PORTS; ++i) { + if (info->port_array[i]) + reset_port(info->port_array[i]); + } +} + +/* Program port for asynchronous communications. 
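One detail of tx_load_fifo() above that is easy to miss: two queued bytes are combined into a single 16-bit write to the TRB FIFO register through *((u16 *)TwoBytes), so the value written is host-endian. The toy program below (illustrative only, not driver code) shows the resulting value on a little-endian host such as the x86/PCI systems this adapter plugs into; which byte the SCA shifts out first is a property of the FIFO hardware and is unaffected by this packing.

/* pack16.c - host-side view of the TwoBytes[] packing in tx_load_fifo() above */
#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned char two[2] = { 0xAB, 0xCD };  /* two[0] is the byte dequeued first */
        unsigned short v;

        memcpy(&v, two, sizeof(v));             /* same layout as *((u16 *)TwoBytes) */

        /* prints 0xCDAB on a little-endian host, 0xABCD on a big-endian one */
        printf("value written to TRB: 0x%04X\n", v);
        return 0;
}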
+ */ +static void async_mode(SLMP_INFO *info) +{ + + unsigned char RegValue; + + tx_stop(info); + rx_stop(info); + + /* MD0, Mode Register 0 + * + * 07..05 PRCTL<2..0>, Protocol Mode, 000=async + * 04 AUTO, Auto-enable (RTS/CTS/DCD) + * 03 Reserved, must be 0 + * 02 CRCCC, CRC Calculation, 0=disabled + * 01..00 STOP<1..0> Stop bits (00=1,10=2) + * + * 0000 0000 + */ + RegValue = 0x00; + if (info->params.stop_bits != 1) + RegValue |= BIT1; + write_reg(info, MD0, RegValue); + + /* MD1, Mode Register 1 + * + * 07..06 BRATE<1..0>, bit rate, 00=1/1 01=1/16 10=1/32 11=1/64 + * 05..04 TXCHR<1..0>, tx char size, 00=8 bits,01=7,10=6,11=5 + * 03..02 RXCHR<1..0>, rx char size + * 01..00 PMPM<1..0>, Parity mode, 00=none 10=even 11=odd + * + * 0100 0000 + */ + RegValue = 0x40; + switch (info->params.data_bits) { + case 7: RegValue |= BIT4 + BIT2; break; + case 6: RegValue |= BIT5 + BIT3; break; + case 5: RegValue |= BIT5 + BIT4 + BIT3 + BIT2; break; + } + if (info->params.parity != ASYNC_PARITY_NONE) { + RegValue |= BIT1; + if (info->params.parity == ASYNC_PARITY_ODD) + RegValue |= BIT0; + } + write_reg(info, MD1, RegValue); + + /* MD2, Mode Register 2 + * + * 07..02 Reserved, must be 0 + * 01..00 CNCT<1..0> Channel connection, 00=normal 11=local loopback + * + * 0000 0000 + */ + RegValue = 0x00; + if (info->params.loopback) + RegValue |= (BIT1 + BIT0); + write_reg(info, MD2, RegValue); + + /* RXS, Receive clock source + * + * 07 Reserved, must be 0 + * 06..04 RXCS<2..0>, clock source, 000=RxC Pin, 100=BRG, 110=DPLL + * 03..00 RXBR<3..0>, rate divisor, 0000=1 + */ + RegValue=BIT6; + write_reg(info, RXS, RegValue); + + /* TXS, Transmit clock source + * + * 07 Reserved, must be 0 + * 06..04 RXCS<2..0>, clock source, 000=TxC Pin, 100=BRG, 110=Receive Clock + * 03..00 RXBR<3..0>, rate divisor, 0000=1 + */ + RegValue=BIT6; + write_reg(info, TXS, RegValue); + + /* Control Register + * + * 6,4,2,0 CLKSEL<3..0>, 0 = TcCLK in, 1 = Auxclk out + */ + info->port_array[0]->ctrlreg_value |= (BIT0 << (info->port_num * 2)); + write_control_reg(info); + + tx_set_idle(info); + + /* RRC Receive Ready Control 0 + * + * 07..05 Reserved, must be 0 + * 04..00 RRC<4..0> Rx FIFO trigger active 0x00 = 1 byte + */ + write_reg(info, RRC, 0x00); + + /* TRC0 Transmit Ready Control 0 + * + * 07..05 Reserved, must be 0 + * 04..00 TRC<4..0> Tx FIFO trigger active 0x10 = 16 bytes + */ + write_reg(info, TRC0, 0x10); + + /* TRC1 Transmit Ready Control 1 + * + * 07..05 Reserved, must be 0 + * 04..00 TRC<4..0> Tx FIFO trigger inactive 0x1e = 31 bytes (full-1) + */ + write_reg(info, TRC1, 0x1e); + + /* CTL, MSCI control register + * + * 07..06 Reserved, set to 0 + * 05 UDRNC, underrun control, 0=abort 1=CRC+flag (HDLC/BSC) + * 04 IDLC, idle control, 0=mark 1=idle register + * 03 BRK, break, 0=off 1 =on (async) + * 02 SYNCLD, sync char load enable (BSC) 1=enabled + * 01 GOP, go active on poll (LOOP mode) 1=enabled + * 00 RTS, RTS output control, 0=active 1=inactive + * + * 0001 0001 + */ + RegValue = 0x10; + if (!(info->serial_signals & SerialSignal_RTS)) + RegValue |= 0x01; + write_reg(info, CTL, RegValue); + + /* enable status interrupts */ + info->ie0_value |= TXINTE + RXINTE; + write_reg(info, IE0, info->ie0_value); + + /* enable break detect interrupt */ + info->ie1_value = BRKD; + write_reg(info, IE1, info->ie1_value); + + /* enable rx overrun interrupt */ + info->ie2_value = OVRN; + write_reg(info, IE2, info->ie2_value); + + set_rate( info, info->params.data_rate * 16 ); +} + +/* Program the SCA for HDLC communications. 
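As a concrete illustration of the MD1 encoding in async_mode() above, assuming the usual BITn = 1 << n definitions: the 0x40 base selects the 1/16 bit-rate clock, so 8N1 yields MD1 = 0x40, 7E1 yields 0x40 | BIT4 | BIT2 | BIT1 = 0x56, and 7O1 adds BIT0 for 0x57. A minimal user-space sketch of the same mapping (hypothetical helper, not driver code):

/* md1_calc.c - sketch of the MD1 value computed by async_mode() above */
#include <stdio.h>

#define BIT(n) (1u << (n))

enum parity { PAR_NONE, PAR_EVEN, PAR_ODD };

static unsigned char md1_value(int data_bits, enum parity p)
{
        unsigned char v = 0x40;                 /* BRATE = 01 -> 1/16 clock */

        switch (data_bits) {
        case 7: v |= BIT(4) | BIT(2); break;
        case 6: v |= BIT(5) | BIT(3); break;
        case 5: v |= BIT(5) | BIT(4) | BIT(3) | BIT(2); break;
        default: break;                         /* 8 data bits: TXCHR/RXCHR stay 00 */
        }

        if (p != PAR_NONE) {
                v |= BIT(1);                    /* parity enable */
                if (p == PAR_ODD)
                        v |= BIT(0);            /* odd instead of even */
        }
        return v;
}

int main(void)
{
        printf("8N1 -> MD1=0x%02X\n", md1_value(8, PAR_NONE)); /* 0x40 */
        printf("7E1 -> MD1=0x%02X\n", md1_value(7, PAR_EVEN)); /* 0x56 */
        printf("7O1 -> MD1=0x%02X\n", md1_value(7, PAR_ODD));  /* 0x57 */
        return 0;
}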
+ */ +static void hdlc_mode(SLMP_INFO *info) +{ + unsigned char RegValue; + u32 DpllDivisor; + + // Can't use DPLL because SCA outputs recovered clock on RxC when + // DPLL mode selected. This causes output contention with RxC receiver. + // Use of DPLL would require external hardware to disable RxC receiver + // when DPLL mode selected. + info->params.flags &= ~(HDLC_FLAG_TXC_DPLL + HDLC_FLAG_RXC_DPLL); + + /* disable DMA interrupts */ + write_reg(info, TXDMA + DIR, 0); + write_reg(info, RXDMA + DIR, 0); + + /* MD0, Mode Register 0 + * + * 07..05 PRCTL<2..0>, Protocol Mode, 100=HDLC + * 04 AUTO, Auto-enable (RTS/CTS/DCD) + * 03 Reserved, must be 0 + * 02 CRCCC, CRC Calculation, 1=enabled + * 01 CRC1, CRC selection, 0=CRC-16,1=CRC-CCITT-16 + * 00 CRC0, CRC initial value, 1 = all 1s + * + * 1000 0001 + */ + RegValue = 0x81; + if (info->params.flags & HDLC_FLAG_AUTO_CTS) + RegValue |= BIT4; + if (info->params.flags & HDLC_FLAG_AUTO_DCD) + RegValue |= BIT4; + if (info->params.crc_type == HDLC_CRC_16_CCITT) + RegValue |= BIT2 + BIT1; + write_reg(info, MD0, RegValue); + + /* MD1, Mode Register 1 + * + * 07..06 ADDRS<1..0>, Address detect, 00=no addr check + * 05..04 TXCHR<1..0>, tx char size, 00=8 bits + * 03..02 RXCHR<1..0>, rx char size, 00=8 bits + * 01..00 PMPM<1..0>, Parity mode, 00=no parity + * + * 0000 0000 + */ + RegValue = 0x00; + write_reg(info, MD1, RegValue); + + /* MD2, Mode Register 2 + * + * 07 NRZFM, 0=NRZ, 1=FM + * 06..05 CODE<1..0> Encoding, 00=NRZ + * 04..03 DRATE<1..0> DPLL Divisor, 00=8 + * 02 Reserved, must be 0 + * 01..00 CNCT<1..0> Channel connection, 0=normal + * + * 0000 0000 + */ + RegValue = 0x00; + switch(info->params.encoding) { + case HDLC_ENCODING_NRZI: RegValue |= BIT5; break; + case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT7 + BIT5; break; /* aka FM1 */ + case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT7 + BIT6; break; /* aka FM0 */ + case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT7; break; /* aka Manchester */ +#if 0 + case HDLC_ENCODING_NRZB: /* not supported */ + case HDLC_ENCODING_NRZI_MARK: /* not supported */ + case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: /* not supported */ +#endif + } + if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) { + DpllDivisor = 16; + RegValue |= BIT3; + } else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) { + DpllDivisor = 8; + } else { + DpllDivisor = 32; + RegValue |= BIT4; + } + write_reg(info, MD2, RegValue); + + + /* RXS, Receive clock source + * + * 07 Reserved, must be 0 + * 06..04 RXCS<2..0>, clock source, 000=RxC Pin, 100=BRG, 110=DPLL + * 03..00 RXBR<3..0>, rate divisor, 0000=1 + */ + RegValue=0; + if (info->params.flags & HDLC_FLAG_RXC_BRG) + RegValue |= BIT6; + if (info->params.flags & HDLC_FLAG_RXC_DPLL) + RegValue |= BIT6 + BIT5; + write_reg(info, RXS, RegValue); + + /* TXS, Transmit clock source + * + * 07 Reserved, must be 0 + * 06..04 RXCS<2..0>, clock source, 000=TxC Pin, 100=BRG, 110=Receive Clock + * 03..00 RXBR<3..0>, rate divisor, 0000=1 + */ + RegValue=0; + if (info->params.flags & HDLC_FLAG_TXC_BRG) + RegValue |= BIT6; + if (info->params.flags & HDLC_FLAG_TXC_DPLL) + RegValue |= BIT6 + BIT5; + write_reg(info, TXS, RegValue); + + if (info->params.flags & HDLC_FLAG_RXC_DPLL) + set_rate(info, info->params.clock_speed * DpllDivisor); + else + set_rate(info, info->params.clock_speed); + + /* GPDATA (General Purpose I/O Data Register) + * + * 6,4,2,0 CLKSEL<3..0>, 0 = TcCLK in, 1 = Auxclk out + */ + if (info->params.flags & HDLC_FLAG_TXC_BRG) + info->port_array[0]->ctrlreg_value |= (BIT0 << (info->port_num * 
2)); + else + info->port_array[0]->ctrlreg_value &= ~(BIT0 << (info->port_num * 2)); + write_control_reg(info); + + /* RRC Receive Ready Control 0 + * + * 07..05 Reserved, must be 0 + * 04..00 RRC<4..0> Rx FIFO trigger active + */ + write_reg(info, RRC, rx_active_fifo_level); + + /* TRC0 Transmit Ready Control 0 + * + * 07..05 Reserved, must be 0 + * 04..00 TRC<4..0> Tx FIFO trigger active + */ + write_reg(info, TRC0, tx_active_fifo_level); + + /* TRC1 Transmit Ready Control 1 + * + * 07..05 Reserved, must be 0 + * 04..00 TRC<4..0> Tx FIFO trigger inactive 0x1f = 32 bytes (full) + */ + write_reg(info, TRC1, (unsigned char)(tx_negate_fifo_level - 1)); + + /* DMR, DMA Mode Register + * + * 07..05 Reserved, must be 0 + * 04 TMOD, Transfer Mode: 1=chained-block + * 03 Reserved, must be 0 + * 02 NF, Number of Frames: 1=multi-frame + * 01 CNTE, Frame End IRQ Counter enable: 0=disabled + * 00 Reserved, must be 0 + * + * 0001 0100 + */ + write_reg(info, TXDMA + DMR, 0x14); + write_reg(info, RXDMA + DMR, 0x14); + + /* Set chain pointer base (upper 8 bits of 24 bit addr) */ + write_reg(info, RXDMA + CPB, + (unsigned char)(info->buffer_list_phys >> 16)); + + /* Set chain pointer base (upper 8 bits of 24 bit addr) */ + write_reg(info, TXDMA + CPB, + (unsigned char)(info->buffer_list_phys >> 16)); + + /* enable status interrupts. other code enables/disables + * the individual sources for these two interrupt classes. + */ + info->ie0_value |= TXINTE + RXINTE; + write_reg(info, IE0, info->ie0_value); + + /* CTL, MSCI control register + * + * 07..06 Reserved, set to 0 + * 05 UDRNC, underrun control, 0=abort 1=CRC+flag (HDLC/BSC) + * 04 IDLC, idle control, 0=mark 1=idle register + * 03 BRK, break, 0=off 1 =on (async) + * 02 SYNCLD, sync char load enable (BSC) 1=enabled + * 01 GOP, go active on poll (LOOP mode) 1=enabled + * 00 RTS, RTS output control, 0=active 1=inactive + * + * 0001 0001 + */ + RegValue = 0x10; + if (!(info->serial_signals & SerialSignal_RTS)) + RegValue |= 0x01; + write_reg(info, CTL, RegValue); + + /* preamble not supported ! */ + + tx_set_idle(info); + tx_stop(info); + rx_stop(info); + + set_rate(info, info->params.clock_speed); + + if (info->params.loopback) + enable_loopback(info,1); +} + +/* Set the transmit HDLC idle mode + */ +static void tx_set_idle(SLMP_INFO *info) +{ + unsigned char RegValue = 0xff; + + /* Map API idle mode to SCA register bits */ + switch(info->idle_mode) { + case HDLC_TXIDLE_FLAGS: RegValue = 0x7e; break; + case HDLC_TXIDLE_ALT_ZEROS_ONES: RegValue = 0xaa; break; + case HDLC_TXIDLE_ZEROS: RegValue = 0x00; break; + case HDLC_TXIDLE_ONES: RegValue = 0xff; break; + case HDLC_TXIDLE_ALT_MARK_SPACE: RegValue = 0xaa; break; + case HDLC_TXIDLE_SPACE: RegValue = 0x00; break; + case HDLC_TXIDLE_MARK: RegValue = 0xff; break; + } + + write_reg(info, IDL, RegValue); +} + +/* Query the adapter for the state of the V24 status (input) signals. 
+ */ +static void get_signals(SLMP_INFO *info) +{ + u16 status = read_reg(info, SR3); + u16 gpstatus = read_status_reg(info); + u16 testbit; + + /* clear all serial signals except DTR and RTS */ + info->serial_signals &= SerialSignal_DTR + SerialSignal_RTS; + + /* set serial signal bits to reflect MISR */ + + if (!(status & BIT3)) + info->serial_signals |= SerialSignal_CTS; + + if ( !(status & BIT2)) + info->serial_signals |= SerialSignal_DCD; + + testbit = BIT1 << (info->port_num * 2); // Port 0..3 RI is GPDATA<1,3,5,7> + if (!(gpstatus & testbit)) + info->serial_signals |= SerialSignal_RI; + + testbit = BIT0 << (info->port_num * 2); // Port 0..3 DSR is GPDATA<0,2,4,6> + if (!(gpstatus & testbit)) + info->serial_signals |= SerialSignal_DSR; +} + +/* Set the state of DTR and RTS based on contents of + * serial_signals member of device context. + */ +static void set_signals(SLMP_INFO *info) +{ + unsigned char RegValue; + u16 EnableBit; + + RegValue = read_reg(info, CTL); + if (info->serial_signals & SerialSignal_RTS) + RegValue &= ~BIT0; + else + RegValue |= BIT0; + write_reg(info, CTL, RegValue); + + // Port 0..3 DTR is ctrl reg <1,3,5,7> + EnableBit = BIT1 << (info->port_num*2); + if (info->serial_signals & SerialSignal_DTR) + info->port_array[0]->ctrlreg_value &= ~EnableBit; + else + info->port_array[0]->ctrlreg_value |= EnableBit; + write_control_reg(info); +} + +/*******************/ +/* DMA Buffer Code */ +/*******************/ + +/* Set the count for all receive buffers to SCABUFSIZE + * and set the current buffer to the first buffer. This effectively + * makes all buffers free and discards any data in buffers. + */ +static void rx_reset_buffers(SLMP_INFO *info) +{ + rx_free_frame_buffers(info, 0, info->rx_buf_count - 1); +} + +/* Free the buffers used by a received frame + * + * info pointer to device instance data + * first index of 1st receive buffer of frame + * last index of last receive buffer of frame + */ +static void rx_free_frame_buffers(SLMP_INFO *info, unsigned int first, unsigned int last) +{ + bool done = false; + + while(!done) { + /* reset current buffer for reuse */ + info->rx_buf_list[first].status = 0xff; + + if (first == last) { + done = true; + /* set new last rx descriptor address */ + write_reg16(info, RXDMA + EDA, info->rx_buf_list_ex[first].phys_entry); + } + + first++; + if (first == info->rx_buf_count) + first = 0; + } + + /* set current buffer to next buffer after last buffer of frame */ + info->current_rx_buf = first; +} + +/* Return a received frame from the receive DMA buffers. + * Only frames received without errors are returned. + * + * Return Value: true if frame returned, otherwise false + */ +static bool rx_get_frame(SLMP_INFO *info) +{ + unsigned int StartIndex, EndIndex; /* index of 1st and last buffers of Rx frame */ + unsigned short status; + unsigned int framesize = 0; + bool ReturnCode = false; + unsigned long flags; + struct tty_struct *tty = info->port.tty; + unsigned char addr_field = 0xff; + SCADESC *desc; + SCADESC_EX *desc_ex; + +CheckAgain: + /* assume no frame returned, set zero length */ + framesize = 0; + addr_field = 0xff; + + /* + * current_rx_buf points to the 1st buffer of the next available + * receive frame. To find the last buffer of the frame look for + * a non-zero status field in the buffer entries. (The status + * field is set by the 16C32 after completing a receive frame. 
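get_signals() and set_signals() above spread the per-port modem signals across registers shared by all four ports: DSR for port n lives in GPDATA bit 2n, RI in GPDATA bit 2n+1, and DTR in control-register bit 2n+1. The short sketch below (illustrative only, not driver code) prints the resulting masks, matching the GPDATA<0,2,4,6>/<1,3,5,7> comments in the code:

/* sigmap.c - per-port signal bit masks used by get_signals()/set_signals() above */
#include <stdio.h>

int main(void)
{
        int port;

        for (port = 0; port < 4; port++) {
                unsigned int dsr = 1u << (port * 2);    /* GPDATA<0,2,4,6> */
                unsigned int ri  = 2u << (port * 2);    /* GPDATA<1,3,5,7> */
                unsigned int dtr = 2u << (port * 2);    /* ctrl reg <1,3,5,7> */

                printf("port %d: DSR mask 0x%02X, RI mask 0x%02X, DTR mask 0x%02X\n",
                       port, dsr, ri, dtr);
        }
        return 0;
}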
+ */ + StartIndex = EndIndex = info->current_rx_buf; + + for ( ;; ) { + desc = &info->rx_buf_list[EndIndex]; + desc_ex = &info->rx_buf_list_ex[EndIndex]; + + if (desc->status == 0xff) + goto Cleanup; /* current desc still in use, no frames available */ + + if (framesize == 0 && info->params.addr_filter != 0xff) + addr_field = desc_ex->virt_addr[0]; + + framesize += desc->length; + + /* Status != 0 means last buffer of frame */ + if (desc->status) + break; + + EndIndex++; + if (EndIndex == info->rx_buf_count) + EndIndex = 0; + + if (EndIndex == info->current_rx_buf) { + /* all buffers have been 'used' but none mark */ + /* the end of a frame. Reset buffers and receiver. */ + if ( info->rx_enabled ){ + spin_lock_irqsave(&info->lock,flags); + rx_start(info); + spin_unlock_irqrestore(&info->lock,flags); + } + goto Cleanup; + } + + } + + /* check status of receive frame */ + + /* frame status is byte stored after frame data + * + * 7 EOM (end of msg), 1 = last buffer of frame + * 6 Short Frame, 1 = short frame + * 5 Abort, 1 = frame aborted + * 4 Residue, 1 = last byte is partial + * 3 Overrun, 1 = overrun occurred during frame reception + * 2 CRC, 1 = CRC error detected + * + */ + status = desc->status; + + /* ignore CRC bit if not using CRC (bit is undefined) */ + /* Note:CRC is not save to data buffer */ + if (info->params.crc_type == HDLC_CRC_NONE) + status &= ~BIT2; + + if (framesize == 0 || + (addr_field != 0xff && addr_field != info->params.addr_filter)) { + /* discard 0 byte frames, this seems to occur sometime + * when remote is idling flags. + */ + rx_free_frame_buffers(info, StartIndex, EndIndex); + goto CheckAgain; + } + + if (framesize < 2) + status |= BIT6; + + if (status & (BIT6+BIT5+BIT3+BIT2)) { + /* received frame has errors, + * update counts and mark frame size as 0 + */ + if (status & BIT6) + info->icount.rxshort++; + else if (status & BIT5) + info->icount.rxabort++; + else if (status & BIT3) + info->icount.rxover++; + else + info->icount.rxcrc++; + + framesize = 0; +#if SYNCLINK_GENERIC_HDLC + { + info->netdev->stats.rx_errors++; + info->netdev->stats.rx_frame_errors++; + } +#endif + } + + if ( debug_level >= DEBUG_LEVEL_BH ) + printk("%s(%d):%s rx_get_frame() status=%04X size=%d\n", + __FILE__,__LINE__,info->device_name,status,framesize); + + if ( debug_level >= DEBUG_LEVEL_DATA ) + trace_block(info,info->rx_buf_list_ex[StartIndex].virt_addr, + min_t(int, framesize,SCABUFSIZE),0); + + if (framesize) { + if (framesize > info->max_frame_size) + info->icount.rxlong++; + else { + /* copy dma buffer(s) to contiguous intermediate buffer */ + int copy_count = framesize; + int index = StartIndex; + unsigned char *ptmp = info->tmp_rx_buf; + info->tmp_rx_buf_count = framesize; + + info->icount.rxok++; + + while(copy_count) { + int partial_count = min(copy_count,SCABUFSIZE); + memcpy( ptmp, + info->rx_buf_list_ex[index].virt_addr, + partial_count ); + ptmp += partial_count; + copy_count -= partial_count; + + if ( ++index == info->rx_buf_count ) + index = 0; + } + +#if SYNCLINK_GENERIC_HDLC + if (info->netcount) + hdlcdev_rx(info,info->tmp_rx_buf,framesize); + else +#endif + ldisc_receive_buf(tty,info->tmp_rx_buf, + info->flag_buf, framesize); + } + } + /* Free the buffers used by this frame. */ + rx_free_frame_buffers( info, StartIndex, EndIndex ); + + ReturnCode = true; + +Cleanup: + if ( info->rx_enabled && info->rx_overflow ) { + /* Receiver is enabled, but needs to restarted due to + * rx buffer overflow. If buffers are empty, restart receiver. 
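The descriptor convention that rx_get_frame() above relies on can be summarized as: the driver marks free buffers with status 0xff, the SCA leaves status 0 in intermediate buffers of a frame, and it writes a non-zero status byte (EOM plus error bits) into the last buffer. The following is a condensed, hypothetical user-space model of that walk, not driver code; it ignores the address-filter and error-accounting details:

/* ringwalk.c - hypothetical model of the receive descriptor walk above */
#include <stdio.h>

struct rxdesc {
        unsigned char status;   /* 0xff = free, 0 = mid-frame, non-zero = last buffer + flags */
        unsigned short length;  /* bytes the SCA stored in this buffer */
};

/* Returns the frame length starting at *cur, or -1 if no complete frame yet.
 * On success *cur is advanced past the frame, as rx_get_frame() does.
 */
static int frame_length(const struct rxdesc *ring, int count, int *cur)
{
        int i = *cur, size = 0, scanned;

        for (scanned = 0; scanned < count; scanned++) {
                if (ring[i].status == 0xff)
                        return -1;              /* buffer still owned by the DMA engine */

                size += ring[i].length;

                if (ring[i].status) {           /* EOM/error bits: last buffer of frame */
                        *cur = (i + 1) % count;
                        return size;
                }
                i = (i + 1) % count;
        }
        return -1;                              /* wrapped without EOM: driver restarts the receiver */
}

int main(void)
{
        struct rxdesc ring[4] = { {0, 128}, {0, 128}, {0x80, 40}, {0xff, 0} };
        int cur = 0;

        printf("frame length: %d bytes\n", frame_length(ring, 4, &cur));    /* 296 */
        return 0;
}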
+ */ + if (info->rx_buf_list[EndIndex].status == 0xff) { + spin_lock_irqsave(&info->lock,flags); + rx_start(info); + spin_unlock_irqrestore(&info->lock,flags); + } + } + + return ReturnCode; +} + +/* load the transmit DMA buffer with data + */ +static void tx_load_dma_buffer(SLMP_INFO *info, const char *buf, unsigned int count) +{ + unsigned short copy_count; + unsigned int i = 0; + SCADESC *desc; + SCADESC_EX *desc_ex; + + if ( debug_level >= DEBUG_LEVEL_DATA ) + trace_block(info,buf, min_t(int, count,SCABUFSIZE), 1); + + /* Copy source buffer to one or more DMA buffers, starting with + * the first transmit dma buffer. + */ + for(i=0;;) + { + copy_count = min_t(unsigned short,count,SCABUFSIZE); + + desc = &info->tx_buf_list[i]; + desc_ex = &info->tx_buf_list_ex[i]; + + load_pci_memory(info, desc_ex->virt_addr,buf,copy_count); + + desc->length = copy_count; + desc->status = 0; + + buf += copy_count; + count -= copy_count; + + if (!count) + break; + + i++; + if (i >= info->tx_buf_count) + i = 0; + } + + info->tx_buf_list[i].status = 0x81; /* set EOM and EOT status */ + info->last_tx_buf = ++i; +} + +static bool register_test(SLMP_INFO *info) +{ + static unsigned char testval[] = {0x00, 0xff, 0xaa, 0x55, 0x69, 0x96}; + static unsigned int count = ARRAY_SIZE(testval); + unsigned int i; + bool rc = true; + unsigned long flags; + + spin_lock_irqsave(&info->lock,flags); + reset_port(info); + + /* assume failure */ + info->init_error = DiagStatus_AddressFailure; + + /* Write bit patterns to various registers but do it out of */ + /* sync, then read back and verify values. */ + + for (i = 0 ; i < count ; i++) { + write_reg(info, TMC, testval[i]); + write_reg(info, IDL, testval[(i+1)%count]); + write_reg(info, SA0, testval[(i+2)%count]); + write_reg(info, SA1, testval[(i+3)%count]); + + if ( (read_reg(info, TMC) != testval[i]) || + (read_reg(info, IDL) != testval[(i+1)%count]) || + (read_reg(info, SA0) != testval[(i+2)%count]) || + (read_reg(info, SA1) != testval[(i+3)%count]) ) + { + rc = false; + break; + } + } + + reset_port(info); + spin_unlock_irqrestore(&info->lock,flags); + + return rc; +} + +static bool irq_test(SLMP_INFO *info) +{ + unsigned long timeout; + unsigned long flags; + + unsigned char timer = (info->port_num & 1) ? TIMER2 : TIMER0; + + spin_lock_irqsave(&info->lock,flags); + reset_port(info); + + /* assume failure */ + info->init_error = DiagStatus_IrqFailure; + info->irq_occurred = false; + + /* setup timer0 on SCA0 to interrupt */ + + /* IER2<7..4> = timer<3..0> interrupt enables (1=enabled) */ + write_reg(info, IER2, (unsigned char)((info->port_num & 1) ? 
BIT6 : BIT4)); + + write_reg(info, (unsigned char)(timer + TEPR), 0); /* timer expand prescale */ + write_reg16(info, (unsigned char)(timer + TCONR), 1); /* timer constant */ + + + /* TMCS, Timer Control/Status Register + * + * 07 CMF, Compare match flag (read only) 1=match + * 06 ECMI, CMF Interrupt Enable: 1=enabled + * 05 Reserved, must be 0 + * 04 TME, Timer Enable + * 03..00 Reserved, must be 0 + * + * 0101 0000 + */ + write_reg(info, (unsigned char)(timer + TMCS), 0x50); + + spin_unlock_irqrestore(&info->lock,flags); + + timeout=100; + while( timeout-- && !info->irq_occurred ) { + msleep_interruptible(10); + } + + spin_lock_irqsave(&info->lock,flags); + reset_port(info); + spin_unlock_irqrestore(&info->lock,flags); + + return info->irq_occurred; +} + +/* initialize individual SCA device (2 ports) + */ +static bool sca_init(SLMP_INFO *info) +{ + /* set wait controller to single mem partition (low), no wait states */ + write_reg(info, PABR0, 0); /* wait controller addr boundary 0 */ + write_reg(info, PABR1, 0); /* wait controller addr boundary 1 */ + write_reg(info, WCRL, 0); /* wait controller low range */ + write_reg(info, WCRM, 0); /* wait controller mid range */ + write_reg(info, WCRH, 0); /* wait controller high range */ + + /* DPCR, DMA Priority Control + * + * 07..05 Not used, must be 0 + * 04 BRC, bus release condition: 0=all transfers complete + * 03 CCC, channel change condition: 0=every cycle + * 02..00 PR<2..0>, priority 100=round robin + * + * 00000100 = 0x04 + */ + write_reg(info, DPCR, dma_priority); + + /* DMA Master Enable, BIT7: 1=enable all channels */ + write_reg(info, DMER, 0x80); + + /* enable all interrupt classes */ + write_reg(info, IER0, 0xff); /* TxRDY,RxRDY,TxINT,RxINT (ports 0-1) */ + write_reg(info, IER1, 0xff); /* DMIB,DMIA (channels 0-3) */ + write_reg(info, IER2, 0xf0); /* TIRQ (timers 0-3) */ + + /* ITCR, interrupt control register + * 07 IPC, interrupt priority, 0=MSCI->DMA + * 06..05 IAK<1..0>, Acknowledge cycle, 00=non-ack cycle + * 04 VOS, Vector Output, 0=unmodified vector + * 03..00 Reserved, must be 0 + */ + write_reg(info, ITCR, 0); + + return true; +} + +/* initialize adapter hardware + */ +static bool init_adapter(SLMP_INFO *info) +{ + int i; + + /* Set BIT30 of Local Control Reg 0x50 to reset SCA */ + volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50); + u32 readval; + + info->misc_ctrl_value |= BIT30; + *MiscCtrl = info->misc_ctrl_value; + + /* + * Force at least 170ns delay before clearing + * reset bit. Each read from LCR takes at least + * 30ns so 10 times for 300ns to be safe. + */ + for(i=0;i<10;i++) + readval = *MiscCtrl; + + info->misc_ctrl_value &= ~BIT30; + *MiscCtrl = info->misc_ctrl_value; + + /* init control reg (all DTRs off, all clksel=input) */ + info->ctrlreg_value = 0xaa; + write_control_reg(info); + + { + volatile u32 *LCR1BRDR = (u32 *)(info->lcr_base + 0x2c); + lcr1_brdr_value &= ~(BIT5 + BIT4 + BIT3); + + switch(read_ahead_count) + { + case 16: + lcr1_brdr_value |= BIT5 + BIT4 + BIT3; + break; + case 8: + lcr1_brdr_value |= BIT5 + BIT4; + break; + case 4: + lcr1_brdr_value |= BIT5 + BIT3; + break; + case 0: + lcr1_brdr_value |= BIT5; + break; + } + + *LCR1BRDR = lcr1_brdr_value; + *MiscCtrl = misc_ctrl_value; + } + + sca_init(info->port_array[0]); + sca_init(info->port_array[2]); + + return true; +} + +/* Loopback an HDLC frame to test the hardware + * interrupt and DMA functions. 
+ */ +static bool loopback_test(SLMP_INFO *info) +{ +#define TESTFRAMESIZE 20 + + unsigned long timeout; + u16 count = TESTFRAMESIZE; + unsigned char buf[TESTFRAMESIZE]; + bool rc = false; + unsigned long flags; + + struct tty_struct *oldtty = info->port.tty; + u32 speed = info->params.clock_speed; + + info->params.clock_speed = 3686400; + info->port.tty = NULL; + + /* assume failure */ + info->init_error = DiagStatus_DmaFailure; + + /* build and send transmit frame */ + for (count = 0; count < TESTFRAMESIZE;++count) + buf[count] = (unsigned char)count; + + memset(info->tmp_rx_buf,0,TESTFRAMESIZE); + + /* program hardware for HDLC and enabled receiver */ + spin_lock_irqsave(&info->lock,flags); + hdlc_mode(info); + enable_loopback(info,1); + rx_start(info); + info->tx_count = count; + tx_load_dma_buffer(info,buf,count); + tx_start(info); + spin_unlock_irqrestore(&info->lock,flags); + + /* wait for receive complete */ + /* Set a timeout for waiting for interrupt. */ + for ( timeout = 100; timeout; --timeout ) { + msleep_interruptible(10); + + if (rx_get_frame(info)) { + rc = true; + break; + } + } + + /* verify received frame length and contents */ + if (rc && + ( info->tmp_rx_buf_count != count || + memcmp(buf, info->tmp_rx_buf,count))) { + rc = false; + } + + spin_lock_irqsave(&info->lock,flags); + reset_adapter(info); + spin_unlock_irqrestore(&info->lock,flags); + + info->params.clock_speed = speed; + info->port.tty = oldtty; + + return rc; +} + +/* Perform diagnostics on hardware + */ +static int adapter_test( SLMP_INFO *info ) +{ + unsigned long flags; + if ( debug_level >= DEBUG_LEVEL_INFO ) + printk( "%s(%d):Testing device %s\n", + __FILE__,__LINE__,info->device_name ); + + spin_lock_irqsave(&info->lock,flags); + init_adapter(info); + spin_unlock_irqrestore(&info->lock,flags); + + info->port_array[0]->port_count = 0; + + if ( register_test(info->port_array[0]) && + register_test(info->port_array[1])) { + + info->port_array[0]->port_count = 2; + + if ( register_test(info->port_array[2]) && + register_test(info->port_array[3]) ) + info->port_array[0]->port_count += 2; + } + else { + printk( "%s(%d):Register test failure for device %s Addr=%08lX\n", + __FILE__,__LINE__,info->device_name, (unsigned long)(info->phys_sca_base)); + return -ENODEV; + } + + if ( !irq_test(info->port_array[0]) || + !irq_test(info->port_array[1]) || + (info->port_count == 4 && !irq_test(info->port_array[2])) || + (info->port_count == 4 && !irq_test(info->port_array[3]))) { + printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n", + __FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) ); + return -ENODEV; + } + + if (!loopback_test(info->port_array[0]) || + !loopback_test(info->port_array[1]) || + (info->port_count == 4 && !loopback_test(info->port_array[2])) || + (info->port_count == 4 && !loopback_test(info->port_array[3]))) { + printk( "%s(%d):DMA test failure for device %s\n", + __FILE__,__LINE__,info->device_name); + return -ENODEV; + } + + if ( debug_level >= DEBUG_LEVEL_INFO ) + printk( "%s(%d):device %s passed diagnostics\n", + __FILE__,__LINE__,info->device_name ); + + info->port_array[0]->init_error = 0; + info->port_array[1]->init_error = 0; + if ( info->port_count > 2 ) { + info->port_array[2]->init_error = 0; + info->port_array[3]->init_error = 0; + } + + return 0; +} + +/* Test the shared memory on a PCI adapter. 
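The memory_test() routine introduced just below uses the classic two-phase pattern: a handful of bit patterns written and read back at a single word to exercise the data lines, then an incrementing value across the whole window to exercise the address lines. Here is a hypothetical user-space sketch of the same idea over an ordinary malloc()ed buffer; on cached RAM it trivially passes (a compiler may even fold the read-backs), the point is only to show the access pattern the driver applies to the adapter's shared-memory window:

/* memtest.c - user-space sketch of the data-line/address-line test below */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MEM_WORDS 4096

static int mem_test(unsigned long *mem, unsigned long words)
{
        static const unsigned long patterns[] = {
                0x0, 0x55555555, 0xaaaaaaaa, 0x66666666,
                0x99999999, 0xffffffff, 0x12345678
        };
        unsigned long i;

        /* phase 1: data lines - write/read the patterns at one location */
        for (i = 0; i < sizeof(patterns) / sizeof(patterns[0]); i++) {
                mem[0] = patterns[i];
                if (mem[0] != patterns[i])
                        return 0;
        }

        /* phase 2: address lines - unique value per word, then verify */
        for (i = 0; i < words; i++)
                mem[i] = i * 4;
        for (i = 0; i < words; i++)
                if (mem[i] != i * 4)
                        return 0;

        memset(mem, 0, words * sizeof(*mem));
        return 1;
}

int main(void)
{
        unsigned long *buf = malloc(MEM_WORDS * sizeof(*buf));

        if (!buf)
                return 1;
        printf("memory test %s\n", mem_test(buf, MEM_WORDS) ? "passed" : "failed");
        free(buf);
        return 0;
}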
+ */ +static bool memory_test(SLMP_INFO *info) +{ + static unsigned long testval[] = { 0x0, 0x55555555, 0xaaaaaaaa, + 0x66666666, 0x99999999, 0xffffffff, 0x12345678 }; + unsigned long count = ARRAY_SIZE(testval); + unsigned long i; + unsigned long limit = SCA_MEM_SIZE/sizeof(unsigned long); + unsigned long * addr = (unsigned long *)info->memory_base; + + /* Test data lines with test pattern at one location. */ + + for ( i = 0 ; i < count ; i++ ) { + *addr = testval[i]; + if ( *addr != testval[i] ) + return false; + } + + /* Test address lines with incrementing pattern over */ + /* entire address range. */ + + for ( i = 0 ; i < limit ; i++ ) { + *addr = i * 4; + addr++; + } + + addr = (unsigned long *)info->memory_base; + + for ( i = 0 ; i < limit ; i++ ) { + if ( *addr != i * 4 ) + return false; + addr++; + } + + memset( info->memory_base, 0, SCA_MEM_SIZE ); + return true; +} + +/* Load data into PCI adapter shared memory. + * + * The PCI9050 releases control of the local bus + * after completing the current read or write operation. + * + * While the PCI9050 write FIFO not empty, the + * PCI9050 treats all of the writes as a single transaction + * and does not release the bus. This causes DMA latency problems + * at high speeds when copying large data blocks to the shared memory. + * + * This function breaks a write into multiple transations by + * interleaving a read which flushes the write FIFO and 'completes' + * the write transation. This allows any pending DMA request to gain control + * of the local bus in a timely fasion. + */ +static void load_pci_memory(SLMP_INFO *info, char* dest, const char* src, unsigned short count) +{ + /* A load interval of 16 allows for 4 32-bit writes at */ + /* 136ns each for a maximum latency of 542ns on the local bus.*/ + + unsigned short interval = count / sca_pci_load_interval; + unsigned short i; + + for ( i = 0 ; i < interval ; i++ ) + { + memcpy(dest, src, sca_pci_load_interval); + read_status_reg(info); + dest += sca_pci_load_interval; + src += sca_pci_load_interval; + } + + memcpy(dest, src, count % sca_pci_load_interval); +} + +static void trace_block(SLMP_INFO *info,const char* data, int count, int xmit) +{ + int i; + int linecount; + if (xmit) + printk("%s tx data:\n",info->device_name); + else + printk("%s rx data:\n",info->device_name); + + while(count) { + if (count > 16) + linecount = 16; + else + linecount = count; + + for(i=0;i<linecount;i++) + printk("%02X ",(unsigned char)data[i]); + for(;i<17;i++) + printk(" "); + for(i=0;i<linecount;i++) { + if (data[i]>=040 && data[i]<=0176) + printk("%c",data[i]); + else + printk("."); + } + printk("\n"); + + data += linecount; + count -= linecount; + } +} /* end of trace_block() */ + +/* called when HDLC frame times out + * update stats and do tx completion processing + */ +static void tx_timeout(unsigned long context) +{ + SLMP_INFO *info = (SLMP_INFO*)context; + unsigned long flags; + + if ( debug_level >= DEBUG_LEVEL_INFO ) + printk( "%s(%d):%s tx_timeout()\n", + __FILE__,__LINE__,info->device_name); + if(info->tx_active && info->params.mode == MGSL_MODE_HDLC) { + info->icount.txtimeout++; + } + spin_lock_irqsave(&info->lock,flags); + info->tx_active = false; + info->tx_count = info->tx_put = info->tx_get = 0; + + spin_unlock_irqrestore(&info->lock,flags); + +#if SYNCLINK_GENERIC_HDLC + if (info->netcount) + hdlcdev_tx_done(info); + else +#endif + bh_transmit(info); +} + +/* called to periodically check the DSR/RI modem signal input status + */ +static void status_timeout(unsigned long context) 
+{ + u16 status = 0; + SLMP_INFO *info = (SLMP_INFO*)context; + unsigned long flags; + unsigned char delta; + + + spin_lock_irqsave(&info->lock,flags); + get_signals(info); + spin_unlock_irqrestore(&info->lock,flags); + + /* check for DSR/RI state change */ + + delta = info->old_signals ^ info->serial_signals; + info->old_signals = info->serial_signals; + + if (delta & SerialSignal_DSR) + status |= MISCSTATUS_DSR_LATCHED|(info->serial_signals&SerialSignal_DSR); + + if (delta & SerialSignal_RI) + status |= MISCSTATUS_RI_LATCHED|(info->serial_signals&SerialSignal_RI); + + if (delta & SerialSignal_DCD) + status |= MISCSTATUS_DCD_LATCHED|(info->serial_signals&SerialSignal_DCD); + + if (delta & SerialSignal_CTS) + status |= MISCSTATUS_CTS_LATCHED|(info->serial_signals&SerialSignal_CTS); + + if (status) + isr_io_pin(info,status); + + mod_timer(&info->status_timer, jiffies + msecs_to_jiffies(10)); +} + + +/* Register Access Routines - + * All registers are memory mapped + */ +#define CALC_REGADDR() \ + unsigned char * RegAddr = (unsigned char*)(info->sca_base + Addr); \ + if (info->port_num > 1) \ + RegAddr += 256; /* port 0-1 SCA0, 2-3 SCA1 */ \ + if ( info->port_num & 1) { \ + if (Addr > 0x7f) \ + RegAddr += 0x40; /* DMA access */ \ + else if (Addr > 0x1f && Addr < 0x60) \ + RegAddr += 0x20; /* MSCI access */ \ + } + + +static unsigned char read_reg(SLMP_INFO * info, unsigned char Addr) +{ + CALC_REGADDR(); + return *RegAddr; +} +static void write_reg(SLMP_INFO * info, unsigned char Addr, unsigned char Value) +{ + CALC_REGADDR(); + *RegAddr = Value; +} + +static u16 read_reg16(SLMP_INFO * info, unsigned char Addr) +{ + CALC_REGADDR(); + return *((u16 *)RegAddr); +} + +static void write_reg16(SLMP_INFO * info, unsigned char Addr, u16 Value) +{ + CALC_REGADDR(); + *((u16 *)RegAddr) = Value; +} + +static unsigned char read_status_reg(SLMP_INFO * info) +{ + unsigned char *RegAddr = (unsigned char *)info->statctrl_base; + return *RegAddr; +} + +static void write_control_reg(SLMP_INFO * info) +{ + unsigned char *RegAddr = (unsigned char *)info->statctrl_base; + *RegAddr = info->port_array[0]->ctrlreg_value; +} + + +static int __devinit synclinkmp_init_one (struct pci_dev *dev, + const struct pci_device_id *ent) +{ + if (pci_enable_device(dev)) { + printk("error enabling pci device %p\n", dev); + return -EIO; + } + device_init( ++synclinkmp_adapter_count, dev ); + return 0; +} + +static void __devexit synclinkmp_remove_one (struct pci_dev *dev) +{ +} diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index c556ed9db13..81f13958e75 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -46,7 +46,7 @@ #include <asm/irq_regs.h> /* Whether we react on sysrq keys or just ignore them */ -static int __read_mostly sysrq_enabled = 1; +static int __read_mostly sysrq_enabled = SYSRQ_DEFAULT_ENABLE; static bool __read_mostly sysrq_always_enabled; static bool sysrq_on(void) @@ -571,6 +571,7 @@ struct sysrq_state { unsigned int alt_use; bool active; bool need_reinject; + bool reinjecting; }; static void sysrq_reinject_alt_sysrq(struct work_struct *work) @@ -581,6 +582,10 @@ static void sysrq_reinject_alt_sysrq(struct work_struct *work) unsigned int alt_code = sysrq->alt_use; if (sysrq->need_reinject) { + /* we do not want the assignment to be reordered */ + sysrq->reinjecting = true; + mb(); + /* Simulate press and release of Alt + SysRq */ input_inject_event(handle, EV_KEY, alt_code, 1); input_inject_event(handle, EV_KEY, KEY_SYSRQ, 1); @@ -589,6 +594,9 @@ static void sysrq_reinject_alt_sysrq(struct 
work_struct *work) input_inject_event(handle, EV_KEY, KEY_SYSRQ, 0); input_inject_event(handle, EV_KEY, alt_code, 0); input_inject_event(handle, EV_SYN, SYN_REPORT, 1); + + mb(); + sysrq->reinjecting = false; } } @@ -599,6 +607,13 @@ static bool sysrq_filter(struct input_handle *handle, bool was_active = sysrq->active; bool suppress; + /* + * Do not filter anything if we are in the process of re-injecting + * Alt+SysRq combination. + */ + if (sysrq->reinjecting) + return false; + switch (type) { case EV_SYN: @@ -629,7 +644,7 @@ static bool sysrq_filter(struct input_handle *handle, sysrq->alt_use = sysrq->alt; /* * If nothing else will be pressed we'll need - * to * re-inject Alt-SysRq keysroke. + * to re-inject Alt-SysRq keysroke. */ sysrq->need_reinject = true; } diff --git a/drivers/tty/tty_audit.c b/drivers/tty/tty_audit.c index f64582b0f62..7c586692062 100644 --- a/drivers/tty/tty_audit.c +++ b/drivers/tty/tty_audit.c @@ -95,8 +95,10 @@ static void tty_audit_buf_push(struct task_struct *tsk, uid_t loginuid, { if (buf->valid == 0) return; - if (audit_enabled == 0) + if (audit_enabled == 0) { + buf->valid = 0; return; + } tty_audit_log("tty", tsk, loginuid, sessionid, buf->major, buf->minor, buf->data, buf->valid); buf->valid = 0; diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 464d09d9787..936a4ead6c2 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -90,7 +90,6 @@ #include <linux/proc_fs.h> #include <linux/init.h> #include <linux/module.h> -#include <linux/smp_lock.h> #include <linux/device.h> #include <linux/wait.h> #include <linux/bitops.h> @@ -2465,12 +2464,12 @@ out: * Locking: none (up to the driver) */ -static int tty_tiocmget(struct tty_struct *tty, struct file *file, int __user *p) +static int tty_tiocmget(struct tty_struct *tty, int __user *p) { int retval = -EINVAL; if (tty->ops->tiocmget) { - retval = tty->ops->tiocmget(tty, file); + retval = tty->ops->tiocmget(tty); if (retval >= 0) retval = put_user(retval, p); @@ -2481,7 +2480,6 @@ static int tty_tiocmget(struct tty_struct *tty, struct file *file, int __user *p /** * tty_tiocmset - set modem status * @tty: tty device - * @file: user file pointer * @cmd: command - clear bits, set bits or set all * @p: pointer to desired bits * @@ -2491,7 +2489,7 @@ static int tty_tiocmget(struct tty_struct *tty, struct file *file, int __user *p * Locking: none (up to the driver) */ -static int tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int cmd, +static int tty_tiocmset(struct tty_struct *tty, unsigned int cmd, unsigned __user *p) { int retval; @@ -2518,7 +2516,7 @@ static int tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int } set &= TIOCM_DTR|TIOCM_RTS|TIOCM_OUT1|TIOCM_OUT2|TIOCM_LOOP; clear &= TIOCM_DTR|TIOCM_RTS|TIOCM_OUT1|TIOCM_OUT2|TIOCM_LOOP; - return tty->ops->tiocmset(tty, file, set, clear); + return tty->ops->tiocmset(tty, set, clear); } static int tty_tiocgicount(struct tty_struct *tty, void __user *arg) @@ -2627,6 +2625,11 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return put_user(tty->ldisc->ops->num, (int __user *)p); case TIOCSETD: return tiocsetd(tty, p); + case TIOCVHANGUP: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + tty_vhangup(tty); + return 0; case TIOCGDEV: { unsigned int ret = new_encode_dev(tty_devnum(real_tty)); @@ -2655,11 +2658,11 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return send_break(tty, arg ? 
arg*100 : 250); case TIOCMGET: - return tty_tiocmget(tty, file, p); + return tty_tiocmget(tty, p); case TIOCMSET: case TIOCMBIC: case TIOCMBIS: - return tty_tiocmset(tty, file, cmd, p); + return tty_tiocmset(tty, cmd, p); case TIOCGICOUNT: retval = tty_tiocgicount(tty, p); /* For the moment allow fall through to the old method */ @@ -2677,7 +2680,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg) break; } if (tty->ops->ioctl) { - retval = (tty->ops->ioctl)(tty, file, cmd, arg); + retval = (tty->ops->ioctl)(tty, cmd, arg); if (retval != -ENOIOCTLCMD) return retval; } @@ -2705,7 +2708,7 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd, return -EINVAL; if (tty->ops->compat_ioctl) { - retval = (tty->ops->compat_ioctl)(tty, file, cmd, arg); + retval = (tty->ops->compat_ioctl)(tty, cmd, arg); if (retval != -ENOIOCTLCMD) return retval; } @@ -3256,8 +3259,8 @@ static ssize_t show_cons_active(struct device *dev, struct console *c; ssize_t count = 0; - acquire_console_sem(); - for (c = console_drivers; c; c = c->next) { + console_lock(); + for_each_console(c) { if (!c->device) continue; if (!c->write) @@ -3271,7 +3274,7 @@ static ssize_t show_cons_active(struct device *dev, while (i--) count += sprintf(buf + count, "%s%d%c", cs[i]->name, cs[i]->index, i ? ' ':'\n'); - release_console_sem(); + console_unlock(); return count; } @@ -3306,7 +3309,7 @@ int __init tty_init(void) if (IS_ERR(consdev)) consdev = NULL; else - device_create_file(consdev, &dev_attr_active); + WARN_ON(device_create_file(consdev, &dev_attr_active) < 0); #ifdef CONFIG_VT vty_init(&console_fops); diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c index 0c188997145..1a1135d580a 100644 --- a/drivers/tty/tty_ioctl.c +++ b/drivers/tty/tty_ioctl.c @@ -486,7 +486,7 @@ int tty_termios_hw_change(struct ktermios *a, struct ktermios *b) EXPORT_SYMBOL(tty_termios_hw_change); /** - * change_termios - update termios values + * tty_set_termios - update termios values * @tty: tty to update * @new_termios: desired new value * @@ -497,7 +497,7 @@ EXPORT_SYMBOL(tty_termios_hw_change); * Locking: termios_mutex */ -static void change_termios(struct tty_struct *tty, struct ktermios *new_termios) +int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios) { struct ktermios old_termios; struct tty_ldisc *ld; @@ -553,7 +553,9 @@ static void change_termios(struct tty_struct *tty, struct ktermios *new_termios) tty_ldisc_deref(ld); } mutex_unlock(&tty->termios_mutex); + return 0; } +EXPORT_SYMBOL_GPL(tty_set_termios); /** * set_termios - set termios values for a tty @@ -562,7 +564,7 @@ static void change_termios(struct tty_struct *tty, struct ktermios *new_termios) * @opt: option information * * Helper function to prepare termios data and run necessary other - * functions before using change_termios to do the actual changes. + * functions before using tty_set_termios to do the actual changes. 
* * Locking: * Called functions take ldisc and termios_mutex locks @@ -620,7 +622,7 @@ static int set_termios(struct tty_struct *tty, void __user *arg, int opt) return -EINTR; } - change_termios(tty, &tmp_termios); + tty_set_termios(tty, &tmp_termios); /* FIXME: Arguably if tmp_termios == tty->termios AND the actual requested termios was not tmp_termios then we may @@ -797,7 +799,7 @@ static int set_sgttyb(struct tty_struct *tty, struct sgttyb __user *sgttyb) termios.c_ospeed); #endif mutex_unlock(&tty->termios_mutex); - change_termios(tty, &termios); + tty_set_termios(tty, &termios); return 0; } #endif @@ -951,6 +953,8 @@ int tty_mode_ioctl(struct tty_struct *tty, struct file *file, int ret = 0; struct ktermios kterm; + BUG_ON(file == NULL); + if (tty->driver->type == TTY_DRIVER_TYPE_PTY && tty->driver->subtype == PTY_TYPE_MASTER) real_tty = tty->link; diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c index 4214d58276f..0fc564a9770 100644 --- a/drivers/tty/tty_ldisc.c +++ b/drivers/tty/tty_ldisc.c @@ -34,8 +34,6 @@ #include <linux/vt_kern.h> #include <linux/selection.h> -#include <linux/smp_lock.h> /* For the moment */ - #include <linux/kmod.h> #include <linux/nsproxy.h> @@ -535,6 +533,19 @@ static int tty_ldisc_halt(struct tty_struct *tty) } /** + * tty_ldisc_flush_works - flush all works of a tty + * @tty: tty device to flush works for + * + * Sync flush all works belonging to @tty. + */ +static void tty_ldisc_flush_works(struct tty_struct *tty) +{ + flush_work_sync(&tty->hangup_work); + flush_work_sync(&tty->SAK_work); + flush_delayed_work_sync(&tty->buf.work); +} + +/** * tty_ldisc_wait_idle - wait for the ldisc to become idle * @tty: tty to wait for * @@ -653,7 +664,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc) mutex_unlock(&tty->ldisc_mutex); - flush_scheduled_work(); + tty_ldisc_flush_works(tty); retval = tty_ldisc_wait_idle(tty); @@ -905,7 +916,7 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty) tty_unlock(); tty_ldisc_halt(tty); - flush_scheduled_work(); + tty_ldisc_flush_works(tty); tty_lock(); mutex_lock(&tty->ldisc_mutex); diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c index e95d7876ca6..6dd3c68c13a 100644 --- a/drivers/tty/vt/keyboard.c +++ b/drivers/tty/vt/keyboard.c @@ -654,7 +654,8 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag) if (value >= ARRAY_SIZE(fn_handler)) return; if ((kbd->kbdmode == VC_RAW || - kbd->kbdmode == VC_MEDIUMRAW) && + kbd->kbdmode == VC_MEDIUMRAW || + kbd->kbdmode == VC_OFF) && value != KVAL(K_SAK)) return; /* SAK is allowed even in raw mode */ fn_handler[value](vc); @@ -1295,7 +1296,7 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw) if (rc == NOTIFY_STOP) return; - if (raw_mode && type != KT_SPEC && type != KT_SHIFT) + if ((raw_mode || kbd->kbdmode == VC_OFF) && type != KT_SPEC && type != KT_SHIFT) return; (*k_handler[type])(vc, keysym & 0xff, !down); diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c index ebae344ce91..adf0ad2a885 100644 --- a/drivers/tty/vt/selection.c +++ b/drivers/tty/vt/selection.c @@ -26,7 +26,6 @@ #include <linux/selection.h> #include <linux/tiocl.h> #include <linux/console.h> -#include <linux/smp_lock.h> /* Don't take this from <ctype.h>: 011-015 on the screen aren't spaces */ #define isspace(c) ((c) == ' ') @@ -316,9 +315,9 @@ int paste_selection(struct tty_struct *tty) /* always called with BTM from vt_ioctl */ WARN_ON(!tty_locked()); - acquire_console_sem(); + console_lock(); 
poke_blanked_console(); - release_console_sem(); + console_unlock(); ld = tty_ldisc_ref(tty); if (!ld) { diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c index eab3a1ff99e..1564261e80c 100644 --- a/drivers/tty/vt/vc_screen.c +++ b/drivers/tty/vt/vc_screen.c @@ -28,13 +28,11 @@ #include <linux/interrupt.h> #include <linux/mm.h> #include <linux/init.h> -#include <linux/mutex.h> #include <linux/vt_kern.h> #include <linux/selection.h> #include <linux/kbd_kern.h> #include <linux/console.h> #include <linux/device.h> -#include <linux/smp_lock.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/poll.h> @@ -51,6 +49,8 @@ #undef addr #define HEADER_SIZE 4 +#define CON_BUF_SIZE (CONFIG_BASE_SMALL ? 256 : PAGE_SIZE) + struct vcs_poll_data { struct notifier_block notifier; unsigned int cons_num; @@ -131,21 +131,45 @@ vcs_poll_data_get(struct file *file) return poll; } +/* + * Returns VC for inode. + * Must be called with console_lock. + */ +static struct vc_data* +vcs_vc(struct inode *inode, int *viewed) +{ + unsigned int currcons = iminor(inode) & 127; + + WARN_CONSOLE_UNLOCKED(); + + if (currcons == 0) { + currcons = fg_console; + if (viewed) + *viewed = 1; + } else { + currcons--; + if (viewed) + *viewed = 0; + } + return vc_cons[currcons].d; +} + +/* + * Returns size for VC carried by inode. + * Must be called with console_lock. + */ static int vcs_size(struct inode *inode) { int size; int minor = iminor(inode); - int currcons = minor & 127; struct vc_data *vc; - if (currcons == 0) - currcons = fg_console; - else - currcons--; - if (!vc_cons_allocated(currcons)) + WARN_CONSOLE_UNLOCKED(); + + vc = vcs_vc(inode, NULL); + if (!vc) return -ENXIO; - vc = vc_cons[currcons].d; size = vc->vc_rows * vc->vc_cols; @@ -158,11 +182,13 @@ static loff_t vcs_lseek(struct file *file, loff_t offset, int orig) { int size; - mutex_lock(&con_buf_mtx); + console_lock(); size = vcs_size(file->f_path.dentry->d_inode); + console_unlock(); + if (size < 0) + return size; switch (orig) { default: - mutex_unlock(&con_buf_mtx); return -EINVAL; case 2: offset += size; @@ -173,11 +199,9 @@ static loff_t vcs_lseek(struct file *file, loff_t offset, int orig) break; } if (offset < 0 || offset > size) { - mutex_unlock(&con_buf_mtx); return -EINVAL; } file->f_pos = offset; - mutex_unlock(&con_buf_mtx); return file->f_pos; } @@ -190,33 +214,28 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) struct vc_data *vc; struct vcs_poll_data *poll; long pos; - long viewed, attr, read; - int col, maxcol; + long attr, read; + int col, maxcol, viewed; unsigned short *org = NULL; ssize_t ret; + char *con_buf; - mutex_lock(&con_buf_mtx); + con_buf = (char *) __get_free_page(GFP_KERNEL); + if (!con_buf) + return -ENOMEM; pos = *ppos; /* Select the proper current console and verify * sanity of the situation under the console lock. */ - acquire_console_sem(); + console_lock(); attr = (currcons & 128); - currcons = (currcons & 127); - if (currcons == 0) { - currcons = fg_console; - viewed = 1; - } else { - currcons--; - viewed = 0; - } ret = -ENXIO; - if (!vc_cons_allocated(currcons)) + vc = vcs_vc(inode, &viewed); + if (!vc) goto unlock_out; - vc = vc_cons[currcons].d; ret = -EINVAL; if (pos < 0) @@ -237,6 +256,12 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) * could sleep. 
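The new vcs_vc() helper and the attr test in vcs_read() above encode the long-standing /dev/vcs* minor-number convention: bit 7 of the minor selects the attribute (/dev/vcsa*) variant, minor 0 (and 128) refer to the currently viewed console, and any other value N maps to /dev/ttyN. A small sketch of that decoding (illustrative only; assumes the conventional vcsa minor base of 128):

/* vcs_minor.c - decodes a /dev/vcs* minor the way vcs_vc()/vcs_read() above do */
#include <stdio.h>

static void decode(unsigned int minor)
{
        int attr = (minor & 128) != 0;          /* /dev/vcsa*: text plus attributes */
        unsigned int idx = minor & 127;

        if (idx == 0)
                printf("minor %u: %s of the currently viewed console\n",
                       minor, attr ? "text+attrs" : "text");
        else
                printf("minor %u: %s of /dev/tty%u\n",
                       minor, attr ? "text+attrs" : "text", idx);
}

int main(void)
{
        decode(0);      /* /dev/vcs   */
        decode(3);      /* /dev/vcs3  */
        decode(131);    /* /dev/vcsa3 */
        return 0;
}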
*/ size = vcs_size(inode); + if (size < 0) { + if (read) + break; + ret = size; + goto unlock_out; + } if (pos >= size) break; if (count > size - pos) @@ -336,9 +361,9 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) * the pagefault handling code may want to call printk(). */ - release_console_sem(); + console_unlock(); ret = copy_to_user(buf, con_buf_start, orig_count); - acquire_console_sem(); + console_lock(); if (ret) { read += (orig_count - ret); @@ -354,8 +379,8 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) if (read) ret = read; unlock_out: - release_console_sem(); - mutex_unlock(&con_buf_mtx); + console_unlock(); + free_page((unsigned long) con_buf); return ret; } @@ -366,35 +391,29 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) unsigned int currcons = iminor(inode); struct vc_data *vc; long pos; - long viewed, attr, size, written; + long attr, size, written; char *con_buf0; - int col, maxcol; + int col, maxcol, viewed; u16 *org0 = NULL, *org = NULL; size_t ret; + char *con_buf; - mutex_lock(&con_buf_mtx); + con_buf = (char *) __get_free_page(GFP_KERNEL); + if (!con_buf) + return -ENOMEM; pos = *ppos; /* Select the proper current console and verify * sanity of the situation under the console lock. */ - acquire_console_sem(); + console_lock(); attr = (currcons & 128); - currcons = (currcons & 127); - - if (currcons == 0) { - currcons = fg_console; - viewed = 1; - } else { - currcons--; - viewed = 0; - } ret = -ENXIO; - if (!vc_cons_allocated(currcons)) + vc = vcs_vc(inode, &viewed); + if (!vc) goto unlock_out; - vc = vc_cons[currcons].d; size = vcs_size(inode); ret = -EINVAL; @@ -414,9 +433,9 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) /* Temporarily drop the console lock so that we can read * in the write data from userspace safely. */ - release_console_sem(); + console_unlock(); ret = copy_from_user(con_buf, buf, this_round); - acquire_console_sem(); + console_lock(); if (ret) { this_round -= ret; @@ -436,6 +455,12 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) * Return data written up to now on failure. 
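As the hunks above show, vcs_read()/vcs_write() drop console_lock() around copy_to_user()/copy_from_user() and recheck vcs_size() afterwards, because a page fault taken with the lock held could recurse into printk(). A generic sketch of that pattern for a hypothetical character-device write path follows; the "demo" names, the mutex and the state buffer are made up for illustration and are not part of the vcs code.

/* Pattern sketch: copy from user space into a private page while no
 * lock is held, then take the lock only for a short, non-faulting
 * section, mirroring what vcs_write() does with console_lock().
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static DEFINE_MUTEX(demo_lock);
static char demo_state[PAGE_SIZE];              /* protected by demo_lock */

static ssize_t demo_write(const char __user *buf, size_t count)
{
        unsigned long page = __get_free_page(GFP_KERNEL);
        size_t n = min_t(size_t, count, PAGE_SIZE);
        ssize_t ret;

        if (!page)
                return -ENOMEM;

        /* May fault and sleep: done before the lock is taken. */
        if (copy_from_user((void *)page, buf, n)) {
                ret = -EFAULT;
                goto out;
        }

        mutex_lock(&demo_lock);
        memcpy(demo_state, (void *)page, n);    /* short critical section */
        mutex_unlock(&demo_lock);
        ret = n;
out:
        free_page(page);
        return ret;
}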
*/ size = vcs_size(inode); + if (size < 0) { + if (written) + break; + ret = size; + goto unlock_out; + } if (pos >= size) break; if (this_round > size - pos) @@ -542,10 +567,8 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) vcs_scr_updated(vc); unlock_out: - release_console_sem(); - - mutex_unlock(&con_buf_mtx); - + console_unlock(); + free_page((unsigned long) con_buf); return ret; } diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 76407eca9ab..c83cdfb56fc 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -89,7 +89,6 @@ #include <linux/mutex.h> #include <linux/vt_kern.h> #include <linux/selection.h> -#include <linux/smp_lock.h> #include <linux/tiocl.h> #include <linux/kbd_kern.h> #include <linux/consolemap.h> @@ -1003,9 +1002,9 @@ static int vt_resize(struct tty_struct *tty, struct winsize *ws) struct vc_data *vc = tty->driver_data; int ret; - acquire_console_sem(); + console_lock(); ret = vc_do_resize(tty, vc, ws->ws_col, ws->ws_row); - release_console_sem(); + console_unlock(); return ret; } @@ -1271,7 +1270,7 @@ static void default_attr(struct vc_data *vc) vc->vc_color = vc->vc_def_color; } -/* console_sem is held */ +/* console_lock is held */ static void csi_m(struct vc_data *vc) { int i; @@ -1415,7 +1414,7 @@ int mouse_reporting(void) return vc_cons[fg_console].d->vc_report_mouse; } -/* console_sem is held */ +/* console_lock is held */ static void set_mode(struct vc_data *vc, int on_off) { int i; @@ -1485,7 +1484,7 @@ static void set_mode(struct vc_data *vc, int on_off) } } -/* console_sem is held */ +/* console_lock is held */ static void setterm_command(struct vc_data *vc) { switch(vc->vc_par[0]) { @@ -1545,7 +1544,7 @@ static void setterm_command(struct vc_data *vc) } } -/* console_sem is held */ +/* console_lock is held */ static void csi_at(struct vc_data *vc, unsigned int nr) { if (nr > vc->vc_cols - vc->vc_x) @@ -1555,7 +1554,7 @@ static void csi_at(struct vc_data *vc, unsigned int nr) insert_char(vc, nr); } -/* console_sem is held */ +/* console_lock is held */ static void csi_L(struct vc_data *vc, unsigned int nr) { if (nr > vc->vc_rows - vc->vc_y) @@ -1566,7 +1565,7 @@ static void csi_L(struct vc_data *vc, unsigned int nr) vc->vc_need_wrap = 0; } -/* console_sem is held */ +/* console_lock is held */ static void csi_P(struct vc_data *vc, unsigned int nr) { if (nr > vc->vc_cols - vc->vc_x) @@ -1576,7 +1575,7 @@ static void csi_P(struct vc_data *vc, unsigned int nr) delete_char(vc, nr); } -/* console_sem is held */ +/* console_lock is held */ static void csi_M(struct vc_data *vc, unsigned int nr) { if (nr > vc->vc_rows - vc->vc_y) @@ -1587,7 +1586,7 @@ static void csi_M(struct vc_data *vc, unsigned int nr) vc->vc_need_wrap = 0; } -/* console_sem is held (except via vc_init->reset_terminal */ +/* console_lock is held (except via vc_init->reset_terminal */ static void save_cur(struct vc_data *vc) { vc->vc_saved_x = vc->vc_x; @@ -1603,7 +1602,7 @@ static void save_cur(struct vc_data *vc) vc->vc_saved_G1 = vc->vc_G1_charset; } -/* console_sem is held */ +/* console_lock is held */ static void restore_cur(struct vc_data *vc) { gotoxy(vc, vc->vc_saved_x, vc->vc_saved_y); @@ -1625,7 +1624,7 @@ enum { ESnormal, ESesc, ESsquare, ESgetpars, ESgotpars, ESfunckey, EShash, ESsetG0, ESsetG1, ESpercent, ESignore, ESnonstd, ESpalette }; -/* console_sem is held (except via vc_init()) */ +/* console_lock is held (except via vc_init()) */ static void reset_terminal(struct vc_data *vc, int do_clear) { vc->vc_top = 0; @@ -1685,7 
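The comment updates above cover the CSI handlers (csi_L inserts lines, csi_P deletes characters, csi_M deletes lines, save_cur/restore_cur save and restore cursor state), all of which now document console_lock as the lock held. From user space these paths are exercised simply by writing the escape sequences described in console_codes(4); the tiny demo below only produces visible effects when run on a text console (a Linux VT, not a terminal emulator under X).

/* Emit a few of the sequences handled by save_cur/restore_cur, csi_L
 * and csi_P: ESC 7 / ESC 8 save and restore the cursor, CSI 2 L
 * inserts two blank lines, CSI 3 P deletes three characters.
 */
#include <stdio.h>

int main(void)
{
        printf("\0337");                /* save_cur()            */
        printf("\033[2L");              /* csi_L: insert 2 lines */
        printf("some text to edit\r");
        printf("\033[3P");              /* csi_P: delete 3 chars */
        printf("\0338");                /* restore_cur()         */
        fflush(stdout);
        return 0;
}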
+1684,7 @@ static void reset_terminal(struct vc_data *vc, int do_clear) csi_J(vc, 2); } -/* console_sem is held */ +/* console_lock is held */ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) { /* @@ -2068,18 +2067,6 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) } } -/* This is a temporary buffer used to prepare a tty console write - * so that we can easily avoid touching user space while holding the - * console spinlock. It is allocated in con_init and is shared by - * this code and the vc_screen read/write tty calls. - * - * We have to allocate this statically in the kernel data section - * since console_init (and thus con_init) are called before any - * kernel memory allocation is available. - */ -char con_buf[CON_BUF_SIZE]; -DEFINE_MUTEX(con_buf_mtx); - /* is_double_width() is based on the wcwidth() implementation by * Markus Kuhn -- 2007-05-26 (Unicode 5.0) * Latest version: http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c @@ -2119,7 +2106,7 @@ static int is_double_width(uint32_t ucs) return bisearch(ucs, double_width, ARRAY_SIZE(double_width) - 1); } -/* acquires console_sem */ +/* acquires console_lock */ static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int count) { #ifdef VT_BUF_VRAM_ONLY @@ -2147,20 +2134,20 @@ static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int co might_sleep(); - acquire_console_sem(); + console_lock(); vc = tty->driver_data; if (vc == NULL) { printk(KERN_ERR "vt: argh, driver_data is NULL !\n"); - release_console_sem(); + console_unlock(); return 0; } currcons = vc->vc_num; if (!vc_cons_allocated(currcons)) { - /* could this happen? */ - printk_once("con_write: tty %d not allocated\n", currcons+1); - release_console_sem(); - return 0; + /* could this happen? */ + pr_warn_once("con_write: tty %d not allocated\n", currcons+1); + console_unlock(); + return 0; } himask = vc->vc_hi_font_mask; @@ -2375,7 +2362,7 @@ rescan_last_byte: } FLUSH console_conditional_schedule(); - release_console_sem(); + console_unlock(); notify_update(vc); return n; #undef FLUSH @@ -2388,11 +2375,11 @@ rescan_last_byte: * us to do the switches asynchronously (needed when we want * to switch due to a keyboard interrupt). Synchronization * with other console code and prevention of re-entrancy is - * ensured with console_sem. + * ensured with console_lock. */ static void console_callback(struct work_struct *ignored) { - acquire_console_sem(); + console_lock(); if (want_console >= 0) { if (want_console != fg_console && @@ -2422,7 +2409,7 @@ static void console_callback(struct work_struct *ignored) } notify_update(vc_cons[fg_console].d); - release_console_sem(); + console_unlock(); } int set_console(int nr) @@ -2603,7 +2590,7 @@ static struct console vt_console_driver = { */ /* - * Generally a bit racy with respect to console_sem(). + * Generally a bit racy with respect to console_lock();. * * There are some functions which don't need it. 
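do_con_write() above decides glyph width with is_double_width(), a binary search over sorted ranges of double-width code points derived from Markus Kuhn's wcwidth(). A user-space rendition of the same interval bisearch is sketched below; the range table is a deliberately cut-down excerpt for illustration, not the full Unicode data used by the kernel.

/* Interval binary search in the style of is_double_width()/bisearch().
 * The table is a small excerpt of double-width ranges, illustration only.
 */
#include <stdint.h>
#include <stdio.h>

struct interval { uint32_t first, last; };

static const struct interval double_width[] = {
        { 0x1100, 0x115F }, { 0x2E80, 0x303E }, { 0x3041, 0x33FF },
        { 0x4E00, 0x9FFF }, { 0xAC00, 0xD7A3 }, { 0xF900, 0xFAFF },
        { 0xFF00, 0xFF60 }, { 0xFFE0, 0xFFE6 },
};

static int bisearch(uint32_t ucs, const struct interval *t, int max)
{
        int min = 0, mid;

        if (ucs < t[0].first || ucs > t[max].last)
                return 0;
        while (max >= min) {
                mid = (min + max) / 2;
                if (ucs > t[mid].last)
                        min = mid + 1;
                else if (ucs < t[mid].first)
                        max = mid - 1;
                else
                        return 1;       /* inside a double-width range */
        }
        return 0;
}

int main(void)
{
        uint32_t probes[] = { 'A', 0x4E2D, 0x30A2, 0x00E9 };
        for (unsigned i = 0; i < sizeof(probes) / sizeof(probes[0]); i++)
                printf("U+%04X double width: %d\n", probes[i],
                       bisearch(probes[i], double_width,
                                sizeof(double_width) / sizeof(double_width[0]) - 1));
        return 0;
}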
* @@ -2629,17 +2616,17 @@ int tioclinux(struct tty_struct *tty, unsigned long arg) switch (type) { case TIOCL_SETSEL: - acquire_console_sem(); + console_lock(); ret = set_selection((struct tiocl_selection __user *)(p+1), tty); - release_console_sem(); + console_unlock(); break; case TIOCL_PASTESEL: ret = paste_selection(tty); break; case TIOCL_UNBLANKSCREEN: - acquire_console_sem(); + console_lock(); unblank_screen(); - release_console_sem(); + console_unlock(); break; case TIOCL_SELLOADLUT: ret = sel_loadlut(p); @@ -2688,10 +2675,10 @@ int tioclinux(struct tty_struct *tty, unsigned long arg) } break; case TIOCL_BLANKSCREEN: /* until explicitly unblanked, not only poked */ - acquire_console_sem(); + console_lock(); ignore_poke = 1; do_blank_screen(0); - release_console_sem(); + console_unlock(); break; case TIOCL_BLANKEDSCREEN: ret = console_blanked; @@ -2790,11 +2777,11 @@ static void con_flush_chars(struct tty_struct *tty) return; /* if we race with con_close(), vt may be null */ - acquire_console_sem(); + console_lock(); vc = tty->driver_data; if (vc) set_cursor(vc); - release_console_sem(); + console_unlock(); } /* @@ -2805,7 +2792,7 @@ static int con_open(struct tty_struct *tty, struct file *filp) unsigned int currcons = tty->index; int ret = 0; - acquire_console_sem(); + console_lock(); if (tty->driver_data == NULL) { ret = vc_allocate(currcons); if (ret == 0) { @@ -2813,7 +2800,7 @@ static int con_open(struct tty_struct *tty, struct file *filp) /* Still being freed */ if (vc->port.tty) { - release_console_sem(); + console_unlock(); return -ERESTARTSYS; } tty->driver_data = vc; @@ -2827,11 +2814,11 @@ static int con_open(struct tty_struct *tty, struct file *filp) tty->termios->c_iflag |= IUTF8; else tty->termios->c_iflag &= ~IUTF8; - release_console_sem(); + console_unlock(); return ret; } } - release_console_sem(); + console_unlock(); return ret; } @@ -2844,9 +2831,9 @@ static void con_shutdown(struct tty_struct *tty) { struct vc_data *vc = tty->driver_data; BUG_ON(vc == NULL); - acquire_console_sem(); + console_lock(); vc->port.tty = NULL; - release_console_sem(); + console_unlock(); tty_shutdown(tty); } @@ -2893,13 +2880,13 @@ static int __init con_init(void) struct vc_data *vc; unsigned int currcons = 0, i; - acquire_console_sem(); + console_lock(); if (conswitchp) display_desc = conswitchp->con_startup(); if (!display_desc) { fg_console = 0; - release_console_sem(); + console_unlock(); return 0; } @@ -2940,13 +2927,13 @@ static int __init con_init(void) gotoxy(vc, vc->vc_x, vc->vc_y); csi_J(vc, 0); update_screen(vc); - printk("Console: %s %s %dx%d", + pr_info("Console: %s %s %dx%d", vc->vc_can_do_color ? 
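tioclinux() above backs the TIOCLINUX ioctl, whose sub-commands (selection, paste, screen blanking and so on) now run under console_lock(). A minimal caller using the read-only TIOCL_GETFGCONSOLE sub-command is sketched below; it assumes a VT file descriptor such as /dev/tty0 and normally needs root or a controlling VT, since other sub-commands are privileged.

/* Query the foreground console via TIOCLINUX/TIOCL_GETFGCONSOLE.
 * The first byte of the argument selects the sub-command; the result
 * comes back as the ioctl return value (0-based console index).
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/tiocl.h>

int main(void)
{
        char subcmd = TIOCL_GETFGCONSOLE;
        int fd = open("/dev/tty0", O_RDONLY);
        int fg;

        if (fd < 0) {
                perror("open /dev/tty0");
                return 1;
        }
        fg = ioctl(fd, TIOCLINUX, &subcmd);
        if (fg < 0)
                perror("TIOCLINUX");
        else
                printf("foreground console: tty%d\n", fg + 1);
        close(fd);
        return 0;
}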
"colour" : "mono", display_desc, vc->vc_cols, vc->vc_rows); printable = 1; printk("\n"); - release_console_sem(); + console_unlock(); #ifdef CONFIG_VT_CONSOLE register_console(&vt_console_driver); @@ -2994,7 +2981,7 @@ int __init vty_init(const struct file_operations *console_fops) if (IS_ERR(tty0dev)) tty0dev = NULL; else - device_create_file(tty0dev, &dev_attr_active); + WARN_ON(device_create_file(tty0dev, &dev_attr_active) < 0); vcs_init(); @@ -3037,7 +3024,7 @@ static int bind_con_driver(const struct consw *csw, int first, int last, if (!try_module_get(owner)) return -ENODEV; - acquire_console_sem(); + console_lock(); /* check if driver is registered */ for (i = 0; i < MAX_NR_CON_DRIVER; i++) { @@ -3103,7 +3090,7 @@ static int bind_con_driver(const struct consw *csw, int first, int last, clear_buffer_attributes(vc); } - printk("Console: switching "); + pr_info("Console: switching "); if (!deflt) printk("consoles %d-%d ", first+1, last+1); if (j >= 0) { @@ -3122,7 +3109,7 @@ static int bind_con_driver(const struct consw *csw, int first, int last, retval = 0; err: - release_console_sem(); + console_unlock(); module_put(owner); return retval; }; @@ -3171,7 +3158,7 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt) if (!try_module_get(owner)) return -ENODEV; - acquire_console_sem(); + console_lock(); /* check if driver is registered and if it is unbindable */ for (i = 0; i < MAX_NR_CON_DRIVER; i++) { @@ -3185,7 +3172,7 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt) } if (retval) { - release_console_sem(); + console_unlock(); goto err; } @@ -3204,12 +3191,12 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt) } if (retval) { - release_console_sem(); + console_unlock(); goto err; } if (!con_is_bound(csw)) { - release_console_sem(); + console_unlock(); goto err; } @@ -3238,7 +3225,7 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt) if (!con_is_bound(csw)) con_driver->flag &= ~CON_DRIVER_FLAG_INIT; - release_console_sem(); + console_unlock(); /* ignore return value, binding should not fail */ bind_con_driver(defcsw, first, last, deflt); err: @@ -3538,14 +3525,14 @@ int register_con_driver(const struct consw *csw, int first, int last) if (!try_module_get(owner)) return -ENODEV; - acquire_console_sem(); + console_lock(); for (i = 0; i < MAX_NR_CON_DRIVER; i++) { con_driver = ®istered_con_driver[i]; /* already registered */ if (con_driver->con == csw) - retval = -EINVAL; + retval = -EBUSY; } if (retval) @@ -3592,7 +3579,7 @@ int register_con_driver(const struct consw *csw, int first, int last) } err: - release_console_sem(); + console_unlock(); module_put(owner); return retval; } @@ -3613,7 +3600,7 @@ int unregister_con_driver(const struct consw *csw) { int i, retval = -ENODEV; - acquire_console_sem(); + console_lock(); /* cannot unregister a bound driver */ if (con_is_bound(csw)) @@ -3639,7 +3626,7 @@ int unregister_con_driver(const struct consw *csw) } } err: - release_console_sem(); + console_unlock(); return retval; } EXPORT_SYMBOL(unregister_con_driver); @@ -3656,7 +3643,12 @@ int take_over_console(const struct consw *csw, int first, int last, int deflt) int err; err = register_con_driver(csw, first, last); - + /* if we get an busy error we still want to bind the console driver + * and return success, as we may have unbound the console driver + * but not unregistered it. 
+ */ + if (err == -EBUSY) + err = 0; if (!err) bind_con_driver(csw, first, last, deflt); @@ -3804,7 +3796,8 @@ void do_unblank_screen(int leaving_gfx) return; if (!vc_cons_allocated(fg_console)) { /* impossible */ - printk("unblank_screen: tty %d not allocated ??\n", fg_console+1); + pr_warning("unblank_screen: tty %d not allocated ??\n", + fg_console+1); return; } vc = vc_cons[fg_console].d; @@ -3934,9 +3927,9 @@ int con_set_cmap(unsigned char __user *arg) { int rc; - acquire_console_sem(); + console_lock(); rc = set_get_cmap (arg,1); - release_console_sem(); + console_unlock(); return rc; } @@ -3945,9 +3938,9 @@ int con_get_cmap(unsigned char __user *arg) { int rc; - acquire_console_sem(); + console_lock(); rc = set_get_cmap (arg,0); - release_console_sem(); + console_unlock(); return rc; } @@ -3994,12 +3987,12 @@ static int con_font_get(struct vc_data *vc, struct console_font_op *op) } else font.data = NULL; - acquire_console_sem(); + console_lock(); if (vc->vc_sw->con_font_get) rc = vc->vc_sw->con_font_get(vc, &font); else rc = -ENOSYS; - release_console_sem(); + console_unlock(); if (rc) goto out; @@ -4076,12 +4069,12 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op) font.data = memdup_user(op->data, size); if (IS_ERR(font.data)) return PTR_ERR(font.data); - acquire_console_sem(); + console_lock(); if (vc->vc_sw->con_font_set) rc = vc->vc_sw->con_font_set(vc, &font, op->flags); else rc = -ENOSYS; - release_console_sem(); + console_unlock(); kfree(font.data); return rc; } @@ -4103,12 +4096,12 @@ static int con_font_default(struct vc_data *vc, struct console_font_op *op) else name[MAX_FONT_NAME - 1] = 0; - acquire_console_sem(); + console_lock(); if (vc->vc_sw->con_font_default) rc = vc->vc_sw->con_font_default(vc, &font, s); else rc = -ENOSYS; - release_console_sem(); + console_unlock(); if (!rc) { op->width = font.width; op->height = font.height; @@ -4124,7 +4117,7 @@ static int con_font_copy(struct vc_data *vc, struct console_font_op *op) if (vc->vc_mode != KD_TEXT) return -EINVAL; - acquire_console_sem(); + console_lock(); if (!vc->vc_sw->con_font_copy) rc = -ENOSYS; else if (con < 0 || !vc_cons_allocated(con)) @@ -4133,7 +4126,7 @@ static int con_font_copy(struct vc_data *vc, struct console_font_op *op) rc = 0; else rc = vc->vc_sw->con_font_copy(vc, con); - release_console_sem(); + console_unlock(); return rc; } diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c index 6b68a0fb461..937d1721998 100644 --- a/drivers/tty/vt/vt_ioctl.c +++ b/drivers/tty/vt/vt_ioctl.c @@ -27,7 +27,6 @@ #include <linux/console.h> #include <linux/consolemap.h> #include <linux/signal.h> -#include <linux/smp_lock.h> #include <linux/timex.h> #include <asm/io.h> @@ -495,7 +494,7 @@ do_unimap_ioctl(int cmd, struct unimapdesc __user *user_ud, int perm, struct vc_ * We handle the console-specific ioctl's here. We allow the * capability to modify any console, not just the fg_console. 
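con_set_cmap()/con_get_cmap() above serve the PIO_CMAP/GIO_CMAP ioctls, which move the 16-colour console palette (48 bytes of RGB triples) and now do so under console_lock(). A small reader for the current palette is sketched below; it assumes a VT file descriptor such as /dev/tty0 is available to the caller.

/* Dump the 16-entry console palette with GIO_CMAP (48 bytes of
 * R,G,B triples), the ioctl answered by con_get_cmap().
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kd.h>

int main(void)
{
        unsigned char cmap[48];
        int fd = open("/dev/tty0", O_RDONLY);

        if (fd < 0) {
                perror("open /dev/tty0");
                return 1;
        }
        if (ioctl(fd, GIO_CMAP, cmap) < 0) {
                perror("GIO_CMAP");
                close(fd);
                return 1;
        }
        for (int i = 0; i < 16; i++)
                printf("color %2d: #%02x%02x%02x\n",
                       i, cmap[3 * i], cmap[3 * i + 1], cmap[3 * i + 2]);
        close(fd);
        return 0;
}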
*/ -int vt_ioctl(struct tty_struct *tty, struct file * file, +int vt_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct vc_data *vc = tty->driver_data; @@ -649,12 +648,12 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, /* * explicitly blank/unblank the screen if switching modes */ - acquire_console_sem(); + console_lock(); if (arg == KD_TEXT) do_unblank_screen(1); else do_blank_screen(1); - release_console_sem(); + console_unlock(); break; case KDGETMODE: @@ -688,6 +687,9 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, kbd->kbdmode = VC_UNICODE; compute_shiftstate(); break; + case K_OFF: + kbd->kbdmode = VC_OFF; + break; default: ret = -EINVAL; goto out; @@ -893,7 +895,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, ret = -EINVAL; goto out; } - acquire_console_sem(); + console_lock(); vc->vt_mode = tmp; /* the frsig is ignored, so we set it to 0 */ vc->vt_mode.frsig = 0; @@ -901,7 +903,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, vc->vt_pid = get_pid(task_pid(current)); /* no switch is required -- saw@shade.msu.ru */ vc->vt_newvt = -1; - release_console_sem(); + console_unlock(); break; } @@ -910,9 +912,9 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, struct vt_mode tmp; int rc; - acquire_console_sem(); + console_lock(); memcpy(&tmp, &vc->vt_mode, sizeof(struct vt_mode)); - release_console_sem(); + console_unlock(); rc = copy_to_user(up, &tmp, sizeof(struct vt_mode)); if (rc) @@ -965,9 +967,9 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, ret = -ENXIO; else { arg--; - acquire_console_sem(); + console_lock(); ret = vc_allocate(arg); - release_console_sem(); + console_unlock(); if (ret) break; set_console(arg); @@ -990,7 +992,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, ret = -ENXIO; else { vsa.console--; - acquire_console_sem(); + console_lock(); ret = vc_allocate(vsa.console); if (ret == 0) { struct vc_data *nvc; @@ -1003,12 +1005,13 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, put_pid(nvc->vt_pid); nvc->vt_pid = get_pid(task_pid(current)); } - release_console_sem(); + console_unlock(); if (ret) break; /* Commence switch and lock */ - set_console(arg); + set_console(vsa.console); } + break; } /* @@ -1044,7 +1047,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, /* * Switching-from response */ - acquire_console_sem(); + console_lock(); if (vc->vt_newvt >= 0) { if (arg == 0) /* @@ -1063,7 +1066,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, vc->vt_newvt = -1; ret = vc_allocate(newvt); if (ret) { - release_console_sem(); + console_unlock(); break; } /* @@ -1083,7 +1086,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, if (arg != VT_ACKACQ) ret = -EINVAL; } - release_console_sem(); + console_unlock(); break; /* @@ -1096,20 +1099,20 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, } if (arg == 0) { /* deallocate all unused consoles, but leave 0 */ - acquire_console_sem(); + console_lock(); for (i=1; i<MAX_NR_CONSOLES; i++) if (! 
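The new KDSKBMODE case above lets user space switch the VT keyboard into K_OFF (VC_OFF), in which the handlers patched in keyboard.c discard everything except SAK. A minimal example that flips the mode and restores it is sketched below; it assumes a kernel that carries this patch (older kernels return EINVAL for K_OFF), a /dev/tty0 node, and sufficient privileges.

/* Toggle the console keyboard into K_OFF and back via KDGKBMODE /
 * KDSKBMODE.  In K_OFF the VC_OFF checks added to keyboard.c drop all
 * key events except SAK.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kd.h>

int main(void)
{
        int oldmode;
        int fd = open("/dev/tty0", O_RDWR);

        if (fd < 0) {
                perror("open /dev/tty0");
                return 1;
        }
        if (ioctl(fd, KDGKBMODE, &oldmode) < 0) {
                perror("KDGKBMODE");
                close(fd);
                return 1;
        }
        if (ioctl(fd, KDSKBMODE, K_OFF) < 0) {
                perror("KDSKBMODE K_OFF");      /* EINVAL without this patch */
        } else {
                sleep(2);                       /* keyboard is dead meanwhile */
                ioctl(fd, KDSKBMODE, oldmode);  /* restore the previous mode */
        }
        close(fd);
        return 0;
}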
VT_BUSY(i)) vc_deallocate(i); - release_console_sem(); + console_unlock(); } else { /* deallocate a single console, if possible */ arg--; if (VT_BUSY(arg)) ret = -EBUSY; else if (arg) { /* leave 0 */ - acquire_console_sem(); + console_lock(); vc_deallocate(arg); - release_console_sem(); + console_unlock(); } } break; @@ -1126,7 +1129,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, get_user(cc, &vtsizes->v_cols)) ret = -EFAULT; else { - acquire_console_sem(); + console_lock(); for (i = 0; i < MAX_NR_CONSOLES; i++) { vc = vc_cons[i].d; @@ -1135,7 +1138,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, vc_resize(vc_cons[i].d, cc, ll); } } - release_console_sem(); + console_unlock(); } break; } @@ -1187,14 +1190,14 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, for (i = 0; i < MAX_NR_CONSOLES; i++) { if (!vc_cons[i].d) continue; - acquire_console_sem(); + console_lock(); if (vlin) vc_cons[i].d->vc_scan_lines = vlin; if (clin) vc_cons[i].d->vc_font.height = clin; vc_cons[i].d->vc_resize_user = 1; vc_resize(vc_cons[i].d, cc, ll); - release_console_sem(); + console_unlock(); } break; } @@ -1367,7 +1370,7 @@ void vc_SAK(struct work_struct *work) struct vc_data *vc; struct tty_struct *tty; - acquire_console_sem(); + console_lock(); vc = vc_con->d; if (vc) { tty = vc->port.tty; @@ -1379,7 +1382,7 @@ void vc_SAK(struct work_struct *work) __do_SAK(tty); reset_vc(vc); } - release_console_sem(); + console_unlock(); } #ifdef CONFIG_COMPAT @@ -1491,7 +1494,7 @@ compat_unimap_ioctl(unsigned int cmd, struct compat_unimapdesc __user *user_ud, return 0; } -long vt_compat_ioctl(struct tty_struct *tty, struct file * file, +long vt_compat_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct vc_data *vc = tty->driver_data; @@ -1577,7 +1580,7 @@ out: fallback: tty_unlock(); - return vt_ioctl(tty, file, cmd, arg); + return vt_ioctl(tty, cmd, arg); } @@ -1737,10 +1740,10 @@ int vt_move_to_console(unsigned int vt, int alloc) { int prev; - acquire_console_sem(); + console_lock(); /* Graphics mode - up to X */ if (disable_vt_switch) { - release_console_sem(); + console_unlock(); return 0; } prev = fg_console; @@ -1748,7 +1751,7 @@ int vt_move_to_console(unsigned int vt, int alloc) if (alloc && vc_allocate(vt)) { /* we can't have a free VC for now. Too bad, * we don't want to mess the screen for now. */ - release_console_sem(); + console_unlock(); return -ENOSPC; } @@ -1758,10 +1761,10 @@ int vt_move_to_console(unsigned int vt, int alloc) * Let the calling function know so it can decide * what to do. */ - release_console_sem(); + console_unlock(); return -EIO; } - release_console_sem(); + console_unlock(); tty_lock(); if (vt_waitactive(vt + 1)) { pr_debug("Suspend: Can't switch VCs."); @@ -1781,8 +1784,8 @@ int vt_move_to_console(unsigned int vt, int alloc) */ void pm_set_vt_switch(int do_switch) { - acquire_console_sem(); + console_lock(); disable_vt_switch = !do_switch; - release_console_sem(); + console_unlock(); } EXPORT_SYMBOL(pm_set_vt_switch); |
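vt_move_to_console() above is roughly the kernel-internal counterpart of the usual user-space switch sequence: pick a VT, activate it, then wait for the switch to complete. The sketch below does the same from user space with VT_OPENQRY, VT_ACTIVATE and VT_WAITACTIVE, then switches back; it assumes a /dev/tty0 node and root (or console-owner) privileges.

/* User-space analogue of the switch done by vt_move_to_console():
 * find a free VT, activate it, wait for the change, then return to
 * the VT that was active before.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vt.h>

int main(void)
{
        struct vt_stat st;
        int free_vt;
        int fd = open("/dev/tty0", O_RDWR);

        if (fd < 0) {
                perror("open /dev/tty0");
                return 1;
        }
        if (ioctl(fd, VT_GETSTATE, &st) < 0 ||      /* remember current VT */
            ioctl(fd, VT_OPENQRY, &free_vt) < 0 ||  /* first unused VT     */
            free_vt <= 0) {
                perror("VT_GETSTATE/VT_OPENQRY");
                close(fd);
                return 1;
        }
        if (ioctl(fd, VT_ACTIVATE, free_vt) == 0 &&
            ioctl(fd, VT_WAITACTIVE, free_vt) == 0) {
                sleep(2);                           /* look at the fresh VT */
                ioctl(fd, VT_ACTIVATE, st.v_active);
                ioctl(fd, VT_WAITACTIVE, st.v_active);
        }
        close(fd);
        return 0;
}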