Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/genrtc.c | 8
-rw-r--r--  drivers/char/keyboard.c | 38
-rw-r--r--  drivers/edac/e752x_edac.c | 17
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_debug.h | 15
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_diag.c | 3
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_driver.c | 18
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_init_chip.c | 36
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_intr.c | 21
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_kernel.h | 10
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_layer.c | 6
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_pe800.c | 4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_registers.h | 31
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ruc.c | 15
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_sysfs.c | 14
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ud.c | 6
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.c | 39
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.h | 3
-rw-r--r--  drivers/infiniband/hw/ipath/ips_common.h | 2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c | 2
-rw-r--r--  drivers/input/evdev.c | 21
-rw-r--r--  drivers/input/input.c | 11
-rw-r--r--  drivers/input/keyboard/spitzkbd.c | 4
-rw-r--r--  drivers/input/misc/wistron_btns.c | 30
-rw-r--r--  drivers/input/mouse/psmouse-base.c | 4
-rw-r--r--  drivers/input/touchscreen/ads7846.c | 414
-rw-r--r--  drivers/md/raid1.c | 29
-rw-r--r--  drivers/md/raid10.c | 46
-rw-r--r--  drivers/media/video/cx88/cx88-alsa.c | 2
-rw-r--r--  drivers/mmc/at91_mci.c | 3
-rw-r--r--  drivers/mmc/au1xmmc.c | 4
-rw-r--r--  drivers/mmc/imxmmc.c | 60
-rw-r--r--  drivers/mmc/mmc.c | 62
-rw-r--r--  drivers/mmc/mmc_block.c | 6
-rw-r--r--  drivers/mmc/mmci.c | 3
-rw-r--r--  drivers/mmc/pxamci.c | 13
-rw-r--r--  drivers/mmc/sdhci.c | 4
-rw-r--r--  drivers/mmc/wbsd.c | 4
-rw-r--r--  drivers/net/forcedeth.c | 312
-rw-r--r--  drivers/net/hamradio/dmascc.c | 1
-rw-r--r--  drivers/net/hamradio/scc.c | 1
-rw-r--r--  drivers/net/hamradio/yam.c | 1
-rw-r--r--  drivers/net/mv643xx_eth.c | 2
-rw-r--r--  drivers/net/tg3.c | 82
-rw-r--r--  drivers/net/tg3.h | 1
-rw-r--r--  drivers/net/via-rhine.c | 6
-rw-r--r--  drivers/rtc/rtc-dev.c | 17
-rw-r--r--  drivers/s390/net/qeth_main.c | 1
-rw-r--r--  drivers/s390/s390mach.c | 3
-rw-r--r--  drivers/sn/ioc3.c | 2
-rw-r--r--  drivers/sn/ioc4.c | 2
50 files changed, 1027 insertions, 412 deletions
diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
index d3a2bc36129..588fca542a9 100644
--- a/drivers/char/genrtc.c
+++ b/drivers/char/genrtc.c
@@ -200,13 +200,13 @@ static ssize_t gen_rtc_read(struct file *file, char __user *buf,
/* first test allows optimizer to nuke this case for 32-bit machines */
if (sizeof (int) != sizeof (long) && count == sizeof (unsigned int)) {
unsigned int uidata = data;
- retval = put_user(uidata, (unsigned long __user *)buf);
+ retval = put_user(uidata, (unsigned int __user *)buf) ?:
+ sizeof(unsigned int);
}
else {
- retval = put_user(data, (unsigned long __user *)buf);
+ retval = put_user(data, (unsigned long __user *)buf) ?:
+ sizeof(unsigned long);
}
- if (!retval)
- retval = sizeof(unsigned long);
out:
current->state = TASK_RUNNING;
remove_wait_queue(&gen_rtc_wait, &wait);
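The hunk above replaces the separate "if (!retval)" fixup with the GNU binary conditional ("a ?: b" yields a when a is non-zero, otherwise b). A minimal stand-alone sketch of the equivalence; the helper name is illustrative only, not from the patch:

	/* put_user() returns 0 on success or -EFAULT on a faulted copy, so the
	 * expression below yields -EFAULT on failure and the byte count on
	 * success -- the same behaviour the removed
	 * "if (!retval) retval = sizeof(unsigned long);" provided. */
	static ssize_t copy_rtc_word(unsigned long data, unsigned long __user *buf)
	{
		return put_user(data, buf) ?: sizeof(unsigned long);
	}
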
diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
index 935670a3cd9..5755b7e5f18 100644
--- a/drivers/char/keyboard.c
+++ b/drivers/char/keyboard.c
@@ -860,9 +860,32 @@ static void k_slock(struct vc_data *vc, unsigned char value, char up_flag, struc
}
/* by default, 300ms interval for combination release */
-static long brl_timeout = 300;
-MODULE_PARM_DESC(brl_timeout, "Braille keys release delay in ms (0 for combination on first release, < 0 for dead characters)");
-module_param(brl_timeout, long, 0644);
+static unsigned brl_timeout = 300;
+MODULE_PARM_DESC(brl_timeout, "Braille keys release delay in ms (0 for commit on first key release)");
+module_param(brl_timeout, uint, 0644);
+
+static unsigned brl_nbchords = 1;
+MODULE_PARM_DESC(brl_nbchords, "Number of chords that produce a braille pattern (0 for dead chords)");
+module_param(brl_nbchords, uint, 0644);
+
+static void k_brlcommit(struct vc_data *vc, unsigned int pattern, char up_flag, struct pt_regs *regs)
+{
+ static unsigned long chords;
+ static unsigned committed;
+
+ if (!brl_nbchords)
+ k_deadunicode(vc, BRL_UC_ROW | pattern, up_flag, regs);
+ else {
+ committed |= pattern;
+ chords++;
+ if (chords == brl_nbchords) {
+ k_unicode(vc, BRL_UC_ROW | committed, up_flag, regs);
+ chords = 0;
+ committed = 0;
+ }
+ }
+}
+
static void k_brl(struct vc_data *vc, unsigned char value, char up_flag, struct pt_regs *regs)
{
static unsigned pressed,committing;
@@ -882,11 +905,6 @@ static void k_brl(struct vc_data *vc, unsigned char value, char up_flag, struct
if (value > 8)
return;
- if (brl_timeout < 0) {
- k_deadunicode(vc, BRL_UC_ROW | (1 << (value - 1)), up_flag, regs);
- return;
- }
-
if (up_flag) {
if (brl_timeout) {
if (!committing ||
@@ -897,13 +915,13 @@ static void k_brl(struct vc_data *vc, unsigned char value, char up_flag, struct
pressed &= ~(1 << (value - 1));
if (!pressed) {
if (committing) {
- k_unicode(vc, BRL_UC_ROW | committing, 0, regs);
+ k_brlcommit(vc, committing, 0, regs);
committing = 0;
}
}
} else {
if (committing) {
- k_unicode(vc, BRL_UC_ROW | committing, 0, regs);
+ k_brlcommit(vc, committing, 0, regs);
committing = 0;
}
pressed &= ~(1 << (value - 1));
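Reduced to a stand-alone sketch for the brl_nbchords >= 1 case (illustrative helper, not part of the patch), the new commit path ORs successive braille patterns together and emits one combined pattern once enough chords have been seen:

	/* Illustrative reduction of k_brlcommit(): accumulate chord patterns and
	 * return the combined pattern after `nbchords` chords, 0 while collecting. */
	static unsigned int accumulate_chord(unsigned int pattern, unsigned int nbchords)
	{
		static unsigned int chords, committed;

		committed |= pattern;
		if (++chords < nbchords)
			return 0;
		pattern = committed;
		chords = 0;
		committed = 0;
		return pattern;
	}
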
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index 66572c5323a..fce31936e6d 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -25,6 +25,8 @@
#include <linux/slab.h>
#include "edac_mc.h"
+static int force_function_unhide;
+
#define e752x_printk(level, fmt, arg...) \
edac_printk(level, "e752x", fmt, ##arg)
@@ -782,8 +784,16 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
debugf0("%s(): mci\n", __func__);
debugf0("Starting Probe1\n");
- /* enable device 0 function 1 */
+ /* check to see if device 0 function 1 is enabled; if it isn't, we
+ * assume the BIOS has reserved it for a reason and is expecting
+ * exclusive access, we take care not to violate that assumption and
+ * fail the probe. */
pci_read_config_byte(pdev, E752X_DEVPRES1, &stat8);
+ if (!force_function_unhide && !(stat8 & (1 << 5))) {
+ printk(KERN_INFO "Contact your BIOS vendor to see if the "
+ "E752x error registers can be safely un-hidden\n");
+ goto fail;
+ }
stat8 |= (1 << 5);
pci_write_config_byte(pdev, E752X_DEVPRES1, stat8);
@@ -1063,3 +1073,8 @@ module_exit(e752x_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n");
MODULE_DESCRIPTION("MC support for Intel e752x memory controllers");
+
+module_param(force_function_unhide, int, 0444);
+MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:"
+" 1=force unhide and hope BIOS doesn't fight driver for Dev0:Fun1 access");
+
diff --git a/drivers/infiniband/hw/ipath/ipath_debug.h b/drivers/infiniband/hw/ipath/ipath_debug.h
index 593e28969c6..46762387f5f 100644
--- a/drivers/infiniband/hw/ipath/ipath_debug.h
+++ b/drivers/infiniband/hw/ipath/ipath_debug.h
@@ -60,11 +60,11 @@
#define __IPATH_KERNEL_SEND 0x2000 /* use kernel mode send */
#define __IPATH_EPKTDBG 0x4000 /* print ethernet packet data */
#define __IPATH_SMADBG 0x8000 /* sma packet debug */
-#define __IPATH_IPATHDBG 0x10000 /* Ethernet (IPATH) general debug on */
-#define __IPATH_IPATHWARN 0x20000 /* Ethernet (IPATH) warnings on */
-#define __IPATH_IPATHERR 0x40000 /* Ethernet (IPATH) errors on */
-#define __IPATH_IPATHPD 0x80000 /* Ethernet (IPATH) packet dump on */
-#define __IPATH_IPATHTABLE 0x100000 /* Ethernet (IPATH) table dump on */
+#define __IPATH_IPATHDBG 0x10000 /* Ethernet (IPATH) gen debug */
+#define __IPATH_IPATHWARN 0x20000 /* Ethernet (IPATH) warnings */
+#define __IPATH_IPATHERR 0x40000 /* Ethernet (IPATH) errors */
+#define __IPATH_IPATHPD 0x80000 /* Ethernet (IPATH) packet dump */
+#define __IPATH_IPATHTABLE 0x100000 /* Ethernet (IPATH) table dump */
#else /* _IPATH_DEBUGGING */
@@ -79,11 +79,12 @@
#define __IPATH_TRSAMPLE 0x0 /* generate trace buffer sample entries */
#define __IPATH_VERBDBG 0x0 /* very verbose debug */
#define __IPATH_PKTDBG 0x0 /* print packet data */
-#define __IPATH_PROCDBG 0x0 /* print process startup (init)/exit messages */
+#define __IPATH_PROCDBG 0x0 /* process startup (init)/exit messages */
/* print mmap/nopage stuff, not using VDBG any more */
#define __IPATH_MMDBG 0x0
#define __IPATH_EPKTDBG 0x0 /* print ethernet packet data */
-#define __IPATH_SMADBG 0x0 /* print process startup (init)/exit messages */#define __IPATH_IPATHDBG 0x0 /* Ethernet (IPATH) table dump on */
+#define __IPATH_SMADBG 0x0 /* process startup (init)/exit messages */
+#define __IPATH_IPATHDBG 0x0 /* Ethernet (IPATH) table dump on */
#define __IPATH_IPATHWARN 0x0 /* Ethernet (IPATH) warnings on */
#define __IPATH_IPATHERR 0x0 /* Ethernet (IPATH) errors on */
#define __IPATH_IPATHPD 0x0 /* Ethernet (IPATH) packet dump on */
diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
index 7d3fb6996b4..28ddceb260e 100644
--- a/drivers/infiniband/hw/ipath/ipath_diag.c
+++ b/drivers/infiniband/hw/ipath/ipath_diag.c
@@ -277,13 +277,14 @@ static int ipath_diag_open(struct inode *in, struct file *fp)
bail:
spin_unlock_irqrestore(&ipath_devs_lock, flags);
- mutex_unlock(&ipath_mutex);
/* Only expose a way to reset the device if we
make it into diag mode. */
if (ret == 0)
ipath_expose_reset(&dd->pcidev->dev);
+ mutex_unlock(&ipath_mutex);
+
return ret;
}
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index e7617c3982e..398add4d4cb 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -418,9 +418,19 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
if (ret) {
- dev_info(&pdev->dev, "pci_set_dma_mask unit %u "
- "fails: %d\n", dd->ipath_unit, ret);
- goto bail_regions;
+ /*
+ * if the 64 bit setup fails, try 32 bit. Some systems
+ * do not setup 64 bit maps on systems with 2GB or less
+ * memory installed.
+ */
+ ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ if (ret) {
+ dev_info(&pdev->dev, "pci_set_dma_mask unit %u "
+ "fails: %d\n", dd->ipath_unit, ret);
+ goto bail_regions;
+ }
+ else
+ ipath_dbg("No 64bit DMA mask, used 32 bit mask\n");
}
pci_set_master(pdev);
@@ -1949,7 +1959,7 @@ int ipath_reset_device(int unit)
}
if (dd->ipath_pd)
- for (i = 1; i < dd->ipath_portcnt; i++) {
+ for (i = 1; i < dd->ipath_cfgports; i++) {
if (dd->ipath_pd[i] && dd->ipath_pd[i]->port_cnt) {
ipath_dbg("unit %u port %d is in use "
"(PID %u cmd %s), can't reset\n",
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index 2823ff9c0c6..16f640e1c16 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -53,13 +53,19 @@ MODULE_PARM_DESC(cfgports, "Set max number of ports to use");
/*
* Number of buffers reserved for driver (layered drivers and SMA
- * send). Reserved at end of buffer list.
+ * send). Reserved at end of buffer list. Initialized based on
+ * number of PIO buffers if not set via module interface.
+ * The problem with this is that it's global, but we'll use different
+ * numbers for different chip types. So the default value is not
+ * very useful. I've redefined it for the 1.3 release so that it's
+ * zero unless set by the user to something else, in which case we
+ * try to respect it.
*/
-static ushort ipath_kpiobufs = 32;
+static ushort ipath_kpiobufs;
static int ipath_set_kpiobufs(const char *val, struct kernel_param *kp);
-module_param_call(kpiobufs, ipath_set_kpiobufs, param_get_uint,
+module_param_call(kpiobufs, ipath_set_kpiobufs, param_get_ushort,
&ipath_kpiobufs, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(kpiobufs, "Set number of PIO buffers for driver");
@@ -531,8 +537,11 @@ static int init_housekeeping(struct ipath_devdata *dd,
* Don't clear ipath_flags as 8bit mode was set before
* entering this func. However, we do set the linkstate to
* unknown, so we can watch for a transition.
+ * PRESENT is set because we want register reads to work,
+ * and the kernel infrastructure saw it in config space;
+ * We clear it if we have failures.
*/
- dd->ipath_flags |= IPATH_LINKUNK;
+ dd->ipath_flags |= IPATH_LINKUNK | IPATH_PRESENT;
dd->ipath_flags &= ~(IPATH_LINKACTIVE | IPATH_LINKARMED |
IPATH_LINKDOWN | IPATH_LINKINIT);
@@ -560,6 +569,7 @@ static int init_housekeeping(struct ipath_devdata *dd,
|| (dd->ipath_uregbase & 0xffffffff) == 0xffffffff) {
ipath_dev_err(dd, "Register read failures from chip, "
"giving up initialization\n");
+ dd->ipath_flags &= ~IPATH_PRESENT;
ret = -ENODEV;
goto done;
}
@@ -682,16 +692,14 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
*/
dd->ipath_pioavregs = ALIGN(val, sizeof(u64) * BITS_PER_BYTE / 2)
/ (sizeof(u64) * BITS_PER_BYTE / 2);
- if (!ipath_kpiobufs) /* have to have at least 1, for SMA */
- kpiobufs = ipath_kpiobufs = 1;
- else if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) <
- (dd->ipath_cfgports * IPATH_MIN_USER_PORT_BUFCNT)) {
- dev_info(&dd->pcidev->dev, "Too few PIO buffers (%u) "
- "for %u ports to have %u each!\n",
- dd->ipath_piobcnt2k + dd->ipath_piobcnt4k,
- dd->ipath_cfgports, IPATH_MIN_USER_PORT_BUFCNT);
- kpiobufs = 1; /* reserve just the minimum for SMA/ether */
- } else
+ if (ipath_kpiobufs == 0) {
+ /* not set by user, or set explictly to default */
+ if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) > 128)
+ kpiobufs = 32;
+ else
+ kpiobufs = 16;
+ }
+ else
kpiobufs = ipath_kpiobufs;
if (kpiobufs >
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index 0bcb428041f..3e72a1fe3d7 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -665,14 +665,14 @@ static void handle_layer_pioavail(struct ipath_devdata *dd)
ret = __ipath_layer_intr(dd, IPATH_LAYER_INT_SEND_CONTINUE);
if (ret > 0)
- goto clear;
+ goto set;
ret = __ipath_verbs_piobufavail(dd);
if (ret > 0)
- goto clear;
+ goto set;
return;
-clear:
+set:
set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
dd->ipath_sendctrl);
@@ -719,11 +719,24 @@ static void handle_rcv(struct ipath_devdata *dd, u32 istat)
irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs)
{
struct ipath_devdata *dd = data;
- u32 istat = ipath_read_kreg32(dd, dd->ipath_kregs->kr_intstatus);
+ u32 istat;
ipath_err_t estat = 0;
static unsigned unexpected = 0;
irqreturn_t ret;
+ if(!(dd->ipath_flags & IPATH_PRESENT)) {
+ /* this is mostly so we don't try to touch the chip while
+ * it is being reset */
+ /*
+ * This return value is perhaps odd, but we do not want the
+ * interrupt core code to remove our interrupt handler
+ * because we don't appear to be handling an interrupt
+ * during a chip reset.
+ */
+ return IRQ_HANDLED;
+ }
+
+ istat = ipath_read_kreg32(dd, dd->ipath_kregs->kr_intstatus);
if (unlikely(!istat)) {
ipath_stats.sps_nullintr++;
ret = IRQ_NONE; /* not our interrupt, or already handled */
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index 0ce5f19c9d6..e6507f8115b 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -731,7 +731,7 @@ u64 ipath_read_kreg64_port(const struct ipath_devdata *, ipath_kreg,
static inline u32 ipath_read_ureg32(const struct ipath_devdata *dd,
ipath_ureg regno, int port)
{
- if (!dd->ipath_kregbase)
+ if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
return 0;
return readl(regno + (u64 __iomem *)
@@ -762,7 +762,7 @@ static inline void ipath_write_ureg(const struct ipath_devdata *dd,
static inline u32 ipath_read_kreg32(const struct ipath_devdata *dd,
ipath_kreg regno)
{
- if (!dd->ipath_kregbase)
+ if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
return -1;
return readl((u32 __iomem *) & dd->ipath_kregbase[regno]);
}
@@ -770,7 +770,7 @@ static inline u32 ipath_read_kreg32(const struct ipath_devdata *dd,
static inline u64 ipath_read_kreg64(const struct ipath_devdata *dd,
ipath_kreg regno)
{
- if (!dd->ipath_kregbase)
+ if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
return -1;
return readq(&dd->ipath_kregbase[regno]);
@@ -786,7 +786,7 @@ static inline void ipath_write_kreg(const struct ipath_devdata *dd,
static inline u64 ipath_read_creg(const struct ipath_devdata *dd,
ipath_sreg regno)
{
- if (!dd->ipath_kregbase)
+ if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
return 0;
return readq(regno + (u64 __iomem *)
@@ -797,7 +797,7 @@ static inline u64 ipath_read_creg(const struct ipath_devdata *dd,
static inline u32 ipath_read_creg32(const struct ipath_devdata *dd,
ipath_sreg regno)
{
- if (!dd->ipath_kregbase)
+ if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
return 0;
return readl(regno + (u64 __iomem *)
(dd->ipath_cregbase +
diff --git a/drivers/infiniband/hw/ipath/ipath_layer.c b/drivers/infiniband/hw/ipath/ipath_layer.c
index 69ed1100701..9cb5258ffed 100644
--- a/drivers/infiniband/hw/ipath/ipath_layer.c
+++ b/drivers/infiniband/hw/ipath/ipath_layer.c
@@ -46,13 +46,15 @@
/* Acquire before ipath_devs_lock. */
static DEFINE_MUTEX(ipath_layer_mutex);
+static int ipath_verbs_registered;
+
u16 ipath_layer_rcv_opcode;
+
static int (*layer_intr)(void *, u32);
static int (*layer_rcv)(void *, void *, struct sk_buff *);
static int (*layer_rcv_lid)(void *, void *);
static int (*verbs_piobufavail)(void *);
static void (*verbs_rcv)(void *, void *, void *, u32);
-static int ipath_verbs_registered;
static void *(*layer_add_one)(int, struct ipath_devdata *);
static void (*layer_remove_one)(void *);
@@ -586,6 +588,8 @@ void ipath_verbs_unregister(void)
verbs_rcv = NULL;
verbs_timer_cb = NULL;
+ ipath_verbs_registered = 0;
+
mutex_unlock(&ipath_layer_mutex);
}
diff --git a/drivers/infiniband/hw/ipath/ipath_pe800.c b/drivers/infiniband/hw/ipath/ipath_pe800.c
index e1dc4f75706..6318067ab5e 100644
--- a/drivers/infiniband/hw/ipath/ipath_pe800.c
+++ b/drivers/infiniband/hw/ipath/ipath_pe800.c
@@ -972,6 +972,8 @@ static int ipath_setup_pe_reset(struct ipath_devdata *dd)
/* Use ERROR so it shows up in logs, etc. */
ipath_dev_err(dd, "Resetting PE-800 unit %u\n",
dd->ipath_unit);
+ /* keep chip from being accessed in a few places */
+ dd->ipath_flags &= ~(IPATH_INITTED|IPATH_PRESENT);
val = dd->ipath_control | INFINIPATH_C_RESET;
ipath_write_kreg(dd, dd->ipath_kregs->kr_control, val);
mb();
@@ -997,6 +999,8 @@ static int ipath_setup_pe_reset(struct ipath_devdata *dd)
if ((r = pci_enable_device(dd->pcidev)))
ipath_dev_err(dd, "pci_enable_device failed after "
"reset: %d\n", r);
+ /* whether it worked or not, mark as present, again */
+ dd->ipath_flags |= IPATH_PRESENT;
val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);
if (val == dd->ipath_revision) {
ipath_cdbg(VERBOSE, "Got matching revision "
diff --git a/drivers/infiniband/hw/ipath/ipath_registers.h b/drivers/infiniband/hw/ipath/ipath_registers.h
index 1e59750c5f6..402126eb79c 100644
--- a/drivers/infiniband/hw/ipath/ipath_registers.h
+++ b/drivers/infiniband/hw/ipath/ipath_registers.h
@@ -34,8 +34,9 @@
#define _IPATH_REGISTERS_H
/*
- * This file should only be included by kernel source, and by the diags.
- * It defines the registers, and their contents, for the InfiniPath HT-400 chip
+ * This file should only be included by kernel source, and by the diags. It
+ * defines the registers, and their contents, for the InfiniPath HT-400
+ * chip.
*/
/*
@@ -156,8 +157,10 @@
#define INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT 8
#define INFINIPATH_IBCC_LINKINITCMD_MASK 0x3ULL
#define INFINIPATH_IBCC_LINKINITCMD_DISABLE 1
-#define INFINIPATH_IBCC_LINKINITCMD_POLL 2 /* cycle through TS1/TS2 till OK */
-#define INFINIPATH_IBCC_LINKINITCMD_SLEEP 3 /* wait for TS1, then go on */
+/* cycle through TS1/TS2 till OK */
+#define INFINIPATH_IBCC_LINKINITCMD_POLL 2
+/* wait for TS1, then go on */
+#define INFINIPATH_IBCC_LINKINITCMD_SLEEP 3
#define INFINIPATH_IBCC_LINKINITCMD_SHIFT 16
#define INFINIPATH_IBCC_LINKCMD_MASK 0x3ULL
#define INFINIPATH_IBCC_LINKCMD_INIT 1 /* move to 0x11 */
@@ -182,7 +185,8 @@
#define INFINIPATH_IBCS_LINKSTATE_SHIFT 4
#define INFINIPATH_IBCS_TXREADY 0x40000000
#define INFINIPATH_IBCS_TXCREDITOK 0x80000000
-/* link training states (shift by INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) */
+/* link training states (shift by
+ INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) */
#define INFINIPATH_IBCS_LT_STATE_DISABLED 0x00
#define INFINIPATH_IBCS_LT_STATE_LINKUP 0x01
#define INFINIPATH_IBCS_LT_STATE_POLLACTIVE 0x02
@@ -267,10 +271,12 @@
/* kr_serdesconfig0 bits */
#define INFINIPATH_SERDC0_RESET_MASK 0xfULL /* overal reset bits */
#define INFINIPATH_SERDC0_RESET_PLL 0x10000000ULL /* pll reset */
-#define INFINIPATH_SERDC0_TXIDLE 0xF000ULL /* tx idle enables (per lane) */
-#define INFINIPATH_SERDC0_RXDETECT_EN 0xF0000ULL /* rx detect enables (per lane) */
-#define INFINIPATH_SERDC0_L1PWR_DN 0xF0ULL /* L1 Power down; use with RXDETECT,
- Otherwise not used on IB side */
+/* tx idle enables (per lane) */
+#define INFINIPATH_SERDC0_TXIDLE 0xF000ULL
+/* rx detect enables (per lane) */
+#define INFINIPATH_SERDC0_RXDETECT_EN 0xF0000ULL
+/* L1 Power down; use with RXDETECT, Otherwise not used on IB side */
+#define INFINIPATH_SERDC0_L1PWR_DN 0xF0ULL
/* kr_xgxsconfig bits */
#define INFINIPATH_XGXS_RESET 0x7ULL
@@ -390,12 +396,13 @@ struct ipath_kregs {
ipath_kreg kr_txintmemsize;
ipath_kreg kr_xgxsconfig;
ipath_kreg kr_ibpllcfg;
- /* use these two (and the following N ports) only with ipath_k*_kreg64_port();
- * not *kreg64() */
+ /* use these two (and the following N ports) only with
+ * ipath_k*_kreg64_port(); not *kreg64() */
ipath_kreg kr_rcvhdraddr;
ipath_kreg kr_rcvhdrtailaddr;
- /* remaining registers are not present on all types of infinipath chips */
+ /* remaining registers are not present on all types of infinipath
+ chips */
ipath_kreg kr_rcvpktledcnt;
ipath_kreg kr_pcierbuftestreg0;
ipath_kreg kr_pcierbuftestreg1;
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index f232e77b78e..eb81424b3c5 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -531,19 +531,12 @@ int ipath_post_rc_send(struct ipath_qp *qp, struct ib_send_wr *wr)
}
wqe->wr.num_sge = j;
qp->s_head = next;
- /*
- * Wake up the send tasklet if the QP is not waiting
- * for an RNR timeout.
- */
- next = qp->s_rnr_timeout;
spin_unlock_irqrestore(&qp->s_lock, flags);
- if (next == 0) {
- if (qp->ibqp.qp_type == IB_QPT_UC)
- ipath_do_uc_send((unsigned long) qp);
- else
- ipath_do_rc_send((unsigned long) qp);
- }
+ if (qp->ibqp.qp_type == IB_QPT_UC)
+ ipath_do_uc_send((unsigned long) qp);
+ else
+ ipath_do_rc_send((unsigned long) qp);
ret = 0;
diff --git a/drivers/infiniband/hw/ipath/ipath_sysfs.c b/drivers/infiniband/hw/ipath/ipath_sysfs.c
index 32acd8048b4..f323791cc49 100644
--- a/drivers/infiniband/hw/ipath/ipath_sysfs.c
+++ b/drivers/infiniband/hw/ipath/ipath_sysfs.c
@@ -711,10 +711,22 @@ static struct attribute_group dev_attr_group = {
* enters diag mode. A device reset is quite likely to crash the
* machine entirely, so we don't want to normally make it
* available.
+ *
+ * Called with ipath_mutex held.
*/
int ipath_expose_reset(struct device *dev)
{
- return device_create_file(dev, &dev_attr_reset);
+ static int exposed;
+ int ret;
+
+ if (!exposed) {
+ ret = device_create_file(dev, &dev_attr_reset);
+ exposed = 1;
+ }
+ else
+ ret = 0;
+
+ return ret;
}
int ipath_driver_create_group(struct device_driver *drv)
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index 01cfb30ee16..e606daf8321 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -46,8 +46,10 @@
* This is called from ipath_post_ud_send() to forward a WQE addressed
* to the same HCA.
*/
-static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_sge_state *ss,
- u32 length, struct ib_send_wr *wr, struct ib_wc *wc)
+static void ipath_ud_loopback(struct ipath_qp *sqp,
+ struct ipath_sge_state *ss,
+ u32 length, struct ib_send_wr *wr,
+ struct ib_wc *wc)
{
struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
struct ipath_qp *qp;
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 8d2558a01f3..cb9e387c301 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -449,7 +449,6 @@ static void ipath_ib_timer(void *arg)
{
struct ipath_ibdev *dev = (struct ipath_ibdev *) arg;
struct ipath_qp *resend = NULL;
- struct ipath_qp *rnr = NULL;
struct list_head *last;
struct ipath_qp *qp;
unsigned long flags;
@@ -465,32 +464,18 @@ static void ipath_ib_timer(void *arg)
last = &dev->pending[dev->pending_index];
while (!list_empty(last)) {
qp = list_entry(last->next, struct ipath_qp, timerwait);
- if (last->next == LIST_POISON1 ||
- last->next != &qp->timerwait ||
- qp->timerwait.prev != last) {
- INIT_LIST_HEAD(last);
- } else {
- list_del(&qp->timerwait);
- qp->timerwait.prev = (struct list_head *) resend;
- resend = qp;
- atomic_inc(&qp->refcount);
- }
+ list_del(&qp->timerwait);
+ qp->timer_next = resend;
+ resend = qp;
+ atomic_inc(&qp->refcount);
}
last = &dev->rnrwait;
if (!list_empty(last)) {
qp = list_entry(last->next, struct ipath_qp, timerwait);
if (--qp->s_rnr_timeout == 0) {
do {
- if (last->next == LIST_POISON1 ||
- last->next != &qp->timerwait ||
- qp->timerwait.prev != last) {
- INIT_LIST_HEAD(last);
- break;
- }
list_del(&qp->timerwait);
- qp->timerwait.prev =
- (struct list_head *) rnr;
- rnr = qp;
+ tasklet_hi_schedule(&qp->s_task);
if (list_empty(last))
break;
qp = list_entry(last->next, struct ipath_qp,
@@ -530,8 +515,7 @@ static void ipath_ib_timer(void *arg)
spin_unlock_irqrestore(&dev->pending_lock, flags);
/* XXX What if timer fires again while this is running? */
- for (qp = resend; qp != NULL;
- qp = (struct ipath_qp *) qp->timerwait.prev) {
+ for (qp = resend; qp != NULL; qp = qp->timer_next) {
struct ib_wc wc;
spin_lock_irqsave(&qp->s_lock, flags);
@@ -545,9 +529,6 @@ static void ipath_ib_timer(void *arg)
if (atomic_dec_and_test(&qp->refcount))
wake_up(&qp->wait);
}
- for (qp = rnr; qp != NULL;
- qp = (struct ipath_qp *) qp->timerwait.prev)
- tasklet_hi_schedule(&qp->s_task);
}
/**
@@ -556,9 +537,9 @@ static void ipath_ib_timer(void *arg)
*
* This is called from ipath_intr() at interrupt level when a PIO buffer is
* available after ipath_verbs_send() returned an error that no buffers were
- * available. Return 0 if we consumed all the PIO buffers and we still have
+ * available. Return 1 if we consumed all the PIO buffers and we still have
* QPs waiting for buffers (for now, just do a tasklet_hi_schedule and
- * return one).
+ * return zero).
*/
static int ipath_ib_piobufavail(void *arg)
{
@@ -579,7 +560,7 @@ static int ipath_ib_piobufavail(void *arg)
spin_unlock_irqrestore(&dev->pending_lock, flags);
bail:
- return 1;
+ return 0;
}
static int ipath_query_device(struct ib_device *ibdev,
@@ -1159,7 +1140,7 @@ static ssize_t show_stats(struct class_device *cdev, char *buf)
len = sprintf(buf,
"RC resends %d\n"
- "RC QACKs %d\n"
+ "RC no QACK %d\n"
"RC ACKs %d\n"
"RC SEQ NAKs %d\n"
"RC RDMA seq %d\n"
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index fcafbc7c9e7..4f8d59300e9 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -282,7 +282,8 @@ struct ipath_srq {
*/
struct ipath_qp {
struct ib_qp ibqp;
- struct ipath_qp *next; /* link list for QPN hash table */
+ struct ipath_qp *next; /* link list for QPN hash table */
+ struct ipath_qp *timer_next; /* link list for ipath_ib_timer() */
struct list_head piowait; /* link for wait PIO buf */
struct list_head timerwait; /* link for waiting for timeouts */
struct ib_ah_attr remote_ah_attr;
diff --git a/drivers/infiniband/hw/ipath/ips_common.h b/drivers/infiniband/hw/ipath/ips_common.h
index 410a764dfce..ab7cbbbfd03 100644
--- a/drivers/infiniband/hw/ipath/ips_common.h
+++ b/drivers/infiniband/hw/ipath/ips_common.h
@@ -95,7 +95,7 @@ struct ether_header {
__u8 seq_num;
__le32 len;
/* MUST be of word size due to PIO write requirements */
- __u32 csum;
+ __le32 csum;
__le16 csum_offset;
__le16 flags;
__u16 first_2_bytes;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 565a24b1756..a2eae8a3016 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -306,7 +306,7 @@ static int mthca_query_gid(struct ib_device *ibdev, u8 port,
goto out;
}
- memcpy(gid->raw + 8, out_mad->data + (index % 8) * 16, 8);
+ memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
out:
kfree(in_mad);
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index a34e3d91d9e..ba325f16d07 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -403,6 +403,27 @@ static long evdev_ioctl_handler(struct file *file, unsigned int cmd,
case EVIOCGID:
if (copy_to_user(p, &dev->id, sizeof(struct input_id)))
return -EFAULT;
+ return 0;
+
+ case EVIOCGREP:
+ if (!test_bit(EV_REP, dev->evbit))
+ return -ENOSYS;
+ if (put_user(dev->rep[REP_DELAY], ip))
+ return -EFAULT;
+ if (put_user(dev->rep[REP_PERIOD], ip + 1))
+ return -EFAULT;
+ return 0;
+
+ case EVIOCSREP:
+ if (!test_bit(EV_REP, dev->evbit))
+ return -ENOSYS;
+ if (get_user(u, ip))
+ return -EFAULT;
+ if (get_user(v, ip + 1))
+ return -EFAULT;
+
+ input_event(dev, EV_REP, REP_DELAY, u);
+ input_event(dev, EV_REP, REP_PERIOD, v);
return 0;
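From user space the two new ioctls take a pair of unsigned ints, (delay, period) in milliseconds. A hypothetical test program, not part of the patch, assuming /dev/input/event0 is the device of interest:

	/* Read the current autorepeat settings, then write the period back halved. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/input.h>

	int main(void)
	{
		unsigned int rep[2];	/* rep[0] = delay in ms, rep[1] = period in ms */
		int fd = open("/dev/input/event0", O_RDWR);

		if (fd < 0 || ioctl(fd, EVIOCGREP, rep) < 0)
			return 1;
		printf("delay %u ms, period %u ms\n", rep[0], rep[1]);
		rep[1] /= 2;
		return ioctl(fd, EVIOCSREP, rep) < 0;
	}
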
diff --git a/drivers/input/input.c b/drivers/input/input.c
index a935abeffff..3038c268917 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -155,6 +155,9 @@ void input_event(struct input_dev *dev, unsigned int type, unsigned int code, in
if (code > SND_MAX || !test_bit(code, dev->sndbit))
return;
+ if (!!test_bit(code, dev->snd) != !!value)
+ change_bit(code, dev->snd);
+
if (dev->event) dev->event(dev, type, code, value);
break;
@@ -286,19 +289,19 @@ static struct input_device_id *input_match_device(struct input_device_id *id, st
for (; id->flags || id->driver_info; id++) {
if (id->flags & INPUT_DEVICE_ID_MATCH_BUS)
- if (id->id.bustype != dev->id.bustype)
+ if (id->bustype != dev->id.bustype)
continue;
if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR)
- if (id->id.vendor != dev->id.vendor)
+ if (id->vendor != dev->id.vendor)
continue;
if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT)
- if (id->id.product != dev->id.product)
+ if (id->product != dev->id.product)
continue;
if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION)
- if (id->id.version != dev->id.version)
+ if (id->version != dev->id.version)
continue;
MATCH_BIT(evbit, EV_MAX);
diff --git a/drivers/input/keyboard/spitzkbd.c b/drivers/input/keyboard/spitzkbd.c
index bc61cf8cfc6..1d238a9d52d 100644
--- a/drivers/input/keyboard/spitzkbd.c
+++ b/drivers/input/keyboard/spitzkbd.c
@@ -53,8 +53,8 @@ static unsigned char spitzkbd_keycode[NR_SCANCODES] = {
KEY_LEFTCTRL, KEY_1, KEY_3, KEY_5, KEY_6, KEY_7, KEY_9, KEY_0, KEY_BACKSPACE, SPITZ_KEY_EXOK, SPITZ_KEY_EXCANCEL, 0, 0, 0, 0, 0, /* 1-16 */
0, KEY_2, KEY_4, KEY_R, KEY_Y, KEY_8, KEY_I, KEY_O, KEY_P, SPITZ_KEY_EXJOGDOWN, SPITZ_KEY_EXJOGUP, 0, 0, 0, 0, 0, /* 17-32 */
KEY_TAB, KEY_Q, KEY_E, KEY_T, KEY_G, KEY_U, KEY_J, KEY_K, 0, 0, 0, 0, 0, 0, 0, 0, /* 33-48 */
- SPITZ_KEY_CALENDER, KEY_W, KEY_S, KEY_F, KEY_V, KEY_H, KEY_M, KEY_L, 0, KEY_RIGHTSHIFT, 0, 0, 0, 0, 0, 0, /* 49-64 */
- SPITZ_KEY_ADDRESS, KEY_A, KEY_D, KEY_C, KEY_B, KEY_N, KEY_DOT, 0, KEY_ENTER, KEY_LEFTSHIFT, 0, 0, 0, 0, 0, 0, /* 65-80 */
+ SPITZ_KEY_ADDRESS, KEY_W, KEY_S, KEY_F, KEY_V, KEY_H, KEY_M, KEY_L, 0, KEY_RIGHTSHIFT, 0, 0, 0, 0, 0, 0, /* 49-64 */
+ SPITZ_KEY_CALENDER, KEY_A, KEY_D, KEY_C, KEY_B, KEY_N, KEY_DOT, 0, KEY_ENTER, KEY_LEFTSHIFT, 0, 0, 0, 0, 0, 0, /* 65-80 */
SPITZ_KEY_MAIL, KEY_Z, KEY_X, KEY_MINUS, KEY_SPACE, KEY_COMMA, 0, KEY_UP, 0, 0, SPITZ_KEY_FN, 0, 0, 0, 0, 0, /* 81-96 */
KEY_SYSRQ, SPITZ_KEY_JAP1, SPITZ_KEY_JAP2, SPITZ_KEY_CANCEL, SPITZ_KEY_OK, SPITZ_KEY_MENU, KEY_LEFT, KEY_DOWN, KEY_RIGHT, 0, 0, 0, 0, 0, 0, 0 /* 97-112 */
};
diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c
index 4b415d9b012..36cd2e07fce 100644
--- a/drivers/input/misc/wistron_btns.c
+++ b/drivers/input/misc/wistron_btns.c
@@ -273,6 +273,18 @@ static struct key_entry keymap_fs_amilo_pro_v2000[] = {
{ KE_END, 0 }
};
+static struct key_entry keymap_fujitsu_n3510[] = {
+ { KE_KEY, 0x11, KEY_PROG1 },
+ { KE_KEY, 0x12, KEY_PROG2 },
+ { KE_KEY, 0x36, KEY_WWW },
+ { KE_KEY, 0x31, KEY_MAIL },
+ { KE_KEY, 0x71, KEY_STOPCD },
+ { KE_KEY, 0x72, KEY_PLAYPAUSE },
+ { KE_KEY, 0x74, KEY_REWIND },
+ { KE_KEY, 0x78, KEY_FORWARD },
+ { KE_END, 0 }
+};
+
static struct key_entry keymap_wistron_ms2141[] = {
{ KE_KEY, 0x11, KEY_PROG1 },
{ KE_KEY, 0x12, KEY_PROG2 },
@@ -323,6 +335,24 @@ static struct dmi_system_id dmi_ids[] = {
},
{
.callback = dmi_matched,
+ .ident = "Fujitsu-Siemens Amilo M7400",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AMILO M "),
+ },
+ .driver_data = keymap_fs_amilo_pro_v2000
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Fujitsu N3510",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N3510"),
+ },
+ .driver_data = keymap_fujitsu_n3510
+ },
+ {
+ .callback = dmi_matched,
.ident = "Acer Aspire 1500",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 32d70ed8f41..136321a2cfd 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -302,8 +302,10 @@ static irqreturn_t psmouse_interrupt(struct serio *serio,
* Check if this is a new device announcement (0xAA 0x00)
*/
if (unlikely(psmouse->packet[0] == PSMOUSE_RET_BAT && psmouse->pktcnt <= 2)) {
- if (psmouse->pktcnt == 1)
+ if (psmouse->pktcnt == 1) {
+ psmouse->last = jiffies;
goto out;
+ }
if (psmouse->packet[1] == PSMOUSE_RET_ID) {
__psmouse_set_state(psmouse, PSMOUSE_IGNORE);
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 46d1fec2cfd..1494175ac6f 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -2,6 +2,8 @@
* ADS7846 based touchscreen and sensor driver
*
* Copyright (c) 2005 David Brownell
+ * Copyright (c) 2006 Nokia Corporation
+ * Various changes: Imre Deak <imre.deak@nokia.com>
*
* Using code from:
* - corgi_ts.c
@@ -34,17 +36,25 @@
/*
- * This code has been lightly tested on an ads7846.
+ * This code has been tested on an ads7846 / N770 device.
* Support for ads7843 and ads7845 has only been stubbed in.
*
- * Not yet done: investigate the values reported. Are x/y/pressure
- * event values sane enough for X11? How accurate are the temperature
- * and voltage readings? (System-specific calibration should support
+ * Not yet done: How accurate are the temperature and voltage
+ * readings? (System-specific calibration should support
* accuracy of 0.3 degrees C; otherwise it's 2.0 degrees.)
*
+ * IRQ handling needs a workaround because of a shortcoming in handling
+ * edge triggered IRQs on some platforms like the OMAP1/2. These
+ * platforms don't handle the ARM lazy IRQ disabling properly, thus we
+ * have to maintain our own SW IRQ disabled status. This should be
+ * removed as soon as the affected platform's IRQ handling is fixed.
+ *
* app note sbaa036 talks in more detail about accurate sampling...
* that ought to help in situations like LCDs inducing noise (which
* can also be helped by using synch signals) and more generally.
+ * This driver tries to utilize the measures described in the app
+ * note. The strength of filtering can be set in the board-* specific
+ * files.
*/
#define TS_POLL_PERIOD msecs_to_jiffies(10)
@@ -61,6 +71,7 @@ struct ts_event {
__be16 x;
__be16 y;
__be16 z1, z2;
+ int ignore;
};
struct ads7846 {
@@ -71,12 +82,23 @@ struct ads7846 {
u16 model;
u16 vref_delay_usecs;
u16 x_plate_ohms;
+ u16 pressure_max;
- u8 read_x, read_y, read_z1, read_z2;
+ u8 read_x, read_y, read_z1, read_z2, pwrdown;
+ u16 dummy; /* for the pwrdown read */
struct ts_event tc;
- struct spi_transfer xfer[8];
- struct spi_message msg;
+ struct spi_transfer xfer[10];
+ struct spi_message msg[5];
+ struct spi_message *last_msg;
+ int msg_idx;
+ int read_cnt;
+ int read_rep;
+ int last_read;
+
+ u16 debounce_max;
+ u16 debounce_tol;
+ u16 debounce_rep;
spinlock_t lock;
struct timer_list timer; /* P: lock */
@@ -84,6 +106,9 @@ struct ads7846 {
unsigned pending:1; /* P: lock */
// FIXME remove "irq_disabled"
unsigned irq_disabled:1; /* P: lock */
+ unsigned disabled:1;
+
+ int (*get_pendown_state)(void);
};
/* leave chip selected when we're done, for quicker re-select? */
@@ -125,7 +150,9 @@ struct ads7846 {
#define READ_Y (READ_12BIT_DFR(y) | ADS_PD10_ADC_ON)
#define READ_Z1 (READ_12BIT_DFR(z1) | ADS_PD10_ADC_ON)
#define READ_Z2 (READ_12BIT_DFR(z2) | ADS_PD10_ADC_ON)
-#define READ_X (READ_12BIT_DFR(x) | ADS_PD10_PDOWN) /* LAST */
+
+#define READ_X (READ_12BIT_DFR(x) | ADS_PD10_ADC_ON)
+#define PWRDOWN (READ_12BIT_DFR(y) | ADS_PD10_PDOWN) /* LAST */
/* single-ended samples need to first power up reference voltage;
* we leave both ADC and VREF powered
@@ -152,6 +179,15 @@ struct ser_req {
struct spi_transfer xfer[6];
};
+static void ads7846_enable(struct ads7846 *ts);
+static void ads7846_disable(struct ads7846 *ts);
+
+static int device_suspended(struct device *dev)
+{
+ struct ads7846 *ts = dev_get_drvdata(dev);
+ return dev->power.power_state.event != PM_EVENT_ON || ts->disabled;
+}
+
static int ads7846_read12_ser(struct device *dev, unsigned command)
{
struct spi_device *spi = to_spi_device(dev);
@@ -164,7 +200,7 @@ static int ads7846_read12_ser(struct device *dev, unsigned command)
if (!req)
return -ENOMEM;
- INIT_LIST_HEAD(&req->msg.transfers);
+ spi_message_init(&req->msg);
/* activate reference, so it has time to settle; */
req->ref_on = REF_ON;
@@ -204,8 +240,10 @@ static int ads7846_read12_ser(struct device *dev, unsigned command)
for (i = 0; i < 6; i++)
spi_message_add_tail(&req->xfer[i], &req->msg);
+ ts->irq_disabled = 1;
disable_irq(spi->irq);
status = spi_sync(spi, &req->msg);
+ ts->irq_disabled = 0;
enable_irq(spi->irq);
if (req->msg.status)
@@ -233,6 +271,52 @@ SHOW(temp1)
SHOW(vaux)
SHOW(vbatt)
+static int is_pen_down(struct device *dev)
+{
+ struct ads7846 *ts = dev_get_drvdata(dev);
+
+ return ts->pendown;
+}
+
+static ssize_t ads7846_pen_down_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", is_pen_down(dev));
+}
+
+static DEVICE_ATTR(pen_down, S_IRUGO, ads7846_pen_down_show, NULL);
+
+static ssize_t ads7846_disable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ads7846 *ts = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", ts->disabled);
+}
+
+static ssize_t ads7846_disable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ads7846 *ts = dev_get_drvdata(dev);
+ char *endp;
+ int i;
+
+ i = simple_strtoul(buf, &endp, 10);
+ spin_lock_irq(&ts->lock);
+
+ if (i)
+ ads7846_disable(ts);
+ else
+ ads7846_enable(ts);
+
+ spin_unlock_irq(&ts->lock);
+
+ return count;
+}
+
+static DEVICE_ATTR(disable, 0664, ads7846_disable_show, ads7846_disable_store);
+
/*--------------------------------------------------------------------------*/
/*
@@ -264,7 +348,7 @@ static void ads7846_rx(void *ads)
if (x == MAX_12BIT)
x = 0;
- if (x && z1 && ts->spi->dev.power.power_state.event == PM_EVENT_ON) {
+ if (likely(x && z1 && !device_suspended(&ts->spi->dev))) {
/* compute touch pressure resistance using equation #2 */
Rt = z2;
Rt -= z1;
@@ -275,6 +359,14 @@ static void ads7846_rx(void *ads)
} else
Rt = 0;
+ /* Sample found inconsistent by debouncing or pressure is beyond
+ * the maximum. Don't report it to user space, repeat at least
+ * once more the measurement */
+ if (ts->tc.ignore || Rt > ts->pressure_max) {
+ mod_timer(&ts->timer, jiffies + TS_POLL_PERIOD);
+ return;
+ }
+
/* NOTE: "pendown" is inferred from pressure; we don't rely on
* being able to check nPENIRQ status, or "friendly" trigger modes
* (both-edges is much better than just-falling or low-level).
@@ -296,11 +388,13 @@ static void ads7846_rx(void *ads)
if (Rt) {
input_report_abs(input_dev, ABS_X, x);
input_report_abs(input_dev, ABS_Y, y);
- input_report_abs(input_dev, ABS_PRESSURE, Rt);
sync = 1;
}
- if (sync)
+
+ if (sync) {
+ input_report_abs(input_dev, ABS_PRESSURE, Rt);
input_sync(input_dev);
+ }
#ifdef VERBOSE
if (Rt || ts->pendown)
@@ -308,80 +402,138 @@ static void ads7846_rx(void *ads)
x, y, Rt, Rt ? "" : " UP");
#endif
- /* don't retrigger while we're suspended */
spin_lock_irqsave(&ts->lock, flags);
ts->pendown = (Rt != 0);
- ts->pending = 0;
+ mod_timer(&ts->timer, jiffies + TS_POLL_PERIOD);
- if (ts->spi->dev.power.power_state.event == PM_EVENT_ON) {
- if (ts->pendown)
- mod_timer(&ts->timer, jiffies + TS_POLL_PERIOD);
- else if (ts->irq_disabled) {
- ts->irq_disabled = 0;
- enable_irq(ts->spi->irq);
+ spin_unlock_irqrestore(&ts->lock, flags);
+}
+
+static void ads7846_debounce(void *ads)
+{
+ struct ads7846 *ts = ads;
+ struct spi_message *m;
+ struct spi_transfer *t;
+ int val;
+ int status;
+
+ m = &ts->msg[ts->msg_idx];
+ t = list_entry(m->transfers.prev, struct spi_transfer, transfer_list);
+ val = (*(u16 *)t->rx_buf) >> 3;
+ if (!ts->read_cnt || (abs(ts->last_read - val) > ts->debounce_tol)) {
+ /* Repeat it, if this was the first read or the read
+ * wasn't consistent enough. */
+ if (ts->read_cnt < ts->debounce_max) {
+ ts->last_read = val;
+ ts->read_cnt++;
+ } else {
+ /* Maximum number of debouncing reached and still
+ * not enough number of consistent readings. Abort
+ * the whole sample, repeat it in the next sampling
+ * period.
+ */
+ ts->tc.ignore = 1;
+ ts->read_cnt = 0;
+ /* Last message will contain ads7846_rx() as the
+ * completion function.
+ */
+ m = ts->last_msg;
}
+ /* Start over collecting consistent readings. */
+ ts->read_rep = 0;
+ } else {
+ if (++ts->read_rep > ts->debounce_rep) {
+ /* Got a good reading for this coordinate,
+ * go for the next one. */
+ ts->tc.ignore = 0;
+ ts->msg_idx++;
+ ts->read_cnt = 0;
+ ts->read_rep = 0;
+ m++;
+ } else
+ /* Read more values that are consistent. */
+ ts->read_cnt++;
}
-
- spin_unlock_irqrestore(&ts->lock, flags);
+ status = spi_async(ts->spi, m);
+ if (status)
+ dev_err(&ts->spi->dev, "spi_async --> %d\n",
+ status);
}
static void ads7846_timer(unsigned long handle)
{
struct ads7846 *ts = (void *)handle;
int status = 0;
- unsigned long flags;
+
+ spin_lock_irq(&ts->lock);
+
+ if (unlikely(ts->msg_idx && !ts->pendown)) {
+ /* measurment cycle ended */
+ if (!device_suspended(&ts->spi->dev)) {
+ ts->irq_disabled = 0;
+ enable_irq(ts->spi->irq);
+ }
+ ts->pending = 0;
+ ts->msg_idx = 0;
+ } else {
+ /* pen is still down, continue with the measurement */
+ ts->msg_idx = 0;
+ status = spi_async(ts->spi, &ts->msg[0]);
+ if (status)
+ dev_err(&ts->spi->dev, "spi_async --> %d\n", status);
+ }
+
+ spin_unlock_irq(&ts->lock);
+}
+
+static irqreturn_t ads7846_irq(int irq, void *handle, struct pt_regs *regs)
+{
+ struct ads7846 *ts = handle;
+ unsigned long flags;
spin_lock_irqsave(&ts->lock, flags);
- if (!ts->pending) {
- ts->pending = 1;
+ if (likely(ts->get_pendown_state())) {
if (!ts->irq_disabled) {
+ /* REVISIT irq logic for many ARM chips has cloned a
+ * bug wherein disabling an irq in its handler won't
+ * work;(it's disabled lazily, and too late to work.
+ * until all their irq logic is fixed, we must shadow
+ * that state here.
+ */
ts->irq_disabled = 1;
disable_irq(ts->spi->irq);
+ ts->pending = 1;
+ mod_timer(&ts->timer, jiffies);
}
- status = spi_async(ts->spi, &ts->msg);
- if (status)
- dev_err(&ts->spi->dev, "spi_async --> %d\n",
- status);
}
spin_unlock_irqrestore(&ts->lock, flags);
-}
-static irqreturn_t ads7846_irq(int irq, void *handle, struct pt_regs *regs)
-{
- ads7846_timer((unsigned long) handle);
return IRQ_HANDLED;
}
/*--------------------------------------------------------------------------*/
-static int
-ads7846_suspend(struct spi_device *spi, pm_message_t message)
+/* Must be called with ts->lock held */
+static void ads7846_disable(struct ads7846 *ts)
{
- struct ads7846 *ts = dev_get_drvdata(&spi->dev);
- unsigned long flags;
+ if (ts->disabled)
+ return;
- spin_lock_irqsave(&ts->lock, flags);
-
- spi->dev.power.power_state = message;
+ ts->disabled = 1;
/* are we waiting for IRQ, or polling? */
- if (!ts->pendown) {
- if (!ts->irq_disabled) {
- ts->irq_disabled = 1;
- disable_irq(ts->spi->irq);
- }
+ if (!ts->pending) {
+ ts->irq_disabled = 1;
+ disable_irq(ts->spi->irq);
} else {
- /* polling; force a final SPI completion;
- * that will clean things up neatly
+ /* the timer will run at least once more, and
+ * leave everything in a clean state, IRQ disabled
*/
- if (!ts->pending)
- mod_timer(&ts->timer, jiffies);
-
- while (ts->pendown || ts->pending) {
- spin_unlock_irqrestore(&ts->lock, flags);
- udelay(10);
- spin_lock_irqsave(&ts->lock, flags);
+ while (ts->pending) {
+ spin_unlock_irq(&ts->lock);
+ msleep(1);
+ spin_lock_irq(&ts->lock);
}
}
@@ -389,17 +541,45 @@ ads7846_suspend(struct spi_device *spi, pm_message_t message)
* leave it that way after every request
*/
- spin_unlock_irqrestore(&ts->lock, flags);
+}
+
+/* Must be called with ts->lock held */
+static void ads7846_enable(struct ads7846 *ts)
+{
+ if (!ts->disabled)
+ return;
+
+ ts->disabled = 0;
+ ts->irq_disabled = 0;
+ enable_irq(ts->spi->irq);
+}
+
+static int ads7846_suspend(struct spi_device *spi, pm_message_t message)
+{
+ struct ads7846 *ts = dev_get_drvdata(&spi->dev);
+
+ spin_lock_irq(&ts->lock);
+
+ spi->dev.power.power_state = message;
+ ads7846_disable(ts);
+
+ spin_unlock_irq(&ts->lock);
+
return 0;
+
}
static int ads7846_resume(struct spi_device *spi)
{
struct ads7846 *ts = dev_get_drvdata(&spi->dev);
- ts->irq_disabled = 0;
- enable_irq(ts->spi->irq);
+ spin_lock_irq(&ts->lock);
+
spi->dev.power.power_state = PMSG_ON;
+ ads7846_enable(ts);
+
+ spin_unlock_irq(&ts->lock);
+
return 0;
}
@@ -408,6 +588,7 @@ static int __devinit ads7846_probe(struct spi_device *spi)
struct ads7846 *ts;
struct input_dev *input_dev;
struct ads7846_platform_data *pdata = spi->dev.platform_data;
+ struct spi_message *m;
struct spi_transfer *x;
int err;
@@ -428,6 +609,11 @@ static int __devinit ads7846_probe(struct spi_device *spi)
return -EINVAL;
}
+ if (pdata->get_pendown_state == NULL) {
+ dev_dbg(&spi->dev, "no get_pendown_state function?\n");
+ return -EINVAL;
+ }
+
/* We'd set the wordsize to 12 bits ... except that some controllers
* will then treat the 8 bit command words as 12 bits (and drop the
* four MSBs of the 12 bit result). Result: inputs must be shifted
@@ -451,9 +637,21 @@ static int __devinit ads7846_probe(struct spi_device *spi)
ts->timer.data = (unsigned long) ts;
ts->timer.function = ads7846_timer;
+ spin_lock_init(&ts->lock);
+
ts->model = pdata->model ? : 7846;
ts->vref_delay_usecs = pdata->vref_delay_usecs ? : 100;
ts->x_plate_ohms = pdata->x_plate_ohms ? : 400;
+ ts->pressure_max = pdata->pressure_max ? : ~0;
+ if (pdata->debounce_max) {
+ ts->debounce_max = pdata->debounce_max;
+ ts->debounce_tol = pdata->debounce_tol;
+ ts->debounce_rep = pdata->debounce_rep;
+ if (ts->debounce_rep > ts->debounce_max + 1)
+ ts->debounce_rep = ts->debounce_max - 1;
+ } else
+ ts->debounce_tol = ~0;
+ ts->get_pendown_state = pdata->get_pendown_state;
snprintf(ts->phys, sizeof(ts->phys), "%s/input0", spi->dev.bus_id);
@@ -477,60 +675,100 @@ static int __devinit ads7846_probe(struct spi_device *spi)
/* set up the transfers to read touchscreen state; this assumes we
* use formula #2 for pressure, not #3.
*/
- INIT_LIST_HEAD(&ts->msg.transfers);
+ m = &ts->msg[0];
x = ts->xfer;
+ spi_message_init(m);
+
/* y- still on; turn on only y+ (and ADC) */
ts->read_y = READ_Y;
x->tx_buf = &ts->read_y;
x->len = 1;
- spi_message_add_tail(x, &ts->msg);
+ spi_message_add_tail(x, m);
x++;
x->rx_buf = &ts->tc.y;
x->len = 2;
- spi_message_add_tail(x, &ts->msg);
+ spi_message_add_tail(x, m);
+
+ m->complete = ads7846_debounce;
+ m->context = ts;
+
+ m++;
+ spi_message_init(m);
+
+ /* turn y- off, x+ on, then leave in lowpower */
+ x++;
+ ts->read_x = READ_X;
+ x->tx_buf = &ts->read_x;
+ x->len = 1;
+ spi_message_add_tail(x, m);
+
+ x++;
+ x->rx_buf = &ts->tc.x;
+ x->len = 2;
+ spi_message_add_tail(x, m);
+
+ m->complete = ads7846_debounce;
+ m->context = ts;
/* turn y+ off, x- on; we'll use formula #2 */
if (ts->model == 7846) {
+ m++;
+ spi_message_init(m);
+
x++;
ts->read_z1 = READ_Z1;
x->tx_buf = &ts->read_z1;
x->len = 1;
- spi_message_add_tail(x, &ts->msg);
+ spi_message_add_tail(x, m);
x++;
x->rx_buf = &ts->tc.z1;
x->len = 2;
- spi_message_add_tail(x, &ts->msg);
+ spi_message_add_tail(x, m);
+
+ m->complete = ads7846_debounce;
+ m->context = ts;
+
+ m++;
+ spi_message_init(m);
x++;
ts->read_z2 = READ_Z2;
x->tx_buf = &ts->read_z2;
x->len = 1;
- spi_message_add_tail(x, &ts->msg);
+ spi_message_add_tail(x, m);
x++;
x->rx_buf = &ts->tc.z2;
x->len = 2;
- spi_message_add_tail(x, &ts->msg);
+ spi_message_add_tail(x, m);
+
+ m->complete = ads7846_debounce;
+ m->context = ts;
}
- /* turn y- off, x+ on, then leave in lowpower */
+ /* power down */
+ m++;
+ spi_message_init(m);
+
x++;
- ts->read_x = READ_X;
- x->tx_buf = &ts->read_x;
+ ts->pwrdown = PWRDOWN;
+ x->tx_buf = &ts->pwrdown;
x->len = 1;
- spi_message_add_tail(x, &ts->msg);
+ spi_message_add_tail(x, m);
x++;
- x->rx_buf = &ts->tc.x;
+ x->rx_buf = &ts->dummy;
x->len = 2;
CS_CHANGE(*x);
- spi_message_add_tail(x, &ts->msg);
+ spi_message_add_tail(x, m);
- ts->msg.complete = ads7846_rx;
- ts->msg.context = ts;
+ m->complete = ads7846_rx;
+ m->context = ts;
+
+ ts->last_msg = m;
if (request_irq(spi->irq, ads7846_irq,
SA_SAMPLE_RANDOM | SA_TRIGGER_FALLING,
@@ -559,13 +797,27 @@ static int __devinit ads7846_probe(struct spi_device *spi)
device_create_file(&spi->dev, &dev_attr_vbatt);
device_create_file(&spi->dev, &dev_attr_vaux);
+ device_create_file(&spi->dev, &dev_attr_pen_down);
+
+ device_create_file(&spi->dev, &dev_attr_disable);
+
err = input_register_device(input_dev);
if (err)
- goto err_free_irq;
+ goto err_remove_attr;
return 0;
- err_free_irq:
+ err_remove_attr:
+ device_remove_file(&spi->dev, &dev_attr_disable);
+ device_remove_file(&spi->dev, &dev_attr_pen_down);
+ if (ts->model == 7846) {
+ device_remove_file(&spi->dev, &dev_attr_temp1);
+ device_remove_file(&spi->dev, &dev_attr_temp0);
+ }
+ if (ts->model != 7845)
+ device_remove_file(&spi->dev, &dev_attr_vbatt);
+ device_remove_file(&spi->dev, &dev_attr_vaux);
+
free_irq(spi->irq, ts);
err_free_mem:
input_free_device(input_dev);
@@ -577,20 +829,24 @@ static int __devexit ads7846_remove(struct spi_device *spi)
{
struct ads7846 *ts = dev_get_drvdata(&spi->dev);
+ input_unregister_device(ts->input);
+
ads7846_suspend(spi, PMSG_SUSPEND);
- free_irq(ts->spi->irq, ts);
- if (ts->irq_disabled)
- enable_irq(ts->spi->irq);
+ device_remove_file(&spi->dev, &dev_attr_disable);
+ device_remove_file(&spi->dev, &dev_attr_pen_down);
if (ts->model == 7846) {
- device_remove_file(&spi->dev, &dev_attr_temp0);
device_remove_file(&spi->dev, &dev_attr_temp1);
+ device_remove_file(&spi->dev, &dev_attr_temp0);
}
if (ts->model != 7845)
device_remove_file(&spi->dev, &dev_attr_vbatt);
device_remove_file(&spi->dev, &dev_attr_vaux);
- input_unregister_device(ts->input);
+ free_irq(ts->spi->irq, ts);
+ /* suspend left the IRQ disabled */
+ enable_irq(ts->spi->irq);
+
kfree(ts);
dev_dbg(&spi->dev, "unregistered touchscreen\n");
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 6081941de1b..4070eff6f0f 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -315,10 +315,11 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
if (r1_bio->bios[mirror] == bio)
break;
- if (error == -ENOTSUPP && test_bit(R1BIO_Barrier, &r1_bio->state)) {
+ if (error == -EOPNOTSUPP && test_bit(R1BIO_Barrier, &r1_bio->state)) {
set_bit(BarriersNotsupp, &conf->mirrors[mirror].rdev->flags);
set_bit(R1BIO_BarrierRetry, &r1_bio->state);
r1_bio->mddev->barriers_work = 0;
+ /* Don't rdev_dec_pending in this branch - keep it for the retry */
} else {
/*
* this branch is our 'one mirror IO has finished' event handler:
@@ -365,6 +366,7 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
}
}
}
+ rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
}
/*
*
@@ -374,11 +376,9 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
if (atomic_dec_and_test(&r1_bio->remaining)) {
if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
reschedule_retry(r1_bio);
- /* Don't dec_pending yet, we want to hold
- * the reference over the retry
- */
goto out;
}
+ /* it really is the end of this request */
if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
/* free extra copy of the data pages */
int i = bio->bi_vcnt;
@@ -393,8 +393,6 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
md_write_end(r1_bio->mddev);
raid_end_bio_io(r1_bio);
}
-
- rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
out:
if (to_put)
bio_put(to_put);
@@ -753,18 +751,24 @@ static int make_request(request_queue_t *q, struct bio * bio)
const int rw = bio_data_dir(bio);
int do_barriers;
- if (unlikely(!mddev->barriers_work && bio_barrier(bio))) {
- bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
- return 0;
- }
-
/*
* Register the new request and wait if the reconstruction
* thread has put up a bar for new requests.
* Continue immediately if no resync is active currently.
+ * We test barriers_work *after* md_write_start as md_write_start
+ * may cause the first superblock write, and that will check out
+ * if barriers work.
*/
+
md_write_start(mddev, bio); /* wait on superblock update early */
+ if (unlikely(!mddev->barriers_work && bio_barrier(bio))) {
+ if (rw == WRITE)
+ md_write_end(mddev);
+ bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
+ return 0;
+ }
+
wait_barrier(conf);
disk_stat_inc(mddev->gendisk, ios[rw]);
@@ -1404,10 +1408,11 @@ static void raid1d(mddev_t *mddev)
unplug = 1;
} else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
/* some requests in the r1bio were BIO_RW_BARRIER
- * requests which failed with -ENOTSUPP. Hohumm..
+ * requests which failed with -EOPNOTSUPP. Hohumm..
* Better resubmit without the barrier.
* We know which devices to resubmit for, because
* all others have had their bios[] entry cleared.
+ * We already have a nr_pending reference on these rdevs.
*/
int i;
clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 617012bc107..1440935414e 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1407,43 +1407,54 @@ static void raid10d(mddev_t *mddev)
if (s > (PAGE_SIZE>>9))
s = PAGE_SIZE >> 9;
+ rcu_read_lock();
do {
int d = r10_bio->devs[sl].devnum;
- rdev = conf->mirrors[d].rdev;
+ rdev = rcu_dereference(conf->mirrors[d].rdev);
if (rdev &&
- test_bit(In_sync, &rdev->flags) &&
- sync_page_io(rdev->bdev,
- r10_bio->devs[sl].addr +
- sect + rdev->data_offset,
- s<<9,
- conf->tmppage, READ))
- success = 1;
- else {
- sl++;
- if (sl == conf->copies)
- sl = 0;
+ test_bit(In_sync, &rdev->flags)) {
+ atomic_inc(&rdev->nr_pending);
+ rcu_read_unlock();
+ success = sync_page_io(rdev->bdev,
+ r10_bio->devs[sl].addr +
+ sect + rdev->data_offset,
+ s<<9,
+ conf->tmppage, READ);
+ rdev_dec_pending(rdev, mddev);
+ rcu_read_lock();
+ if (success)
+ break;
}
+ sl++;
+ if (sl == conf->copies)
+ sl = 0;
} while (!success && sl != r10_bio->read_slot);
+ rcu_read_unlock();
if (success) {
int start = sl;
/* write it back and re-read */
+ rcu_read_lock();
while (sl != r10_bio->read_slot) {
int d;
if (sl==0)
sl = conf->copies;
sl--;
d = r10_bio->devs[sl].devnum;
- rdev = conf->mirrors[d].rdev;
- atomic_add(s, &rdev->corrected_errors);
+ rdev = rcu_dereference(conf->mirrors[d].rdev);
if (rdev &&
test_bit(In_sync, &rdev->flags)) {
+ atomic_inc(&rdev->nr_pending);
+ rcu_read_unlock();
+ atomic_add(s, &rdev->corrected_errors);
if (sync_page_io(rdev->bdev,
r10_bio->devs[sl].addr +
sect + rdev->data_offset,
s<<9, conf->tmppage, WRITE) == 0)
/* Well, this device is dead */
md_error(mddev, rdev);
+ rdev_dec_pending(rdev, mddev);
+ rcu_read_lock();
}
}
sl = start;
@@ -1453,17 +1464,22 @@ static void raid10d(mddev_t *mddev)
sl = conf->copies;
sl--;
d = r10_bio->devs[sl].devnum;
- rdev = conf->mirrors[d].rdev;
+ rdev = rcu_dereference(conf->mirrors[d].rdev);
if (rdev &&
test_bit(In_sync, &rdev->flags)) {
+ atomic_inc(&rdev->nr_pending);
+ rcu_read_unlock();
if (sync_page_io(rdev->bdev,
r10_bio->devs[sl].addr +
sect + rdev->data_offset,
s<<9, conf->tmppage, READ) == 0)
/* Well, this device is dead */
md_error(mddev, rdev);
+ rdev_dec_pending(rdev, mddev);
+ rcu_read_lock();
}
}
+ rcu_read_unlock();
} else {
/* Cannot read from anywhere -- bye bye array */
md_error(mddev, conf->mirrors[r10_bio->devs[r10_bio->read_slot].devnum].rdev);
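
A simplified, user-space illustration of the reference pattern used in the raid10 hunks above (a mutex stands in for rcu_read_lock() and all names are invented; this is not the md code): the lock protects only the lookup, the object is pinned with a refcount before the blocking I/O, and the pin is dropped once the slow work is done.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

struct dev {
	atomic_int nr_pending;
};

static struct dev mirror = { 0 };
static pthread_mutex_t lookup_lock = PTHREAD_MUTEX_INITIALIZER;

static int slow_io(struct dev *d)
{
	(void)d;
	usleep(1000);			/* stands in for sync_page_io() */
	return 1;			/* pretend the read succeeded */
}

int main(void)
{
	int success;

	pthread_mutex_lock(&lookup_lock);	/* rcu_read_lock() */
	struct dev *d = &mirror;		/* rcu_dereference(...) */
	atomic_fetch_add(&d->nr_pending, 1);	/* pin before sleeping */
	pthread_mutex_unlock(&lookup_lock);	/* rcu_read_unlock() */

	success = slow_io(d);			/* may block; no lock held */

	atomic_fetch_sub(&d->nr_pending, 1);	/* rdev_dec_pending(...) */
	pthread_mutex_lock(&lookup_lock);	/* rcu_read_lock() again for the next lookup */
	pthread_mutex_unlock(&lookup_lock);

	printf("read %s, pending=%d\n", success ? "ok" : "failed",
	       atomic_load(&mirror.nr_pending));
	return 0;
}
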
diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
index f9d87b86492..320b3d9384b 100644
--- a/drivers/media/video/cx88/cx88-alsa.c
+++ b/drivers/media/video/cx88/cx88-alsa.c
@@ -616,7 +616,7 @@ static struct snd_kcontrol_new snd_cx88_capture_volume = {
* Only boards with eeprom and byte 1 at eeprom=1 have it
*/
-static struct pci_device_id cx88_audio_pci_tbl[] = {
+static struct pci_device_id cx88_audio_pci_tbl[] __devinitdata = {
{0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
{0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
{0, }
diff --git a/drivers/mmc/at91_mci.c b/drivers/mmc/at91_mci.c
index 6061c2d101a..88f0eef9cf3 100644
--- a/drivers/mmc/at91_mci.c
+++ b/drivers/mmc/at91_mci.c
@@ -621,9 +621,6 @@ static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct at91mci_host *host = mmc_priv(mmc);
unsigned long at91_master_clock = clk_get_rate(mci_clk);
- DBG("Clock %uHz, busmode %u, powermode %u, Vdd %u\n",
- ios->clock, ios->bus_mode, ios->power_mode, ios->vdd);
-
if (host)
host->bus_mode = ios->bus_mode;
else
diff --git a/drivers/mmc/au1xmmc.c b/drivers/mmc/au1xmmc.c
index c0326bbc5f2..914d62b2406 100644
--- a/drivers/mmc/au1xmmc.c
+++ b/drivers/mmc/au1xmmc.c
@@ -720,10 +720,6 @@ static void au1xmmc_set_ios(struct mmc_host* mmc, struct mmc_ios* ios)
{
struct au1xmmc_host *host = mmc_priv(mmc);
- DBG("set_ios (power=%u, clock=%uHz, vdd=%u, mode=%u)\n",
- host->id, ios->power_mode, ios->clock, ios->vdd,
- ios->bus_mode);
-
if (ios->power_mode == MMC_POWER_OFF)
au1xmmc_set_power(host, 0);
else if (ios->power_mode == MMC_POWER_ON) {
diff --git a/drivers/mmc/imxmmc.c b/drivers/mmc/imxmmc.c
index ffb7f55d346..79358e223f5 100644
--- a/drivers/mmc/imxmmc.c
+++ b/drivers/mmc/imxmmc.c
@@ -102,6 +102,7 @@ struct imxmci_host {
#define IMXMCI_PEND_CPU_DATA_b 5
#define IMXMCI_PEND_CARD_XCHG_b 6
#define IMXMCI_PEND_SET_INIT_b 7
+#define IMXMCI_PEND_STARTED_b 8
#define IMXMCI_PEND_IRQ_m (1 << IMXMCI_PEND_IRQ_b)
#define IMXMCI_PEND_DMA_END_m (1 << IMXMCI_PEND_DMA_END_b)
@@ -111,6 +112,7 @@ struct imxmci_host {
#define IMXMCI_PEND_CPU_DATA_m (1 << IMXMCI_PEND_CPU_DATA_b)
#define IMXMCI_PEND_CARD_XCHG_m (1 << IMXMCI_PEND_CARD_XCHG_b)
#define IMXMCI_PEND_SET_INIT_m (1 << IMXMCI_PEND_SET_INIT_b)
+#define IMXMCI_PEND_STARTED_m (1 << IMXMCI_PEND_STARTED_b)
static void imxmci_stop_clock(struct imxmci_host *host)
{
@@ -131,23 +133,52 @@ static void imxmci_stop_clock(struct imxmci_host *host)
dev_dbg(mmc_dev(host->mmc), "imxmci_stop_clock blocked, no luck\n");
}
-static void imxmci_start_clock(struct imxmci_host *host)
+static int imxmci_start_clock(struct imxmci_host *host)
{
- int i = 0;
+ unsigned int trials = 0;
+ unsigned int delay_limit = 128;
+ unsigned long flags;
+
MMC_STR_STP_CLK &= ~STR_STP_CLK_STOP_CLK;
- while(i < 0x1000) {
- if(!(i & 0x7f))
- MMC_STR_STP_CLK |= STR_STP_CLK_START_CLK;
- if(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN) {
- /* Check twice before cut */
+ clear_bit(IMXMCI_PEND_STARTED_b, &host->pending_events);
+
+ /*
+ * Command the clock to start; this usually succeeds in fewer
+ * than 6 delay loops, but during card detection (low clockrate)
+ * it can take up to 5000 delay loops and sometimes fails on the first attempt
+ */
+ MMC_STR_STP_CLK |= STR_STP_CLK_START_CLK;
+
+ do {
+ unsigned int delay = delay_limit;
+
+ while(delay--){
if(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN)
- return;
+ /* Check twice before cut */
+ if(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN)
+ return 0;
+
+ if(test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
+ return 0;
}
- i++;
- }
- dev_dbg(mmc_dev(host->mmc), "imxmci_start_clock blocked, no luck\n");
+ local_irq_save(flags);
+ /*
+ * Ensure that the start request is not doubled under any circumstances.
+ * It is possible that the clock-running state is missed because some other
+ * IRQ or scheduling delays this function's execution and the clock has
+ * already been stopped by other means (response processing, SDHC HW)
+ */
+ if(!test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
+ MMC_STR_STP_CLK |= STR_STP_CLK_START_CLK;
+ local_irq_restore(flags);
+
+ } while(++trials<256);
+
+ dev_err(mmc_dev(host->mmc), "imxmci_start_clock blocked, no luck\n");
+
+ return -1;
}
static void imxmci_softreset(void)
@@ -498,7 +529,7 @@ static int imxmci_data_done(struct imxmci_host *host, unsigned int stat)
data_error = imxmci_finish_data(host, stat);
- if (host->req->stop && (data_error == MMC_ERR_NONE)) {
+ if (host->req->stop) {
imxmci_stop_clock(host);
imxmci_start_cmd(host, host->req->stop, 0);
} else {
@@ -622,6 +653,7 @@ static irqreturn_t imxmci_irq(int irq, void *devid, struct pt_regs *regs)
atomic_set(&host->stuck_timeout, 0);
host->status_reg = stat;
set_bit(IMXMCI_PEND_IRQ_b, &host->pending_events);
+ set_bit(IMXMCI_PEND_STARTED_b, &host->pending_events);
tasklet_schedule(&host->tasklet);
return IRQ_RETVAL(handled);;
@@ -775,10 +807,6 @@ static void imxmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct imxmci_host *host = mmc_priv(mmc);
int prescaler;
- dev_dbg(mmc_dev(host->mmc), "clock %u power %u vdd %u width %u\n",
- ios->clock, ios->power_mode, ios->vdd,
- (ios->bus_width==MMC_BUS_WIDTH_4)?4:1);
-
if( ios->bus_width==MMC_BUS_WIDTH_4 ) {
host->actual_bus_width = MMC_BUS_WIDTH_4;
imx_gpio_mode(PB11_PF_SD_DAT3);
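
The rewritten imxmci_start_clock() above turns an unbounded polling loop into a bounded retry: poll the status for a limited number of delay iterations, re-issue the start command if the clock still is not running, and give up after a fixed number of trials. A rough user-space stand-in (fake status register, invented names, not the driver code):

#include <stdio.h>

static int fake_status_calls;

static int clock_running(void)
{
	/* pretend the clock comes up after ~200 polls */
	return ++fake_status_calls > 200;
}

static void issue_start_clock(void)
{
	/* stands in for MMC_STR_STP_CLK |= STR_STP_CLK_START_CLK */
}

static int start_clock(void)
{
	unsigned int trials;

	issue_start_clock();
	for (trials = 0; trials < 256; trials++) {
		unsigned int delay;

		for (delay = 0; delay < 128; delay++) {
			if (clock_running() && clock_running())	/* check twice before cut */
				return 0;
		}
		issue_start_clock();				/* re-issue and keep trying */
	}
	return -1;						/* blocked, no luck */
}

int main(void)
{
	printf("start_clock: %s\n", start_clock() == 0 ? "ok" : "failed");
	return 0;
}
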
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index da6ddd910fc..1ca2c8b9c9b 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -59,21 +59,23 @@ static const unsigned int tacc_mant[] = {
/**
- * mmc_request_done - finish processing an MMC command
- * @host: MMC host which completed command
- * @mrq: MMC request which completed
+ * mmc_request_done - finish processing an MMC request
+ * @host: MMC host which completed request
+ * @mrq: MMC request which completed
*
* MMC drivers should call this function when they have completed
- * their processing of a command. This should be called before the
- * data part of the command has completed.
+ * their processing of a request.
*/
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
struct mmc_command *cmd = mrq->cmd;
- int err = mrq->cmd->error;
- pr_debug("MMC: req done (%02x): %d: %08x %08x %08x %08x\n",
- cmd->opcode, err, cmd->resp[0], cmd->resp[1],
- cmd->resp[2], cmd->resp[3]);
+ int err = cmd->error;
+
+ pr_debug("%s: req done (CMD%u): %d/%d/%d: %08x %08x %08x %08x\n",
+ mmc_hostname(host), cmd->opcode, err,
+ mrq->data ? mrq->data->error : 0,
+ mrq->stop ? mrq->stop->error : 0,
+ cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);
if (err && cmd->retries) {
cmd->retries--;
@@ -97,8 +99,9 @@ EXPORT_SYMBOL(mmc_request_done);
void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
- pr_debug("MMC: starting cmd %02x arg %08x flags %08x\n",
- mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags);
+ pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
+ mmc_hostname(host), mrq->cmd->opcode,
+ mrq->cmd->arg, mrq->cmd->flags);
WARN_ON(host->card_busy == NULL);
@@ -312,6 +315,18 @@ void mmc_release_host(struct mmc_host *host)
EXPORT_SYMBOL(mmc_release_host);
+static inline void mmc_set_ios(struct mmc_host *host)
+{
+ struct mmc_ios *ios = &host->ios;
+
+ pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u width %u\n",
+ mmc_hostname(host), ios->clock, ios->bus_mode,
+ ios->power_mode, ios->chip_select, ios->vdd,
+ ios->bus_width);
+
+ host->ops->set_ios(host, ios);
+}
+
static int mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
int err;
@@ -364,7 +379,7 @@ static int mmc_select_card(struct mmc_host *host, struct mmc_card *card)
}
}
- host->ops->set_ios(host, &host->ios);
+ mmc_set_ios(host);
return MMC_ERR_NONE;
}
@@ -415,7 +430,7 @@ static u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
ocr = 3 << bit;
host->ios.vdd = bit;
- host->ops->set_ios(host, &host->ios);
+ mmc_set_ios(host);
} else {
ocr = 0;
}
@@ -549,6 +564,7 @@ static void mmc_decode_csd(struct mmc_card *card)
csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
+ csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
} else {
@@ -583,6 +599,7 @@ static void mmc_decode_csd(struct mmc_card *card)
csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
+ csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
}
@@ -666,7 +683,7 @@ static void mmc_idle_cards(struct mmc_host *host)
struct mmc_command cmd;
host->ios.chip_select = MMC_CS_HIGH;
- host->ops->set_ios(host, &host->ios);
+ mmc_set_ios(host);
mmc_delay(1);
@@ -679,7 +696,7 @@ static void mmc_idle_cards(struct mmc_host *host)
mmc_delay(1);
host->ios.chip_select = MMC_CS_DONTCARE;
- host->ops->set_ios(host, &host->ios);
+ mmc_set_ios(host);
mmc_delay(1);
}
@@ -704,13 +721,13 @@ static void mmc_power_up(struct mmc_host *host)
host->ios.chip_select = MMC_CS_DONTCARE;
host->ios.power_mode = MMC_POWER_UP;
host->ios.bus_width = MMC_BUS_WIDTH_1;
- host->ops->set_ios(host, &host->ios);
+ mmc_set_ios(host);
mmc_delay(1);
host->ios.clock = host->f_min;
host->ios.power_mode = MMC_POWER_ON;
- host->ops->set_ios(host, &host->ios);
+ mmc_set_ios(host);
mmc_delay(2);
}
@@ -723,7 +740,7 @@ static void mmc_power_off(struct mmc_host *host)
host->ios.chip_select = MMC_CS_DONTCARE;
host->ios.power_mode = MMC_POWER_OFF;
host->ios.bus_width = MMC_BUS_WIDTH_1;
- host->ops->set_ios(host, &host->ios);
+ mmc_set_ios(host);
}
static int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
@@ -971,7 +988,8 @@ static unsigned int mmc_calculate_clock(struct mmc_host *host)
if (!mmc_card_dead(card) && max_dtr > card->csd.max_dtr)
max_dtr = card->csd.max_dtr;
- pr_debug("MMC: selected %d.%03dMHz transfer rate\n",
+ pr_debug("%s: selected %d.%03dMHz transfer rate\n",
+ mmc_hostname(host),
max_dtr / 1000000, (max_dtr / 1000) % 1000);
return max_dtr;
@@ -1046,7 +1064,7 @@ static void mmc_setup(struct mmc_host *host)
} else {
host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
host->ios.clock = host->f_min;
- host->ops->set_ios(host, &host->ios);
+ mmc_set_ios(host);
/*
* We should remember the OCR mask from the existing
@@ -1082,7 +1100,7 @@ static void mmc_setup(struct mmc_host *host)
* Ok, now switch to push-pull mode.
*/
host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
- host->ops->set_ios(host, &host->ios);
+ mmc_set_ios(host);
mmc_read_csds(host);
@@ -1128,7 +1146,7 @@ static void mmc_rescan(void *data)
* attached cards and the host support.
*/
host->ios.clock = mmc_calculate_clock(host);
- host->ops->set_ios(host, &host->ios);
+ mmc_set_ios(host);
}
mmc_release_host(host);
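
The mmc.c changes above replace every open-coded host->ops->set_ios(host, &host->ios) call with one static mmc_set_ios() helper so the debug print of the full ios state lives in a single place. The same wrapper idea in a self-contained, user-space form (names invented, not the MMC core):

#include <stdio.h>

struct ios { unsigned int clock, vdd; };
struct host_ops { void (*set_ios)(struct ios *ios); };
struct host { struct ios ios; const struct host_ops *ops; };

static void apply_ios(struct ios *ios)
{
	/* stands in for the driver's real set_ios callback */
	printf("hw: clock=%uHz vdd=%u\n", ios->clock, ios->vdd);
}

static const struct host_ops ops = { .set_ios = apply_ios };

/* single choke point: log the state, then call the driver */
static void host_set_ios(struct host *h)
{
	printf("core: clock=%uHz vdd=%u\n", h->ios.clock, h->ios.vdd);
	h->ops->set_ios(&h->ios);
}

int main(void)
{
	struct host h = { .ios = { .clock = 400000, .vdd = 21 }, .ops = &ops };

	h.ios.clock = 25000000;		/* callers just tweak ios and call the helper */
	host_set_ios(&h);
	return 0;
}
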
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
index 8eb2a2ede64..06bd1f4cb9b 100644
--- a/drivers/mmc/mmc_block.c
+++ b/drivers/mmc/mmc_block.c
@@ -187,6 +187,12 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
brq.cmd.opcode = MMC_WRITE_BLOCK;
brq.data.flags |= MMC_DATA_WRITE;
brq.data.blocks = 1;
+
+ /*
+ * Scale up the timeout by the r2w factor
+ */
+ brq.data.timeout_ns <<= card->csd.r2w_factor;
+ brq.data.timeout_clks <<= card->csd.r2w_factor;
}
if (brq.data.blocks > 1) {
diff --git a/drivers/mmc/mmci.c b/drivers/mmc/mmci.c
index df7e861e2fc..da8e4d7339c 100644
--- a/drivers/mmc/mmci.c
+++ b/drivers/mmc/mmci.c
@@ -402,9 +402,6 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct mmci_host *host = mmc_priv(mmc);
u32 clk = 0, pwr = 0;
- DBG(host, "clock %uHz busmode %u powermode %u Vdd %u\n",
- ios->clock, ios->bus_mode, ios->power_mode, ios->vdd);
-
if (ios->clock) {
if (ios->clock >= host->mclk) {
clk = MCI_CLK_BYPASS;
diff --git a/drivers/mmc/pxamci.c b/drivers/mmc/pxamci.c
index eb42cb34942..f97b472085c 100644
--- a/drivers/mmc/pxamci.c
+++ b/drivers/mmc/pxamci.c
@@ -198,7 +198,6 @@ static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd,
static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq)
{
- pr_debug("PXAMCI: request done\n");
host->mrq = NULL;
host->cmd = NULL;
host->data = NULL;
@@ -291,7 +290,7 @@ static int pxamci_data_done(struct pxamci_host *host, unsigned int stat)
pxamci_disable_irq(host, DATA_TRAN_DONE);
host->data = NULL;
- if (host->mrq->stop && data->error == MMC_ERR_NONE) {
+ if (host->mrq->stop) {
pxamci_stop_clock(host);
pxamci_start_cmd(host, host->mrq->stop, 0);
} else {
@@ -309,12 +308,10 @@ static irqreturn_t pxamci_irq(int irq, void *devid, struct pt_regs *regs)
ireg = readl(host->base + MMC_I_REG);
- pr_debug("PXAMCI: irq %08x\n", ireg);
-
if (ireg) {
unsigned stat = readl(host->base + MMC_STAT);
- pr_debug("PXAMCI: stat %08x\n", stat);
+ pr_debug("PXAMCI: irq %08x stat %08x\n", ireg, stat);
if (ireg & END_CMD_RES)
handled |= pxamci_cmd_done(host, stat);
@@ -368,10 +365,6 @@ static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct pxamci_host *host = mmc_priv(mmc);
- pr_debug("pxamci_set_ios: clock %u power %u vdd %u.%02u\n",
- ios->clock, ios->power_mode, ios->vdd / 100,
- ios->vdd % 100);
-
if (ios->clock) {
unsigned int clk = CLOCKRATE / ios->clock;
if (CLOCKRATE / clk > ios->clock)
@@ -397,7 +390,7 @@ static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
host->cmdat |= CMDAT_INIT;
}
- pr_debug("pxamci_set_ios: clkrt = %x cmdat = %x\n",
+ pr_debug("PXAMCI: clkrt = %x cmdat = %x\n",
host->clkrt, host->cmdat);
}
diff --git a/drivers/mmc/sdhci.c b/drivers/mmc/sdhci.c
index bdbfca05002..b0053280ff2 100644
--- a/drivers/mmc/sdhci.c
+++ b/drivers/mmc/sdhci.c
@@ -570,10 +570,6 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
spin_lock_irqsave(&host->lock, flags);
- DBG("clock %uHz busmode %u powermode %u cs %u Vdd %u width %u\n",
- ios->clock, ios->bus_mode, ios->power_mode, ios->chip_select,
- ios->vdd, ios->bus_width);
-
/*
* Reset the chip on each power off.
* Should clear out any weird states.
diff --git a/drivers/mmc/wbsd.c b/drivers/mmc/wbsd.c
index 511f7b0b31d..39b3d97f891 100644
--- a/drivers/mmc/wbsd.c
+++ b/drivers/mmc/wbsd.c
@@ -931,10 +931,6 @@ static void wbsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct wbsd_host *host = mmc_priv(mmc);
u8 clk, setup, pwr;
- DBGF("clock %uHz busmode %u powermode %u cs %u Vdd %u width %u\n",
- ios->clock, ios->bus_mode, ios->power_mode, ios->chip_select,
- ios->vdd, ios->bus_width);
-
spin_lock_bh(&host->lock);
/*
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 9788b1ef2e7..f7235c9bc42 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -106,6 +106,7 @@
* 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
* 0.52: 20 Jan 2006: Add MSI/MSIX support.
* 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
+ * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
*
* Known bugs:
* We suspect that on some hardware no TX done interrupts are generated.
@@ -117,7 +118,7 @@
* DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
* superfluous timer interrupts from the nic.
*/
-#define FORCEDETH_VERSION "0.53"
+#define FORCEDETH_VERSION "0.54"
#define DRV_NAME "forcedeth"
#include <linux/module.h>
@@ -710,6 +711,72 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
}
}
+static int using_multi_irqs(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+
+ if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+ ((np->msi_flags & NV_MSI_X_ENABLED) &&
+ ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
+ return 0;
+ else
+ return 1;
+}
+
+static void nv_enable_irq(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+
+ if (!using_multi_irqs(dev)) {
+ if (np->msi_flags & NV_MSI_X_ENABLED)
+ enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+ else
+ enable_irq(dev->irq);
+ } else {
+ enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+ enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+ enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+ }
+}
+
+static void nv_disable_irq(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+
+ if (!using_multi_irqs(dev)) {
+ if (np->msi_flags & NV_MSI_X_ENABLED)
+ disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+ else
+ disable_irq(dev->irq);
+ } else {
+ disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+ disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+ disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+ }
+}
+
+/* In MSIX mode, a write to irqmask behaves as XOR */
+static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
+{
+ u8 __iomem *base = get_hwbase(dev);
+
+ writel(mask, base + NvRegIrqMask);
+}
+
+static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+
+ if (np->msi_flags & NV_MSI_X_ENABLED) {
+ writel(mask, base + NvRegIrqMask);
+ } else {
+ if (np->msi_flags & NV_MSI_ENABLED)
+ writel(0, base + NvRegMSIIrqMask);
+ writel(0, base + NvRegIrqMask);
+ }
+}
+
#define MII_READ (-1)
/* mii_rw: read/write a register on the PHY.
*
@@ -1019,24 +1086,25 @@ static void nv_do_rx_refill(unsigned long data)
struct net_device *dev = (struct net_device *) data;
struct fe_priv *np = netdev_priv(dev);
-
- if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
- ((np->msi_flags & NV_MSI_X_ENABLED) &&
- ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
- disable_irq(dev->irq);
+ if (!using_multi_irqs(dev)) {
+ if (np->msi_flags & NV_MSI_X_ENABLED)
+ disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+ else
+ disable_irq(dev->irq);
} else {
disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
}
if (nv_alloc_rx(dev)) {
- spin_lock(&np->lock);
+ spin_lock_irq(&np->lock);
if (!np->in_shutdown)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
- spin_unlock(&np->lock);
+ spin_unlock_irq(&np->lock);
}
- if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
- ((np->msi_flags & NV_MSI_X_ENABLED) &&
- ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
- enable_irq(dev->irq);
+ if (!using_multi_irqs(dev)) {
+ if (np->msi_flags & NV_MSI_X_ENABLED)
+ enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+ else
+ enable_irq(dev->irq);
} else {
enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
}
@@ -1668,15 +1736,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
* guessed, there is probably a simpler approach.
* Changing the MTU is a rare event, it shouldn't matter.
*/
- if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
- ((np->msi_flags & NV_MSI_X_ENABLED) &&
- ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
- disable_irq(dev->irq);
- } else {
- disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
- disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
- disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
- }
+ nv_disable_irq(dev);
spin_lock_bh(&dev->xmit_lock);
spin_lock(&np->lock);
/* stop engines */
@@ -1709,15 +1769,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
nv_start_tx(dev);
spin_unlock(&np->lock);
spin_unlock_bh(&dev->xmit_lock);
- if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
- ((np->msi_flags & NV_MSI_X_ENABLED) &&
- ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
- enable_irq(dev->irq);
- } else {
- enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
- enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
- enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
- }
+ nv_enable_irq(dev);
}
return 0;
}
@@ -2108,16 +2160,16 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
if (!(events & np->irqmask))
break;
- spin_lock(&np->lock);
+ spin_lock_irq(&np->lock);
nv_tx_done(dev);
- spin_unlock(&np->lock);
+ spin_unlock_irq(&np->lock);
if (events & (NVREG_IRQ_TX_ERR)) {
dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
dev->name, events);
}
if (i > max_interrupt_work) {
- spin_lock(&np->lock);
+ spin_lock_irq(&np->lock);
/* disable interrupts on the nic */
writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
pci_push(base);
@@ -2127,7 +2179,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
- spin_unlock(&np->lock);
+ spin_unlock_irq(&np->lock);
break;
}
@@ -2157,14 +2209,14 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
nv_rx_process(dev);
if (nv_alloc_rx(dev)) {
- spin_lock(&np->lock);
+ spin_lock_irq(&np->lock);
if (!np->in_shutdown)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
- spin_unlock(&np->lock);
+ spin_unlock_irq(&np->lock);
}
if (i > max_interrupt_work) {
- spin_lock(&np->lock);
+ spin_lock_irq(&np->lock);
/* disable interrupts on the nic */
writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
pci_push(base);
@@ -2174,7 +2226,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
- spin_unlock(&np->lock);
+ spin_unlock_irq(&np->lock);
break;
}
@@ -2203,14 +2255,14 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
break;
if (events & NVREG_IRQ_LINK) {
- spin_lock(&np->lock);
+ spin_lock_irq(&np->lock);
nv_link_irq(dev);
- spin_unlock(&np->lock);
+ spin_unlock_irq(&np->lock);
}
if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
- spin_lock(&np->lock);
+ spin_lock_irq(&np->lock);
nv_linkchange(dev);
- spin_unlock(&np->lock);
+ spin_unlock_irq(&np->lock);
np->link_timeout = jiffies + LINK_TIMEOUT;
}
if (events & (NVREG_IRQ_UNKNOWN)) {
@@ -2218,7 +2270,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
dev->name, events);
}
if (i > max_interrupt_work) {
- spin_lock(&np->lock);
+ spin_lock_irq(&np->lock);
/* disable interrupts on the nic */
writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
pci_push(base);
@@ -2228,7 +2280,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
- spin_unlock(&np->lock);
+ spin_unlock_irq(&np->lock);
break;
}
@@ -2251,10 +2303,11 @@ static void nv_do_nic_poll(unsigned long data)
* nv_nic_irq because that may decide to do otherwise
*/
- if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
- ((np->msi_flags & NV_MSI_X_ENABLED) &&
- ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
- disable_irq(dev->irq);
+ if (!using_multi_irqs(dev)) {
+ if (np->msi_flags & NV_MSI_X_ENABLED)
+ disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+ else
+ disable_irq(dev->irq);
mask = np->irqmask;
} else {
if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
@@ -2277,11 +2330,12 @@ static void nv_do_nic_poll(unsigned long data)
writel(mask, base + NvRegIrqMask);
pci_push(base);
- if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
- ((np->msi_flags & NV_MSI_X_ENABLED) &&
- ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+ if (!using_multi_irqs(dev)) {
nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
- enable_irq(dev->irq);
+ if (np->msi_flags & NV_MSI_X_ENABLED)
+ enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+ else
+ enable_irq(dev->irq);
} else {
if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL);
@@ -2628,6 +2682,113 @@ static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
}
+static int nv_request_irq(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ int ret = 1;
+ int i;
+
+ if (np->msi_flags & NV_MSI_X_CAPABLE) {
+ for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+ np->msi_x_entry[i].entry = i;
+ }
+ if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
+ np->msi_flags |= NV_MSI_X_ENABLED;
+ if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
+ /* Request irq for rx handling */
+ if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
+ printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
+ pci_disable_msix(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_X_ENABLED;
+ goto out_err;
+ }
+ /* Request irq for tx handling */
+ if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
+ printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
+ pci_disable_msix(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_X_ENABLED;
+ goto out_free_rx;
+ }
+ /* Request irq for link and timer handling */
+ if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
+ printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
+ pci_disable_msix(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_X_ENABLED;
+ goto out_free_tx;
+ }
+ /* map interrupts to their respective vector */
+ writel(0, base + NvRegMSIXMap0);
+ writel(0, base + NvRegMSIXMap1);
+ set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
+ set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
+ set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
+ } else {
+ /* Request irq for all interrupts */
+ if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
+ printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
+ pci_disable_msix(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_X_ENABLED;
+ goto out_err;
+ }
+
+ /* map interrupts to vector 0 */
+ writel(0, base + NvRegMSIXMap0);
+ writel(0, base + NvRegMSIXMap1);
+ }
+ }
+ }
+ if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
+ if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
+ np->msi_flags |= NV_MSI_ENABLED;
+ if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
+ printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
+ pci_disable_msi(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_ENABLED;
+ goto out_err;
+ }
+
+ /* map interrupts to vector 0 */
+ writel(0, base + NvRegMSIMap0);
+ writel(0, base + NvRegMSIMap1);
+ /* enable msi vector 0 */
+ writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
+ }
+ }
+ if (ret != 0) {
+ if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0)
+ goto out_err;
+ }
+
+ return 0;
+out_free_tx:
+ free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
+out_free_rx:
+ free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
+out_err:
+ return 1;
+}
+
+static void nv_free_irq(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ int i;
+
+ if (np->msi_flags & NV_MSI_X_ENABLED) {
+ for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+ free_irq(np->msi_x_entry[i].vector, dev);
+ }
+ pci_disable_msix(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_X_ENABLED;
+ } else {
+ free_irq(np->pci_dev->irq, dev);
+ if (np->msi_flags & NV_MSI_ENABLED) {
+ pci_disable_msi(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_ENABLED;
+ }
+ }
+}
+
static int nv_open(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
@@ -2720,12 +2881,16 @@ static int nv_open(struct net_device *dev)
udelay(10);
writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
- writel(0, base + NvRegIrqMask);
+ nv_disable_hw_interrupts(dev, np->irqmask);
pci_push(base);
writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
pci_push(base);
+ if (nv_request_irq(dev)) {
+ goto out_drain;
+ }
+
if (np->msi_flags & NV_MSI_X_CAPABLE) {
for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
np->msi_x_entry[i].entry = i;
@@ -2799,7 +2964,7 @@ static int nv_open(struct net_device *dev)
}
/* ask for interrupts */
- writel(np->irqmask, base + NvRegIrqMask);
+ nv_enable_hw_interrupts(dev, np->irqmask);
spin_lock_irq(&np->lock);
writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
@@ -2843,7 +3008,6 @@ static int nv_close(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base;
- int i;
spin_lock_irq(&np->lock);
np->in_shutdown = 1;
@@ -2861,31 +3025,13 @@ static int nv_close(struct net_device *dev)
/* disable interrupts on the nic or we will lock up */
base = get_hwbase(dev);
- if (np->msi_flags & NV_MSI_X_ENABLED) {
- writel(np->irqmask, base + NvRegIrqMask);
- } else {
- if (np->msi_flags & NV_MSI_ENABLED)
- writel(0, base + NvRegMSIIrqMask);
- writel(0, base + NvRegIrqMask);
- }
+ nv_disable_hw_interrupts(dev, np->irqmask);
pci_push(base);
dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
spin_unlock_irq(&np->lock);
- if (np->msi_flags & NV_MSI_X_ENABLED) {
- for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
- free_irq(np->msi_x_entry[i].vector, dev);
- }
- pci_disable_msix(np->pci_dev);
- np->msi_flags &= ~NV_MSI_X_ENABLED;
- } else {
- free_irq(np->pci_dev->irq, dev);
- if (np->msi_flags & NV_MSI_ENABLED) {
- pci_disable_msi(np->pci_dev);
- np->msi_flags &= ~NV_MSI_ENABLED;
- }
- }
+ nv_free_irq(dev);
drain_ring(dev);
@@ -2974,20 +3120,18 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
if (id->driver_data & DEV_HAS_HIGH_DMA) {
/* packet format 3: supports 40-bit addressing */
np->desc_ver = DESC_VER_3;
+ np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
pci_name(pci_dev));
} else {
- if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
- printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
- pci_name(pci_dev));
- goto out_relreg;
- } else {
- dev->features |= NETIF_F_HIGHDMA;
- printk(KERN_INFO "forcedeth: using HIGHDMA\n");
- }
+ dev->features |= NETIF_F_HIGHDMA;
+ printk(KERN_INFO "forcedeth: using HIGHDMA\n");
+ }
+ if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
+ printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
+ pci_name(pci_dev));
}
- np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
} else if (id->driver_data & DEV_HAS_LARGEDESC) {
/* packet format 2: supports jumbo frames */
np->desc_ver = DESC_VER_2;
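
Much of the forcedeth diff above is the same clean-up applied repeatedly: the open-coded MSI-X test at every enable/disable site becomes one named predicate plus nv_enable_irq()/nv_disable_irq() helpers. A stripped-down, user-space version of that predicate-plus-helper shape (flag values and behaviour are illustrative only):

#include <stdio.h>

#define NV_MSI_X_ENABLED	0x0200
#define NV_MSI_X_VECTORS_MASK	0x00ff

struct priv { unsigned int msi_flags; };

static int using_multi_irqs(const struct priv *np)
{
	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    (np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)
		return 0;
	return 1;
}

static void nv_disable_irq(const struct priv *np)
{
	if (!using_multi_irqs(np))
		printf("disable the single (legacy/MSI/one-vector MSI-X) irq\n");
	else
		printf("disable rx, tx and other MSI-X vectors\n");
}

int main(void)
{
	struct priv legacy = { .msi_flags = 0 };
	struct priv msix3  = { .msi_flags = NV_MSI_X_ENABLED | 0x3 };

	nv_disable_irq(&legacy);
	nv_disable_irq(&msix3);
	return 0;
}
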
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index 79a8fbcf5f9..0d5fccc984b 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -582,7 +582,6 @@ static int __init setup_adapter(int card_base, int type, int n)
INIT_WORK(&priv->rx_work, rx_bh, priv);
dev->priv = priv;
sprintf(dev->name, "dmascc%i", 2 * n + i);
- SET_MODULE_OWNER(dev);
dev->base_addr = card_base;
dev->irq = irq;
dev->open = scc_open;
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index 6ace0e914fd..5927784df3f 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1550,7 +1550,6 @@ static unsigned char ax25_nocall[AX25_ADDR_LEN] =
static void scc_net_setup(struct net_device *dev)
{
- SET_MODULE_OWNER(dev);
dev->tx_queue_len = 16; /* should be enough... */
dev->open = scc_net_open;
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index fe22479eb20..b49884048ca 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -1098,7 +1098,6 @@ static void yam_setup(struct net_device *dev)
dev->base_addr = yp->iobase;
dev->irq = yp->irq;
- SET_MODULE_OWNER(dev);
dev->open = yam_open;
dev->stop = yam_close;
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index ea62a3e7d58..411f4d809c4 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1419,6 +1419,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
mv643xx_eth_update_pscr(dev, &cmd);
mv643xx_set_settings(dev, &cmd);
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
err = register_netdev(dev);
if (err)
goto out;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 73e271e59c6..beeb612be98 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -69,8 +69,8 @@
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.56"
-#define DRV_MODULE_RELDATE "Apr 1, 2006"
+#define DRV_MODULE_VERSION "3.57"
+#define DRV_MODULE_RELDATE "Apr 28, 2006"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -974,6 +974,8 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
return err;
}
+static void tg3_link_report(struct tg3 *);
+
/* This will reset the tigon3 PHY if there is no valid
* link unless the FORCE argument is non-zero.
*/
@@ -987,6 +989,11 @@ static int tg3_phy_reset(struct tg3 *tp)
if (err != 0)
return -EBUSY;
+ if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
+ netif_carrier_off(tp->dev);
+ tg3_link_report(tp);
+ }
+
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
@@ -1023,6 +1030,12 @@ out:
tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
}
+ else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
+ tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
+ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
+ tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
+ }
/* Set Extended packet length bit (bit 14) on all chips that */
/* support jumbo frames */
if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
@@ -3531,7 +3544,7 @@ static irqreturn_t tg3_test_isr(int irq, void *dev_id,
return IRQ_RETVAL(0);
}
-static int tg3_init_hw(struct tg3 *);
+static int tg3_init_hw(struct tg3 *, int);
static int tg3_halt(struct tg3 *, int, int);
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -3567,7 +3580,7 @@ static void tg3_reset_task(void *_data)
tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
- tg3_init_hw(tp);
+ tg3_init_hw(tp, 1);
tg3_netif_start(tp);
@@ -4042,7 +4055,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
tg3_set_mtu(dev, tp, new_mtu);
- tg3_init_hw(tp);
+ tg3_init_hw(tp, 0);
tg3_netif_start(tp);
@@ -5719,9 +5732,23 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
if (!netif_running(dev))
return 0;
- spin_lock_bh(&tp->lock);
- __tg3_set_mac_addr(tp);
- spin_unlock_bh(&tp->lock);
+ if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
+ /* Reset chip so that ASF can re-init any MAC addresses it
+ * needs.
+ */
+ tg3_netif_stop(tp);
+ tg3_full_lock(tp, 1);
+
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ tg3_init_hw(tp, 0);
+
+ tg3_netif_start(tp);
+ tg3_full_unlock(tp);
+ } else {
+ spin_lock_bh(&tp->lock);
+ __tg3_set_mac_addr(tp);
+ spin_unlock_bh(&tp->lock);
+ }
return 0;
}
@@ -5771,7 +5798,7 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
}
/* tp->lock is held. */
-static int tg3_reset_hw(struct tg3 *tp)
+static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
{
u32 val, rdmac_mode;
int i, err, limit;
@@ -5786,7 +5813,7 @@ static int tg3_reset_hw(struct tg3 *tp)
tg3_abort_hw(tp, 1);
}
- if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
+ if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && reset_phy)
tg3_phy_reset(tp);
err = tg3_chip_reset(tp);
@@ -6327,7 +6354,7 @@ static int tg3_reset_hw(struct tg3 *tp)
tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
}
- err = tg3_setup_phy(tp, 1);
+ err = tg3_setup_phy(tp, reset_phy);
if (err)
return err;
@@ -6400,7 +6427,7 @@ static int tg3_reset_hw(struct tg3 *tp)
/* Called at device open time to get the chip ready for
* packet processing. Invoked with tp->lock held.
*/
-static int tg3_init_hw(struct tg3 *tp)
+static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
int err;
@@ -6413,7 +6440,7 @@ static int tg3_init_hw(struct tg3 *tp)
tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
- err = tg3_reset_hw(tp);
+ err = tg3_reset_hw(tp, reset_phy);
out:
return err;
@@ -6683,7 +6710,7 @@ static int tg3_test_msi(struct tg3 *tp)
tg3_full_lock(tp, 1);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- err = tg3_init_hw(tp);
+ err = tg3_init_hw(tp, 1);
tg3_full_unlock(tp);
@@ -6748,7 +6775,7 @@ static int tg3_open(struct net_device *dev)
tg3_full_lock(tp, 0);
- err = tg3_init_hw(tp);
+ err = tg3_init_hw(tp, 1);
if (err) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
tg3_free_rings(tp);
@@ -7839,7 +7866,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
if (netif_running(dev)) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- tg3_init_hw(tp);
+ tg3_init_hw(tp, 1);
tg3_netif_start(tp);
}
@@ -7884,7 +7911,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
if (netif_running(dev)) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- tg3_init_hw(tp);
+ tg3_init_hw(tp, 1);
tg3_netif_start(tp);
}
@@ -8522,7 +8549,7 @@ static int tg3_test_loopback(struct tg3 *tp)
if (!netif_running(tp->dev))
return TG3_LOOPBACK_FAILED;
- tg3_reset_hw(tp);
+ tg3_reset_hw(tp, 1);
if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
err |= TG3_MAC_LOOPBACK_FAILED;
@@ -8596,7 +8623,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
if (netif_running(dev)) {
tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
- tg3_init_hw(tp);
+ tg3_init_hw(tp, 1);
tg3_netif_start(tp);
}
@@ -9377,7 +9404,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
if ((page_off == 0) || (i == 0))
nvram_cmd |= NVRAM_CMD_FIRST;
- else if (page_off == (tp->nvram_pagesize - 4))
+ if (page_off == (tp->nvram_pagesize - 4))
nvram_cmd |= NVRAM_CMD_LAST;
if (i == (len - 4))
@@ -10353,10 +10380,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
- if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
- (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
- (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787))
- tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
+ if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
+ tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
+ else
+ tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
+ }
tp->coalesce_mode = 0;
if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
@@ -11569,7 +11599,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
tg3_full_lock(tp, 0);
tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
- tg3_init_hw(tp);
+ tg3_init_hw(tp, 1);
tp->timer.expires = jiffies + tp->timer_offset;
add_timer(&tp->timer);
@@ -11603,7 +11633,7 @@ static int tg3_resume(struct pci_dev *pdev)
tg3_full_lock(tp, 0);
tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
- tg3_init_hw(tp);
+ tg3_init_hw(tp, 1);
tp->timer.expires = jiffies + tp->timer_offset;
add_timer(&tp->timer);
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 8c8b987d125..0e29b885d44 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2215,6 +2215,7 @@ struct tg3 {
#define TG3_FLG2_HW_TSO_2 0x08000000
#define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | TG3_FLG2_HW_TSO_2)
#define TG3_FLG2_1SHOT_MSI 0x10000000
+#define TG3_FLG2_PHY_JITTER_BUG 0x20000000
u32 split_mode_max_reqs;
#define SPLIT_MODE_5704_MAX_REQ 3
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 6a23964c131..a6dc53b4250 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -129,6 +129,7 @@
- Massive clean-up
- Rewrite PHY, media handling (remove options, full_duplex, backoff)
- Fix Tx engine race for good
+ - Craig Brind: Zero-pad aligned buffers for short packets.
*/
@@ -1326,7 +1327,12 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
rp->stats.tx_dropped++;
return 0;
}
+
+ /* Padding is not copied and so must be redone. */
skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
+ if (skb->len < ETH_ZLEN)
+ memset(rp->tx_buf[entry] + skb->len, 0,
+ ETH_ZLEN - skb->len);
rp->tx_skbuff_dma[entry] = 0;
rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
(rp->tx_buf[entry] -
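
The via-rhine hunk above zeroes the tail of the bounce buffer whenever a copied packet is shorter than the 60-byte Ethernet minimum, so stale data from a previous frame is never sent on the wire. A minimal stand-alone sketch of the same padding step (buffer size and names are illustrative):

#include <string.h>
#include <stdio.h>

#define ETH_ZLEN 60

static unsigned char tx_buf[1536];

static void queue_tx(const unsigned char *pkt, size_t len)
{
	memcpy(tx_buf, pkt, len);
	if (len < ETH_ZLEN)
		memset(tx_buf + len, 0, ETH_ZLEN - len);	/* pad, don't leak old data */
}

int main(void)
{
	unsigned char pkt[42];

	memset(pkt, 0xab, sizeof(pkt));
	memset(tx_buf, 0xff, sizeof(tx_buf));			/* simulate stale contents */
	queue_tx(pkt, sizeof(pkt));
	printf("byte 41=0x%02x byte 42=0x%02x byte 59=0x%02x\n",
	       tx_buf[41], tx_buf[42], tx_buf[59]);
	return 0;
}
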
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index b1e3e6179e5..6c9ad92747f 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -58,7 +58,7 @@ rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
unsigned long data;
ssize_t ret;
- if (count < sizeof(unsigned long))
+ if (count != sizeof(unsigned int) && count < sizeof(unsigned long))
return -EINVAL;
add_wait_queue(&rtc->irq_queue, &wait);
@@ -90,11 +90,16 @@ rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
if (ret == 0) {
/* Check for any data updates */
if (rtc->ops->read_callback)
- data = rtc->ops->read_callback(rtc->class_dev.dev, data);
-
- ret = put_user(data, (unsigned long __user *)buf);
- if (ret == 0)
- ret = sizeof(unsigned long);
+ data = rtc->ops->read_callback(rtc->class_dev.dev,
+ data);
+
+ if (sizeof(int) != sizeof(long) &&
+ count == sizeof(unsigned int))
+ ret = put_user(data, (unsigned int __user *)buf) ?:
+ sizeof(unsigned int);
+ else
+ ret = put_user(data, (unsigned long __user *)buf) ?:
+ sizeof(unsigned long);
}
return ret;
}
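
Both put_user() sites added above lean on the GNU "?:" extension: "expr ?: fallback" yields expr when it is non-zero (the -EFAULT from a failed copy) and fallback (the byte count) otherwise, so error and success collapse into one expression. A tiny user-space demonstration, compilable with gcc or clang (this is a GNU extension, not standard C; the names are invented):

#include <stdio.h>

static long copy_out(int fail)
{
	return fail ? -14 /* -EFAULT */ : 0;
}

static long do_read(int fail)
{
	/* non-zero error short-circuits, otherwise report the byte count */
	return copy_out(fail) ?: (long)sizeof(unsigned long);
}

int main(void)
{
	printf("ok path:   %ld\n", do_read(0));	/* sizeof(unsigned long) */
	printf("fail path: %ld\n", do_read(1));	/* -EFAULT */
	return 0;
}
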
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index b3c6e790779..cb14642d97a 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -8014,7 +8014,6 @@ static int (*qeth_old_arp_constructor) (struct neighbour *);
static struct neigh_ops arp_direct_ops_template = {
.family = AF_INET,
- .destructor = NULL,
.solicit = NULL,
.error_report = NULL,
.output = dev_queue_xmit,
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index 5ae14803091..f99e55308b3 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -13,6 +13,7 @@
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/workqueue.h>
+#include <linux/time.h>
#include <asm/lowcore.h>
@@ -363,7 +364,7 @@ s390_revalidate_registers(struct mci *mci)
}
#define MAX_IPD_COUNT 29
-#define MAX_IPD_TIME (5 * 60 * 100 * 1000) /* 5 minutes */
+#define MAX_IPD_TIME (5 * 60 * USEC_PER_SEC) /* 5 minutes */
/*
* machine check handler.
diff --git a/drivers/sn/ioc3.c b/drivers/sn/ioc3.c
index 0b49ff78efc..501316b198e 100644
--- a/drivers/sn/ioc3.c
+++ b/drivers/sn/ioc3.c
@@ -678,7 +678,7 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
/* Track PCI-device specific data */
pci_set_drvdata(pdev, idd);
down_write(&ioc3_devices_rwsem);
- list_add(&idd->list, &ioc3_devices);
+ list_add_tail(&idd->list, &ioc3_devices);
idd->id = ioc3_counter++;
up_write(&ioc3_devices_rwsem);
diff --git a/drivers/sn/ioc4.c b/drivers/sn/ioc4.c
index 67140a5804f..cdeff909403 100644
--- a/drivers/sn/ioc4.c
+++ b/drivers/sn/ioc4.c
@@ -310,7 +310,7 @@ ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
pci_set_drvdata(idd->idd_pdev, idd);
mutex_lock(&ioc4_mutex);
- list_add(&idd->idd_list, &ioc4_devices);
+ list_add_tail(&idd->idd_list, &ioc4_devices);
/* Add this IOC4 to all submodules */
list_for_each_entry(is, &ioc4_submodules, is_list) {