116 files changed, 1262 insertions, 851 deletions
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt index 5d2480d33b4..ecad6ee7570 100644 --- a/Documentation/block/biodoc.txt +++ b/Documentation/block/biodoc.txt @@ -954,14 +954,14 @@ elevator_allow_merge_fn called whenever the block layer determines results in some sort of conflict internally, this hook allows it to do that. -elevator_dispatch_fn fills the dispatch queue with ready requests. +elevator_dispatch_fn* fills the dispatch queue with ready requests. I/O schedulers are free to postpone requests by not filling the dispatch queue unless @force is non-zero. Once dispatched, I/O schedulers are not allowed to manipulate the requests - they belong to generic dispatch queue. -elevator_add_req_fn called to add a new request into the scheduler +elevator_add_req_fn* called to add a new request into the scheduler elevator_queue_empty_fn returns true if the merge queue is empty. Drivers shouldn't use this, but rather check @@ -991,7 +991,7 @@ elevator_activate_req_fn Called when device driver first sees a request. elevator_deactivate_req_fn Called when device driver decides to delay a request by requeueing it. -elevator_init_fn +elevator_init_fn* elevator_exit_fn Allocate and free any elevator specific storage for a queue. diff --git a/Documentation/networking/alias.txt b/Documentation/networking/alias.txt index cd12c2ff518..85046f53fcf 100644 --- a/Documentation/networking/alias.txt +++ b/Documentation/networking/alias.txt @@ -2,13 +2,13 @@ IP-Aliasing: ============ -IP-aliases are additional IP-addresses/masks hooked up to a base -interface by adding a colon and a string when running ifconfig. -This string is usually numeric, but this is not a must. - -IP-Aliases are avail if CONFIG_INET (`standard' IPv4 networking) -is configured in the kernel. +IP-aliases are an obsolete way to manage multiple IP-addresses/masks +per interface. Newer tools such as iproute2 support multiple +address/prefixes per interface, but aliases are still supported +for backwards compatibility. +An alias is formed by adding a colon and a string when running ifconfig. +This string is usually numeric, but this is not a must. o Alias creation. Alias creation is done by 'magic' interface naming: eg. to create a @@ -38,16 +38,3 @@ o Relationship with main device If the base device is shut down the added aliases will be deleted too. 
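The biodoc.txt text above describes the elevator contract: the add-request hook queues work inside the scheduler, while the dispatch hook fills the generic dispatch queue and may postpone unless @force is non-zero, after which the requests belong to the dispatch queue. The following standalone C sketch (a toy FIFO, not the kernel's elevator_ops interface; all names are made up) illustrates that contract.

#include <stdio.h>

struct request {
    int sector;
    struct request *next;
};

struct toy_elevator {
    struct request *pending;    /* scheduler-private FIFO */
    struct request *dispatch;   /* generic dispatch queue */
    int batch;                  /* postpone until this many are queued */
    int npending;
};

static void toy_add_request(struct toy_elevator *e, struct request *rq)
{
    rq->next = e->pending;      /* corresponds to the add-request hook */
    e->pending = rq;
    e->npending++;
}

/*
 * Corresponds to the dispatch hook: may decline to move anything unless
 * force is non-zero; once moved, requests belong to the dispatch queue
 * and the "scheduler" must not touch them again.
 */
static int toy_dispatch(struct toy_elevator *e, int force)
{
    int moved = 0;

    if (!force && e->npending < e->batch)
        return 0;

    while (e->pending) {
        struct request *rq = e->pending;

        e->pending = rq->next;
        rq->next = e->dispatch;
        e->dispatch = rq;
        e->npending--;
        moved++;
    }
    return moved;
}

int main(void)
{
    struct toy_elevator e = { NULL, NULL, 4, 0 };
    struct request r1 = { 10, NULL }, r2 = { 20, NULL };

    toy_add_request(&e, &r1);
    toy_add_request(&e, &r2);
    printf("dispatched without force: %d\n", toy_dispatch(&e, 0)); /* 0 */
    printf("dispatched with force:    %d\n", toy_dispatch(&e, 1)); /* 2 */
    return 0;
}

The batch threshold here is only a placeholder for whatever heuristic a real scheduler uses when deciding to postpone.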
- - -Contact -------- -Please finger or e-mail me: - Juan Jose Ciarlante <jjciarla@raiz.uncu.edu.ar> - -Updated by Erik Schoenfelder <schoenfr@gaertner.DE> - -; local variables: -; mode: indented-text -; mode: auto-fill -; end: diff --git a/MAINTAINERS b/MAINTAINERS index d992d407197..474ec0c5327 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2836,8 +2836,6 @@ S: Maintained MAC80211 P: Johannes Berg M: johannes@sipsolutions.net -P: Michael Wu -M: flamingice@sourmilk.net L: linux-wireless@vger.kernel.org W: http://linuxwireless.org/ T: git kernel.org:/pub/scm/linux/kernel/git/linville/wireless-2.6.git diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 52c80c2a57f..600eef3f3ac 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -351,7 +351,7 @@ config SGI_IP27 select ARC64 select BOOT_ELF64 select DEFAULT_SGI_PARTITION - select DMA_IP27 + select DMA_COHERENT select SYS_HAS_EARLY_PRINTK select HW_HAS_PCI select NR_CPUS_DEFAULT_64 @@ -761,9 +761,6 @@ config CFE config DMA_COHERENT bool -config DMA_IP27 - bool - config DMA_NONCOHERENT bool select DMA_NEED_PCI_MAP_STATE @@ -1368,7 +1365,7 @@ config CPU_SUPPORTS_64BIT_KERNEL # config HARDWARE_WATCHPOINTS bool - default y if CPU_MIPS32 || CPU_MIPS64 + default y if CPU_MIPSR1 || CPU_MIPSR2 menu "Kernel type" diff --git a/arch/mips/alchemy/common/time.c b/arch/mips/alchemy/common/time.c index 32880146cbc..6fd441d16af 100644 --- a/arch/mips/alchemy/common/time.c +++ b/arch/mips/alchemy/common/time.c @@ -89,7 +89,7 @@ static struct clock_event_device au1x_rtcmatch2_clockdev = { .irq = AU1000_RTC_MATCH2_INT, .set_next_event = au1x_rtcmatch2_set_next_event, .set_mode = au1x_rtcmatch2_set_mode, - .cpumask = CPU_MASK_ALL, + .cpumask = CPU_MASK_ALL_PTR, }; static struct irqaction au1x_rtcmatch2_irqaction = { diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index e085feddb4a..5f4e49ba471 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -15,13 +15,11 @@ #include <linux/serial.h> #include <linux/types.h> #include <linux/string.h> /* for memset */ -#include <linux/serial.h> #include <linux/tty.h> #include <linux/time.h> #include <linux/platform_device.h> #include <linux/serial_core.h> #include <linux/serial_8250.h> -#include <linux/string.h> #include <asm/processor.h> #include <asm/reboot.h> diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig index 34ea319be94..f2baea3039b 100644 --- a/arch/mips/configs/ip27_defconfig +++ b/arch/mips/configs/ip27_defconfig @@ -53,7 +53,7 @@ CONFIG_GENERIC_TIME=y CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y CONFIG_ARC=y -CONFIG_DMA_IP27=y +CONFIG_DMA_COHERENT=y CONFIG_EARLY_PRINTK=y CONFIG_SYS_HAS_EARLY_PRINTK=y # CONFIG_NO_IOPORT is not set diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index c996c3b4d07..1b332e15ab5 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h @@ -50,7 +50,7 @@ static __inline__ void atomic_add(int i, atomic_t * v) { if (cpu_has_llsc && R10000_LLSC_WAR) { - unsigned long temp; + int temp; __asm__ __volatile__( " .set mips3 \n" @@ -62,7 +62,7 @@ static __inline__ void atomic_add(int i, atomic_t * v) : "=&r" (temp), "=m" (v->counter) : "Ir" (i), "m" (v->counter)); } else if (cpu_has_llsc) { - unsigned long temp; + int temp; __asm__ __volatile__( " .set mips3 \n" @@ -95,7 +95,7 @@ static __inline__ void atomic_add(int i, atomic_t * v) static __inline__ void atomic_sub(int i, atomic_t * v) { if (cpu_has_llsc && 
R10000_LLSC_WAR) { - unsigned long temp; + int temp; __asm__ __volatile__( " .set mips3 \n" @@ -107,7 +107,7 @@ static __inline__ void atomic_sub(int i, atomic_t * v) : "=&r" (temp), "=m" (v->counter) : "Ir" (i), "m" (v->counter)); } else if (cpu_has_llsc) { - unsigned long temp; + int temp; __asm__ __volatile__( " .set mips3 \n" @@ -135,12 +135,12 @@ static __inline__ void atomic_sub(int i, atomic_t * v) */ static __inline__ int atomic_add_return(int i, atomic_t * v) { - unsigned long result; + int result; smp_llsc_mb(); if (cpu_has_llsc && R10000_LLSC_WAR) { - unsigned long temp; + int temp; __asm__ __volatile__( " .set mips3 \n" @@ -154,7 +154,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v) : "Ir" (i), "m" (v->counter) : "memory"); } else if (cpu_has_llsc) { - unsigned long temp; + int temp; __asm__ __volatile__( " .set mips3 \n" @@ -187,12 +187,12 @@ static __inline__ int atomic_add_return(int i, atomic_t * v) static __inline__ int atomic_sub_return(int i, atomic_t * v) { - unsigned long result; + int result; smp_llsc_mb(); if (cpu_has_llsc && R10000_LLSC_WAR) { - unsigned long temp; + int temp; __asm__ __volatile__( " .set mips3 \n" @@ -206,7 +206,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v) : "Ir" (i), "m" (v->counter) : "memory"); } else if (cpu_has_llsc) { - unsigned long temp; + int temp; __asm__ __volatile__( " .set mips3 \n" @@ -247,12 +247,12 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v) */ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) { - unsigned long result; + int result; smp_llsc_mb(); if (cpu_has_llsc && R10000_LLSC_WAR) { - unsigned long temp; + int temp; __asm__ __volatile__( " .set mips3 \n" @@ -270,7 +270,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) : "Ir" (i), "m" (v->counter) : "memory"); } else if (cpu_has_llsc) { - unsigned long temp; + int temp; __asm__ __volatile__( " .set mips3 \n" @@ -429,7 +429,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) static __inline__ void atomic64_add(long i, atomic64_t * v) { if (cpu_has_llsc && R10000_LLSC_WAR) { - unsigned long temp; + long temp; __asm__ __volatile__( " .set mips3 \n" @@ -441,7 +441,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v) : "=&r" (temp), "=m" (v->counter) : "Ir" (i), "m" (v->counter)); } else if (cpu_has_llsc) { - unsigned long temp; + long temp; __asm__ __volatile__( " .set mips3 \n" @@ -474,7 +474,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v) static __inline__ void atomic64_sub(long i, atomic64_t * v) { if (cpu_has_llsc && R10000_LLSC_WAR) { - unsigned long temp; + long temp; __asm__ __volatile__( " .set mips3 \n" @@ -486,7 +486,7 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v) : "=&r" (temp), "=m" (v->counter) : "Ir" (i), "m" (v->counter)); } else if (cpu_has_llsc) { - unsigned long temp; + long temp; __asm__ __volatile__( " .set mips3 \n" @@ -514,12 +514,12 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v) */ static __inline__ long atomic64_add_return(long i, atomic64_t * v) { - unsigned long result; + long result; smp_llsc_mb(); if (cpu_has_llsc && R10000_LLSC_WAR) { - unsigned long temp; + long temp; __asm__ __volatile__( " .set mips3 \n" @@ -533,7 +533,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v) : "Ir" (i), "m" (v->counter) : "memory"); } else if (cpu_has_llsc) { - unsigned long temp; + long temp; __asm__ __volatile__( " .set mips3 \n" @@ -566,12 +566,12 @@ static __inline__ long 
atomic64_add_return(long i, atomic64_t * v) static __inline__ long atomic64_sub_return(long i, atomic64_t * v) { - unsigned long result; + long result; smp_llsc_mb(); if (cpu_has_llsc && R10000_LLSC_WAR) { - unsigned long temp; + long temp; __asm__ __volatile__( " .set mips3 \n" @@ -585,7 +585,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v) : "Ir" (i), "m" (v->counter) : "memory"); } else if (cpu_has_llsc) { - unsigned long temp; + long temp; __asm__ __volatile__( " .set mips3 \n" @@ -626,12 +626,12 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v) */ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) { - unsigned long result; + long result; smp_llsc_mb(); if (cpu_has_llsc && R10000_LLSC_WAR) { - unsigned long temp; + long temp; __asm__ __volatile__( " .set mips3 \n" @@ -649,7 +649,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) : "Ir" (i), "m" (v->counter) : "memory"); } else if (cpu_has_llsc) { - unsigned long temp; + long temp; __asm__ __volatile__( " .set mips3 \n" diff --git a/arch/mips/include/asm/mach-rc32434/gpio.h b/arch/mips/include/asm/mach-rc32434/gpio.h index b5cf6457305..3cb50d17b62 100644 --- a/arch/mips/include/asm/mach-rc32434/gpio.h +++ b/arch/mips/include/asm/mach-rc32434/gpio.h @@ -80,11 +80,8 @@ struct rb532_gpio_reg { /* Compact Flash GPIO pin */ #define CF_GPIO_NUM 13 -extern void set_434_reg(unsigned reg_offs, unsigned bit, unsigned len, unsigned val); -extern unsigned get_434_reg(unsigned reg_offs); -extern void set_latch_u5(unsigned char or_mask, unsigned char nand_mask); -extern unsigned char get_latch_u5(void); extern void rb532_gpio_set_ilevel(int bit, unsigned gpio); extern void rb532_gpio_set_istat(int bit, unsigned gpio); +extern void rb532_gpio_set_func(unsigned gpio); #endif /* _RC32434_GPIO_H_ */ diff --git a/arch/mips/include/asm/mach-rc32434/irq.h b/arch/mips/include/asm/mach-rc32434/irq.h index 56738d8ec4e..023a5b100ed 100644 --- a/arch/mips/include/asm/mach-rc32434/irq.h +++ b/arch/mips/include/asm/mach-rc32434/irq.h @@ -30,4 +30,7 @@ #define ETH0_RX_OVR_IRQ (GROUP3_IRQ_BASE + 9) #define ETH0_TX_UND_IRQ (GROUP3_IRQ_BASE + 10) +#define GPIO_MAPPED_IRQ_BASE GROUP4_IRQ_BASE +#define GPIO_MAPPED_IRQ_GROUP 4 + #endif /* __ASM_RC32434_IRQ_H */ diff --git a/arch/mips/include/asm/mach-rc32434/rb.h b/arch/mips/include/asm/mach-rc32434/rb.h index f25a8491670..6dc5f8df1f3 100644 --- a/arch/mips/include/asm/mach-rc32434/rb.h +++ b/arch/mips/include/asm/mach-rc32434/rb.h @@ -83,4 +83,7 @@ struct mpmc_device { void __iomem *base; }; +extern void set_latch_u5(unsigned char or_mask, unsigned char nand_mask); +extern unsigned char get_latch_u5(void); + #endif /* __ASM_RC32434_RB_H */ diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h index 1f30d16d466..ce47118e52b 100644 --- a/arch/mips/include/asm/ptrace.h +++ b/arch/mips/include/asm/ptrace.h @@ -105,7 +105,7 @@ struct pt_watch_regs { enum pt_watch_style style; union { struct mips32_watch_regs mips32; - struct mips32_watch_regs mips64; + struct mips64_watch_regs mips64; }; }; diff --git a/arch/mips/include/asm/termios.h b/arch/mips/include/asm/termios.h index a275661fa7e..8f77f774a2a 100644 --- a/arch/mips/include/asm/termios.h +++ b/arch/mips/include/asm/termios.h @@ -9,6 +9,7 @@ #ifndef _ASM_TERMIOS_H #define _ASM_TERMIOS_H +#include <linux/errno.h> #include <asm/termbits.h> #include <asm/ioctls.h> @@ -94,38 +95,81 @@ struct termio { /* * Translate a "termio" structure into a "termios". Ugh. 
*/ -#define user_termio_to_kernel_termios(termios, termio) \ -({ \ - unsigned short tmp; \ - get_user(tmp, &(termio)->c_iflag); \ - (termios)->c_iflag = (0xffff0000 & ((termios)->c_iflag)) | tmp; \ - get_user(tmp, &(termio)->c_oflag); \ - (termios)->c_oflag = (0xffff0000 & ((termios)->c_oflag)) | tmp; \ - get_user(tmp, &(termio)->c_cflag); \ - (termios)->c_cflag = (0xffff0000 & ((termios)->c_cflag)) | tmp; \ - get_user(tmp, &(termio)->c_lflag); \ - (termios)->c_lflag = (0xffff0000 & ((termios)->c_lflag)) | tmp; \ - get_user((termios)->c_line, &(termio)->c_line); \ - copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \ -}) +static inline int user_termio_to_kernel_termios(struct ktermios *termios, + struct termio __user *termio) +{ + unsigned short iflag, oflag, cflag, lflag; + unsigned int err; + + if (!access_ok(VERIFY_READ, termio, sizeof(struct termio))) + return -EFAULT; + + err = __get_user(iflag, &termio->c_iflag); + termios->c_iflag = (termios->c_iflag & 0xffff0000) | iflag; + err |=__get_user(oflag, &termio->c_oflag); + termios->c_oflag = (termios->c_oflag & 0xffff0000) | oflag; + err |=__get_user(cflag, &termio->c_cflag); + termios->c_cflag = (termios->c_cflag & 0xffff0000) | cflag; + err |=__get_user(lflag, &termio->c_lflag); + termios->c_lflag = (termios->c_lflag & 0xffff0000) | lflag; + err |=__get_user(termios->c_line, &termio->c_line); + if (err) + return -EFAULT; + + if (__copy_from_user(termios->c_cc, termio->c_cc, NCC)) + return -EFAULT; + + return 0; +} /* * Translate a "termios" structure into a "termio". Ugh. */ -#define kernel_termios_to_user_termio(termio, termios) \ -({ \ - put_user((termios)->c_iflag, &(termio)->c_iflag); \ - put_user((termios)->c_oflag, &(termio)->c_oflag); \ - put_user((termios)->c_cflag, &(termio)->c_cflag); \ - put_user((termios)->c_lflag, &(termio)->c_lflag); \ - put_user((termios)->c_line, &(termio)->c_line); \ - copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ -}) - -#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2)) -#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2)) -#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios)) -#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios)) +static inline int kernel_termios_to_user_termio(struct termio __user *termio, + struct ktermios *termios) +{ + int err; + + if (!access_ok(VERIFY_WRITE, termio, sizeof(struct termio))) + return -EFAULT; + + err = __put_user(termios->c_iflag, &termio->c_iflag); + err |= __put_user(termios->c_oflag, &termio->c_oflag); + err |= __put_user(termios->c_cflag, &termio->c_cflag); + err |= __put_user(termios->c_lflag, &termio->c_lflag); + err |= __put_user(termios->c_line, &termio->c_line); + if (err) + return -EFAULT; + + if (__copy_to_user(termio->c_cc, termios->c_cc, NCC)) + return -EFAULT; + + return 0; +} + +static inline int user_termios_to_kernel_termios(struct ktermios __user *k, + struct termios2 *u) +{ + return copy_from_user(k, u, sizeof(struct termios2)) ? -EFAULT : 0; +} + +static inline int kernel_termios_to_user_termios(struct termios2 __user *u, + struct ktermios *k) +{ + return copy_to_user(u, k, sizeof(struct termios2)) ? -EFAULT : 0; +} + +static inline int user_termios_to_kernel_termios_1(struct ktermios *k, + struct termios __user *u) +{ + return copy_from_user(k, u, sizeof(struct termios)) ? 
-EFAULT : 0; +} + +static inline int kernel_termios_to_user_termios_1(struct termios __user *u, + struct ktermios *k) +{ + return copy_to_user(u, k, sizeof(struct termios)) ? -EFAULT : 0; +} #endif /* defined(__KERNEL__) */ diff --git a/arch/mips/include/asm/txx9/tx4939.h b/arch/mips/include/asm/txx9/tx4939.h index 88badb42301..964ef7ede26 100644 --- a/arch/mips/include/asm/txx9/tx4939.h +++ b/arch/mips/include/asm/txx9/tx4939.h @@ -541,5 +541,6 @@ void tx4939_irq_init(void); int tx4939_irq(void); void tx4939_mtd_init(int ch); void tx4939_ata_init(void); +void tx4939_rtc_init(void); #endif /* __ASM_TXX9_TX4939_H */ diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index fb6f73148df..8882e5766f2 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -458,7 +458,11 @@ NESTED(nmi_handler, PT_SIZE, sp) BUILD_HANDLER fpe fpe fpe silent /* #15 */ BUILD_HANDLER mdmx mdmx sti silent /* #22 */ #ifdef CONFIG_HARDWARE_WATCHPOINTS - BUILD_HANDLER watch watch sti silent /* #23 */ + /* + * For watch, interrupts will be enabled after the watch + * registers are read. + */ + BUILD_HANDLER watch watch cli silent /* #23 */ #else BUILD_HANDLER watch watch sti verbose /* #23 */ #endif diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c index 5e77a3a21f9..42461310b18 100644 --- a/arch/mips/kernel/mips-mt-fpaff.c +++ b/arch/mips/kernel/mips-mt-fpaff.c @@ -79,7 +79,8 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len, euid = current_euid(); retval = -EPERM; - if (euid != p->euid && euid != p->uid && !capable(CAP_SYS_NICE)) { + if (euid != p->cred->euid && euid != p->cred->uid && + !capable(CAP_SYS_NICE)) { read_unlock(&tasklist_lock); goto out_unlock; } diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index f6083c6bfaa..b2d7041341b 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -944,6 +944,9 @@ asmlinkage void do_mdmx(struct pt_regs *regs) force_sig(SIGILL, current); } +/* + * Called with interrupts disabled. + */ asmlinkage void do_watch(struct pt_regs *regs) { u32 cause; @@ -963,9 +966,12 @@ asmlinkage void do_watch(struct pt_regs *regs) */ if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) { mips_read_watch_registers(); + local_irq_enable(); force_sig(SIGTRAP, current); - } else + } else { mips_clear_watch_registers(); + local_irq_enable(); + } } asmlinkage void do_mcheck(struct pt_regs *regs) @@ -1582,7 +1588,11 @@ void __init set_handler(unsigned long offset, void *addr, unsigned long size) static char panic_null_cerr[] __cpuinitdata = "Trying to set NULL cache error exception handler"; -/* Install uncached CPU exception handler */ +/* + * Install uncached CPU exception handler. + * This is suitable only for the cache error exception which is the only + * exception handler that is being run uncached. + */ void __cpuinit set_uncached_handler(unsigned long offset, void *addr, unsigned long size) { @@ -1593,7 +1603,7 @@ void __cpuinit set_uncached_handler(unsigned long offset, void *addr, unsigned long uncached_ebase = TO_UNCAC(ebase); #endif if (cpu_has_mips_r2) - ebase += (read_c0_ebase() & 0x3ffff000); + uncached_ebase += (read_c0_ebase() & 0x3ffff000); if (!addr) panic(panic_null_cerr); diff --git a/arch/mips/lib/memcpy-inatomic.S b/arch/mips/lib/memcpy-inatomic.S index 736d0fb56a9..68853a038d3 100644 --- a/arch/mips/lib/memcpy-inatomic.S +++ b/arch/mips/lib/memcpy-inatomic.S @@ -21,7 +21,7 @@ * end of memory on some systems. 
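The asm/termios.h hunk above replaces statement-expression macros, which silently ignored get_user()/put_user() failures, with static inline helpers that check access_ok() once, OR the per-field results together and return -EFAULT if anything failed, while preserving the upper 16 bits of each flag word. A minimal user-space sketch of that error-accumulation pattern, with a hypothetical get_field() standing in for __get_user():

#include <errno.h>
#include <stdio.h>

struct wire_cfg { unsigned short iflag, oflag; };   /* "termio"-like   */
struct app_cfg  { unsigned int   iflag, oflag; };   /* "ktermios"-like */

/* Stand-in for __get_user(): 0 on success, non-zero on a faulting copy. */
static int get_field(unsigned short *dst, const unsigned short *src)
{
    if (!src)
        return -1;
    *dst = *src;
    return 0;
}

static int wire_to_app(struct app_cfg *app, const struct wire_cfg *wire)
{
    unsigned short iflag, oflag;
    int err;

    err  = get_field(&iflag, wire ? &wire->iflag : NULL);
    err |= get_field(&oflag, wire ? &wire->oflag : NULL);
    if (err)
        return -EFAULT;         /* one failure path for any bad field */

    /* keep the upper 16 bits, as the new termios helpers do */
    app->iflag = (app->iflag & 0xffff0000) | iflag;
    app->oflag = (app->oflag & 0xffff0000) | oflag;
    return 0;
}

int main(void)
{
    struct wire_cfg w = { 0x1234, 0x5678 };
    struct app_cfg  a = { 0xaaaa0000, 0xbbbb0000 };

    if (wire_to_app(&a, &w) == 0)
        printf("iflag=%#x oflag=%#x\n", a.iflag, a.oflag);
    if (wire_to_app(&a, NULL) != 0)
        printf("bad source rejected with -EFAULT\n");
    return 0;
}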
It's also a seriously bad idea on non * dma-coherent systems. */ -#if !defined(CONFIG_DMA_COHERENT) || !defined(CONFIG_DMA_IP27) +#ifdef CONFIG_DMA_NONCOHERENT #undef CONFIG_CPU_HAS_PREFETCH #endif #ifdef CONFIG_MIPS_MALTA diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S index c06cccf60be..56a1f85a1ce 100644 --- a/arch/mips/lib/memcpy.S +++ b/arch/mips/lib/memcpy.S @@ -21,7 +21,7 @@ * end of memory on some systems. It's also a seriously bad idea on non * dma-coherent systems. */ -#if !defined(CONFIG_DMA_COHERENT) || !defined(CONFIG_DMA_IP27) +#ifdef CONFIG_DMA_NONCOHERENT #undef CONFIG_CPU_HAS_PREFETCH #endif #ifdef CONFIG_MIPS_MALTA diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 6e99665ae86..c43f4b26a69 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -618,15 +618,35 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) if (cpu_has_inclusive_pcaches) { if (size >= scache_size) r4k_blast_scache(); - else + else { + unsigned long lsize = cpu_scache_line_size(); + unsigned long almask = ~(lsize - 1); + + /* + * There is no clearly documented alignment requirement + * for the cache instruction on MIPS processors and + * some processors, among them the RM5200 and RM7000 + * QED processors will throw an address error for cache + * hit ops with insufficient alignment. Solved by + * aligning the address to cache line size. + */ + cache_op(Hit_Writeback_Inv_SD, addr & almask); + cache_op(Hit_Writeback_Inv_SD, + (addr + size - 1) & almask); blast_inv_scache_range(addr, addr + size); + } return; } if (cpu_has_safe_index_cacheops && size >= dcache_size) { r4k_blast_dcache(); } else { + unsigned long lsize = cpu_dcache_line_size(); + unsigned long almask = ~(lsize - 1); + R4600_HIT_CACHEOP_WAR_IMPL; + cache_op(Hit_Writeback_Inv_D, addr & almask); + cache_op(Hit_Writeback_Inv_D, (addr + size - 1) & almask); blast_inv_dcache_range(addr, addr + size); } diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index fa636fc6b7b..55767ad9f00 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -97,7 +97,6 @@ good_area: goto bad_area; } -survive: /* * If for any reason at all we couldn't handle the fault, * make sure we exit gracefully rather than endlessly redo @@ -167,21 +166,13 @@ no_context: field, regs->regs[31]); die("Oops", regs); -/* - * We ran out of memory, or some other thing happened to us that made - * us unable to handle the page fault gracefully. - */ out_of_memory: - up_read(&mm->mmap_sem); - if (is_global_init(tsk)) { - yield(); - down_read(&mm->mmap_sem); - goto survive; - } - printk("VM: killing process %s\n", tsk->comm); - if (user_mode(regs)) - do_group_exit(SIGKILL); - goto no_context; + /* + * We ran out of memory, call the OOM killer, and return the userspace + * (which will retry the fault, or kill us if we got oom-killed). 
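The c-r4k.c hunk above works around RM5200/RM7000 address errors on unaligned cache hit ops by rounding the first and last byte of the range down to a cache-line boundary (almask = ~(lsize - 1)) before touching those lines; blast_inv_*_range() then covers the rest. A generic user-space sketch of the same rounding, assuming a 32-byte line and a stand-in cache_line_op():

#include <stdint.h>
#include <stdio.h>

#define LINE_SIZE 32UL          /* assumed cache line size for the example */

static void cache_line_op(uintptr_t line)
{
    printf("cache op on line %#lx\n", (unsigned long)line);
}

static void writeback_inv_range(uintptr_t addr, size_t size)
{
    uintptr_t almask = ~(uintptr_t)(LINE_SIZE - 1);
    uintptr_t first  = addr & almask;               /* line holding first byte */
    uintptr_t last   = (addr + size - 1) & almask;  /* line holding last byte  */
    uintptr_t line;

    for (line = first; line <= last; line += LINE_SIZE)
        cache_line_op(line);
}

int main(void)
{
    /* a 100-byte buffer starting 8 bytes into a line: 4 lines are touched */
    writeback_inv_range(0x1008, 100);
    return 0;
}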
+ */ + pagefault_out_of_memory(); + return; do_sigbus: up_read(&mm->mmap_sem); diff --git a/arch/mips/pci/pci-rc32434.c b/arch/mips/pci/pci-rc32434.c index 1c2821e2f49..71f7d27b0d4 100644 --- a/arch/mips/pci/pci-rc32434.c +++ b/arch/mips/pci/pci-rc32434.c @@ -205,6 +205,8 @@ static int __init rc32434_pcibridge_init(void) static int __init rc32434_pci_init(void) { + void __iomem *io_map_base; + pr_info("PCI: Initializing PCI\n"); ioport_resource.start = rc32434_res_pci_io1.start; @@ -212,6 +214,15 @@ static int __init rc32434_pci_init(void) rc32434_pcibridge_init(); + io_map_base = ioremap(rc32434_res_pci_io1.start, + rc32434_res_pci_io1.end - rc32434_res_pci_io1.start + 1); + + if (!io_map_base) + return -ENOMEM; + + rc32434_controller.io_map_base = + (unsigned long)io_map_base - rc32434_res_pci_io1.start; + register_pci_controller(&rc32434_controller); rc32434_sync(); diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c index c1c29181bd4..4a5f05b662a 100644 --- a/arch/mips/rb532/devices.c +++ b/arch/mips/rb532/devices.c @@ -24,6 +24,7 @@ #include <linux/mtd/partitions.h> #include <linux/gpio_keys.h> #include <linux/input.h> +#include <linux/serial_8250.h> #include <asm/bootinfo.h> @@ -39,6 +40,29 @@ #define ETH0_RX_DMA_ADDR (DMA0_BASE_ADDR + 0 * DMA_CHAN_OFFSET) #define ETH0_TX_DMA_ADDR (DMA0_BASE_ADDR + 1 * DMA_CHAN_OFFSET) +extern unsigned int idt_cpu_freq; + +static struct mpmc_device dev3; + +void set_latch_u5(unsigned char or_mask, unsigned char nand_mask) +{ + unsigned long flags; + + spin_lock_irqsave(&dev3.lock, flags); + + dev3.state = (dev3.state | or_mask) & ~nand_mask; + writeb(dev3.state, dev3.base); + + spin_unlock_irqrestore(&dev3.lock, flags); +} +EXPORT_SYMBOL(set_latch_u5); + +unsigned char get_latch_u5(void) +{ + return dev3.state; +} +EXPORT_SYMBOL(get_latch_u5); + static struct resource korina_dev0_res[] = { { .name = "korina_regs", @@ -86,7 +110,7 @@ static struct korina_device korina_dev0_data = { static struct platform_device korina_dev0 = { .id = -1, .name = "korina", - .dev.platform_data = &korina_dev0_data, + .dev.driver_data = &korina_dev0_data, .resource = korina_dev0_res, .num_resources = ARRAY_SIZE(korina_dev0_res), }; @@ -214,12 +238,32 @@ static struct platform_device rb532_wdt = { .num_resources = ARRAY_SIZE(rb532_wdt_res), }; +static struct plat_serial8250_port rb532_uart_res[] = { + { + .membase = (char *)KSEG1ADDR(REGBASE + UART0BASE), + .irq = UART0_IRQ, + .regshift = 2, + .iotype = UPIO_MEM, + .flags = UPF_BOOT_AUTOCONF, + }, + { + .flags = 0, + } +}; + +static struct platform_device rb532_uart = { + .name = "serial8250", + .id = PLAT8250_DEV_PLATFORM, + .dev.platform_data = &rb532_uart_res, +}; + static struct platform_device *rb532_devs[] = { &korina_dev0, &nand_slot0, &cf_slot0, &rb532_led, &rb532_button, + &rb532_uart, &rb532_wdt }; @@ -291,9 +335,20 @@ static int __init plat_setup_devices(void) nand_slot0_res[0].start = readl(IDT434_REG_BASE + DEV2BASE); nand_slot0_res[0].end = nand_slot0_res[0].start + 0x1000; + /* Read and map device controller 3 */ + dev3.base = ioremap_nocache(readl(IDT434_REG_BASE + DEV3BASE), 1); + + if (!dev3.base) { + printk(KERN_ERR "rb532: cannot remap device controller 3\n"); + return -ENXIO; + } + /* Initialise the NAND device */ rb532_nand_setup(); + /* set the uart clock to the current cpu frequency */ + rb532_uart_res[0].uartclk = idt_cpu_freq; + return platform_add_devices(rb532_devs, ARRAY_SIZE(rb532_devs)); } diff --git a/arch/mips/rb532/gpio.c b/arch/mips/rb532/gpio.c index 
0e84c8ab6a3..37de05d595e 100644 --- a/arch/mips/rb532/gpio.c +++ b/arch/mips/rb532/gpio.c @@ -41,8 +41,6 @@ struct rb532_gpio_chip { void __iomem *regbase; }; -struct mpmc_device dev3; - static struct resource rb532_gpio_reg0_res[] = { { .name = "gpio_reg0", @@ -52,61 +50,6 @@ static struct resource rb532_gpio_reg0_res[] = { } }; -static struct resource rb532_dev3_ctl_res[] = { - { - .name = "dev3_ctl", - .start = REGBASE + DEV3BASE, - .end = REGBASE + DEV3BASE + sizeof(struct dev_reg) - 1, - .flags = IORESOURCE_MEM, - } -}; - -void set_434_reg(unsigned reg_offs, unsigned bit, unsigned len, unsigned val) -{ - unsigned long flags; - unsigned data; - unsigned i = 0; - - spin_lock_irqsave(&dev3.lock, flags); - - data = readl(IDT434_REG_BASE + reg_offs); - for (i = 0; i != len; ++i) { - if (val & (1 << i)) - data |= (1 << (i + bit)); - else - data &= ~(1 << (i + bit)); - } - writel(data, (IDT434_REG_BASE + reg_offs)); - - spin_unlock_irqrestore(&dev3.lock, flags); -} -EXPORT_SYMBOL(set_434_reg); - -unsigned get_434_reg(unsigned reg_offs) -{ - return readl(IDT434_REG_BASE + reg_offs); -} -EXPORT_SYMBOL(get_434_reg); - -void set_latch_u5(unsigned char or_mask, unsigned char nand_mask) -{ - unsigned long flags; - - spin_lock_irqsave(&dev3.lock, flags); - - dev3.state = (dev3.state | or_mask) & ~nand_mask; - writel(dev3.state, &dev3.base); - - spin_unlock_irqrestore(&dev3.lock, flags); -} -EXPORT_SYMBOL(set_latch_u5); - -unsigned char get_latch_u5(void) -{ - return dev3.state; -} -EXPORT_SYMBOL(get_latch_u5); - /* rb532_set_bit - sanely set a bit * * bitval: new value for the bit @@ -119,13 +62,11 @@ static inline void rb532_set_bit(unsigned bitval, unsigned long flags; u32 val; - bitval = !!bitval; /* map parameter to {0,1} */ - local_irq_save(flags); val = readl(ioaddr); - val &= ~( ~bitval << offset ); /* unset bit if bitval == 0 */ - val |= ( bitval << offset ); /* set bit if bitval == 1 */ + val &= ~(!bitval << offset); /* unset bit if bitval == 0 */ + val |= (!!bitval << offset); /* set bit if bitval == 1 */ writel(val, ioaddr); local_irq_restore(flags); @@ -171,8 +112,8 @@ static int rb532_gpio_direction_input(struct gpio_chip *chip, unsigned offset) gpch = container_of(chip, struct rb532_gpio_chip, chip); - if (rb532_get_bit(offset, gpch->regbase + GPIOFUNC)) - return 1; /* alternate function, GPIOCFG is ignored */ + /* disable alternate function in case it's set */ + rb532_set_bit(0, offset, gpch->regbase + GPIOFUNC); rb532_set_bit(0, offset, gpch->regbase + GPIOCFG); return 0; @@ -188,8 +129,8 @@ static int rb532_gpio_direction_output(struct gpio_chip *chip, gpch = container_of(chip, struct rb532_gpio_chip, chip); - if (rb532_get_bit(offset, gpch->regbase + GPIOFUNC)) - return 1; /* alternate function, GPIOCFG is ignored */ + /* disable alternate function in case it's set */ + rb532_set_bit(0, offset, gpch->regbase + GPIOFUNC); /* set the initial output value */ rb532_set_bit(value, offset, gpch->regbase + GPIOD); @@ -233,10 +174,11 @@ EXPORT_SYMBOL(rb532_gpio_set_istat); /* * Configure GPIO alternate function */ -static void rb532_gpio_set_func(int bit, unsigned gpio) +void rb532_gpio_set_func(unsigned gpio) { - rb532_set_bit(bit, gpio, rb532_gpio_chip->regbase + GPIOFUNC); + rb532_set_bit(1, gpio, rb532_gpio_chip->regbase + GPIOFUNC); } +EXPORT_SYMBOL(rb532_gpio_set_func); int __init rb532_gpio_init(void) { @@ -253,20 +195,6 @@ int __init rb532_gpio_init(void) /* Register our GPIO chip */ gpiochip_add(&rb532_gpio_chip->chip); - r = rb532_dev3_ctl_res; - dev3.base = 
ioremap_nocache(r->start, r->end - r->start); - - if (!dev3.base) { - printk(KERN_ERR "rb532: cannot remap device controller 3\n"); - return -ENXIO; - } - - /* configure CF_GPIO_NUM as CFRDY IRQ source */ - rb532_gpio_set_func(0, CF_GPIO_NUM); - rb532_gpio_direction_input(&rb532_gpio_chip->chip, CF_GPIO_NUM); - rb532_gpio_set_ilevel(1, CF_GPIO_NUM); - rb532_gpio_set_istat(0, CF_GPIO_NUM); - return 0; } arch_initcall(rb532_gpio_init); diff --git a/arch/mips/rb532/irq.c b/arch/mips/rb532/irq.c index 549b46d2fce..53eeb5e7bc5 100644 --- a/arch/mips/rb532/irq.c +++ b/arch/mips/rb532/irq.c @@ -46,6 +46,7 @@ #include <asm/system.h> #include <asm/mach-rc32434/irq.h> +#include <asm/mach-rc32434/gpio.h> struct intr_group { u32 mask; /* mask of valid bits in pending/mask registers */ @@ -150,6 +151,9 @@ static void rb532_disable_irq(unsigned int irq_nr) mask |= intr_bit; WRITE_MASK(addr, mask); + if (group == GPIO_MAPPED_IRQ_GROUP) + rb532_gpio_set_istat(0, irq_nr - GPIO_MAPPED_IRQ_BASE); + /* * if there are no more interrupts enabled in this * group, disable corresponding IP @@ -165,12 +169,35 @@ static void rb532_mask_and_ack_irq(unsigned int irq_nr) ack_local_irq(group_to_ip(irq_to_group(irq_nr))); } +static int rb532_set_type(unsigned int irq_nr, unsigned type) +{ + int gpio = irq_nr - GPIO_MAPPED_IRQ_BASE; + int group = irq_to_group(irq_nr); + + if (group != GPIO_MAPPED_IRQ_GROUP) + return (type == IRQ_TYPE_LEVEL_HIGH) ? 0 : -EINVAL; + + switch (type) { + case IRQ_TYPE_LEVEL_HIGH: + rb532_gpio_set_ilevel(1, gpio); + break; + case IRQ_TYPE_LEVEL_LOW: + rb532_gpio_set_ilevel(0, gpio); + break; + default: + return -EINVAL; + } + + return 0; +} + static struct irq_chip rc32434_irq_type = { .name = "RB532", .ack = rb532_disable_irq, .mask = rb532_disable_irq, .mask_ack = rb532_mask_and_ack_irq, .unmask = rb532_enable_irq, + .set_type = rb532_set_type, }; void __init arch_init_irq(void) diff --git a/arch/mips/rb532/serial.c b/arch/mips/rb532/serial.c index 3e0d7ec3a57..00ed19f0bdb 100644 --- a/arch/mips/rb532/serial.c +++ b/arch/mips/rb532/serial.c @@ -36,7 +36,7 @@ extern unsigned int idt_cpu_freq; static struct uart_port rb532_uart = { - .type = PORT_16550A, + .flags = UPF_BOOT_AUTOCONF, .line = 0, .irq = UART0_IRQ, .iotype = UPIO_MEM, diff --git a/arch/mips/txx9/generic/setup_tx4939.c b/arch/mips/txx9/generic/setup_tx4939.c index 6c0049a5bbc..55440967b3a 100644 --- a/arch/mips/txx9/generic/setup_tx4939.c +++ b/arch/mips/txx9/generic/setup_tx4939.c @@ -435,6 +435,28 @@ void __init tx4939_ata_init(void) platform_device_register(&ata1_dev); } +void __init tx4939_rtc_init(void) +{ + static struct resource res[] = { + { + .start = TX4939_RTC_REG & 0xfffffffffULL, + .end = (TX4939_RTC_REG & 0xfffffffffULL) + 0x100 - 1, + .flags = IORESOURCE_MEM, + }, { + .start = TXX9_IRQ_BASE + TX4939_IR_RTC, + .flags = IORESOURCE_IRQ, + }, + }; + static struct platform_device rtc_dev = { + .name = "tx4939rtc", + .id = -1, + .num_resources = ARRAY_SIZE(res), + .resource = res, + }; + + platform_device_register(&rtc_dev); +} + static void __init tx4939_stop_unused_modules(void) { __u64 pcfg, rst = 0, ckd = 0; diff --git a/arch/mips/txx9/rbtx4939/setup.c b/arch/mips/txx9/rbtx4939/setup.c index 98fbd9391bf..656603b85b7 100644 --- a/arch/mips/txx9/rbtx4939/setup.c +++ b/arch/mips/txx9/rbtx4939/setup.c @@ -336,6 +336,7 @@ static void __init rbtx4939_device_init(void) rbtx4939_led_setup(); tx4939_wdt_init(); tx4939_ata_init(); + tx4939_rtc_init(); } static void __init rbtx4939_setup(void) diff --git 
a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c index 75115849af3..4a58c8ce3f6 100644 --- a/arch/x86/boot/video-vesa.c +++ b/arch/x86/boot/video-vesa.c @@ -269,9 +269,8 @@ void vesa_store_edid(void) we genuinely have to assume all registers are destroyed here. */ asm("pushw %%es; movw %2,%%es; "INT10"; popw %%es" - : "+a" (ax), "+b" (bx) - : "c" (cx), "D" (di) - : "esi"); + : "+a" (ax), "+b" (bx), "+c" (cx), "+D" (di) + : : "esi", "edx"); if (ax != 0x004f) return; /* No EDID */ @@ -285,9 +284,9 @@ void vesa_store_edid(void) dx = 0; /* EDID block number */ di =(size_t) &boot_params.edid_info; /* (ES:)Pointer to block */ asm(INT10 - : "+a" (ax), "+b" (bx), "+d" (dx), "=m" (boot_params.edid_info) - : "c" (cx), "D" (di) - : "esi"); + : "+a" (ax), "+b" (bx), "+d" (dx), "=m" (boot_params.edid_info), + "+c" (cx), "+D" (di) + : : "esi"); #endif /* CONFIG_FIRMWARE_EDID */ } diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 549f2ada55f..430e5c38a54 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -30,7 +30,7 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) { /* Unmask CPUID levels if masked: */ - if (c->x86 == 6 && c->x86_model >= 15) { + if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) { u64 misc_enable; rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index d259e5d2e05..236a401b825 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c @@ -1594,8 +1594,7 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) /* kvm/qemu doesn't have mtrr set right, don't trim them all */ if (!highest_pfn) { - WARN(!kvm_para_available(), KERN_WARNING - "WARNING: strange, CPU MTRRs all blank?\n"); + printk(KERN_INFO "CPU MTRRs all blank - virtualized system.\n"); return 0; } diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c index da91701a234..169a120587b 100644 --- a/arch/x86/kernel/ds.c +++ b/arch/x86/kernel/ds.c @@ -15,8 +15,8 @@ * - buffer allocation (memory accounting) * * - * Copyright (C) 2007-2008 Intel Corporation. - * Markus Metzger <markus.t.metzger@intel.com>, 2007-2008 + * Copyright (C) 2007-2009 Intel Corporation. + * Markus Metzger <markus.t.metzger@intel.com>, 2007-2009 */ @@ -890,7 +890,7 @@ int ds_set_pebs_reset(struct pebs_tracer *tracer, u64 value) } static const struct ds_configuration ds_cfg_netburst = { - .name = "netburst", + .name = "Netburst", .ctl[dsf_bts] = (1 << 2) | (1 << 3), .ctl[dsf_bts_kernel] = (1 << 5), .ctl[dsf_bts_user] = (1 << 6), @@ -904,7 +904,7 @@ static const struct ds_configuration ds_cfg_netburst = { #endif }; static const struct ds_configuration ds_cfg_pentium_m = { - .name = "pentium m", + .name = "Pentium M", .ctl[dsf_bts] = (1 << 6) | (1 << 7), .sizeof_field = sizeof(long), @@ -915,8 +915,8 @@ static const struct ds_configuration ds_cfg_pentium_m = { .sizeof_rec[ds_pebs] = sizeof(long) * 18, #endif }; -static const struct ds_configuration ds_cfg_core2 = { - .name = "core 2", +static const struct ds_configuration ds_cfg_core2_atom = { + .name = "Core 2/Atom", .ctl[dsf_bts] = (1 << 6) | (1 << 7), .ctl[dsf_bts_kernel] = (1 << 9), .ctl[dsf_bts_user] = (1 << 10), @@ -949,19 +949,22 @@ void __cpuinit ds_init_intel(struct cpuinfo_x86 *c) switch (c->x86) { case 0x6: switch (c->x86_model) { - case 0 ... 
0xC: - /* sorry, don't know about them */ - break; - case 0xD: - case 0xE: /* Pentium M */ + case 0x9: + case 0xd: /* Pentium M */ ds_configure(&ds_cfg_pentium_m); break; - default: /* Core2, Atom, ... */ - ds_configure(&ds_cfg_core2); + case 0xf: + case 0x17: /* Core2 */ + case 0x1c: /* Atom */ + ds_configure(&ds_cfg_core2_atom); + break; + case 0x1a: /* i7 */ + default: + /* sorry, don't know about them */ break; } break; - case 0xF: + case 0xf: switch (c->x86_model) { case 0x0: case 0x1: diff --git a/block/blk-barrier.c b/block/blk-barrier.c index 8eba4e43bb0..f7dae57e6ca 100644 --- a/block/blk-barrier.c +++ b/block/blk-barrier.c @@ -302,7 +302,7 @@ static void bio_end_empty_barrier(struct bio *bio, int err) * Description: * Issue a flush for the block device in question. Caller can supply * room for storing the error offset in case of a flush error, if they - * wish to. Caller must run wait_for_completion() on its own. + * wish to. */ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector) { diff --git a/block/blk-core.c b/block/blk-core.c index a824e49c0d0..ca69f3d9410 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -64,11 +64,12 @@ static struct workqueue_struct *kblockd_workqueue; static void drive_stat_acct(struct request *rq, int new_io) { + struct gendisk *disk = rq->rq_disk; struct hd_struct *part; int rw = rq_data_dir(rq); int cpu; - if (!blk_fs_request(rq) || !rq->rq_disk) + if (!blk_fs_request(rq) || !disk || !blk_queue_io_stat(disk->queue)) return; cpu = part_stat_lock(); @@ -599,8 +600,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) q->request_fn = rfn; q->prep_rq_fn = NULL; q->unplug_fn = generic_unplug_device; - q->queue_flags = (1 << QUEUE_FLAG_CLUSTER | - 1 << QUEUE_FLAG_STACKABLE); + q->queue_flags = QUEUE_FLAG_DEFAULT; q->queue_lock = lock; blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK); @@ -1125,6 +1125,8 @@ void init_request_from_bio(struct request *req, struct bio *bio) if (bio_sync(bio)) req->cmd_flags |= REQ_RW_SYNC; + if (bio_unplug(bio)) + req->cmd_flags |= REQ_UNPLUG; if (bio_rw_meta(bio)) req->cmd_flags |= REQ_RW_META; @@ -1141,6 +1143,7 @@ static int __make_request(struct request_queue *q, struct bio *bio) int el_ret, nr_sectors; const unsigned short prio = bio_prio(bio); const int sync = bio_sync(bio); + const int unplug = bio_unplug(bio); int rw_flags; nr_sectors = bio_sectors(bio); @@ -1244,7 +1247,7 @@ get_rq: blk_plug_device(q); add_request(q, req); out: - if (sync || blk_queue_nonrot(q)) + if (unplug || blk_queue_nonrot(q)) __generic_unplug_device(q); spin_unlock_irq(q->queue_lock); return 0; @@ -1448,6 +1451,11 @@ static inline void __generic_make_request(struct bio *bio) err = -EOPNOTSUPP; goto end_io; } + if (bio_barrier(bio) && bio_has_data(bio) && + (q->next_ordered == QUEUE_ORDERED_NONE)) { + err = -EOPNOTSUPP; + goto end_io; + } ret = q->make_request_fn(q, bio); } while (ret); @@ -1655,6 +1663,55 @@ void blkdev_dequeue_request(struct request *req) } EXPORT_SYMBOL(blkdev_dequeue_request); +static void blk_account_io_completion(struct request *req, unsigned int bytes) +{ + struct gendisk *disk = req->rq_disk; + + if (!disk || !blk_queue_io_stat(disk->queue)) + return; + + if (blk_fs_request(req)) { + const int rw = rq_data_dir(req); + struct hd_struct *part; + int cpu; + + cpu = part_stat_lock(); + part = disk_map_sector_rcu(req->rq_disk, req->sector); + part_stat_add(cpu, part, sectors[rw], bytes >> 9); + part_stat_unlock(); + } +} + +static void blk_account_io_done(struct request 
*req) +{ + struct gendisk *disk = req->rq_disk; + + if (!disk || !blk_queue_io_stat(disk->queue)) + return; + + /* + * Account IO completion. bar_rq isn't accounted as a normal + * IO on queueing nor completion. Accounting the containing + * request is enough. + */ + if (blk_fs_request(req) && req != &req->q->bar_rq) { + unsigned long duration = jiffies - req->start_time; + const int rw = rq_data_dir(req); + struct hd_struct *part; + int cpu; + + cpu = part_stat_lock(); + part = disk_map_sector_rcu(disk, req->sector); + + part_stat_inc(cpu, part, ios[rw]); + part_stat_add(cpu, part, ticks[rw], duration); + part_round_stats(cpu, part); + part_dec_in_flight(part); + + part_stat_unlock(); + } +} + /** * __end_that_request_first - end I/O on a request * @req: the request being processed @@ -1690,16 +1747,7 @@ static int __end_that_request_first(struct request *req, int error, (unsigned long long)req->sector); } - if (blk_fs_request(req) && req->rq_disk) { - const int rw = rq_data_dir(req); - struct hd_struct *part; - int cpu; - - cpu = part_stat_lock(); - part = disk_map_sector_rcu(req->rq_disk, req->sector); - part_stat_add(cpu, part, sectors[rw], nr_bytes >> 9); - part_stat_unlock(); - } + blk_account_io_completion(req, nr_bytes); total_bytes = bio_nbytes = 0; while ((bio = req->bio) != NULL) { @@ -1779,8 +1827,6 @@ static int __end_that_request_first(struct request *req, int error, */ static void end_that_request_last(struct request *req, int error) { - struct gendisk *disk = req->rq_disk; - if (blk_rq_tagged(req)) blk_queue_end_tag(req->q, req); @@ -1792,27 +1838,7 @@ static void end_that_request_last(struct request *req, int error) blk_delete_timer(req); - /* - * Account IO completion. bar_rq isn't accounted as a normal - * IO on queueing nor completion. Accounting the containing - * request is enough. - */ - if (disk && blk_fs_request(req) && req != &req->q->bar_rq) { - unsigned long duration = jiffies - req->start_time; - const int rw = rq_data_dir(req); - struct hd_struct *part; - int cpu; - - cpu = part_stat_lock(); - part = disk_map_sector_rcu(disk, req->sector); - - part_stat_inc(cpu, part, ios[rw]); - part_stat_add(cpu, part, ticks[rw], duration); - part_round_stats(cpu, part); - part_dec_in_flight(part); - - part_stat_unlock(); - } + blk_account_io_done(req); if (req->end_io) req->end_io(req, error); diff --git a/block/blk-integrity.c b/block/blk-integrity.c index 61a8e2f8fdd..91fa8e06b6a 100644 --- a/block/blk-integrity.c +++ b/block/blk-integrity.c @@ -309,24 +309,24 @@ static struct kobj_type integrity_ktype = { /** * blk_integrity_register - Register a gendisk as being integrity-capable * @disk: struct gendisk pointer to make integrity-aware - * @template: integrity profile + * @template: optional integrity profile to register * * Description: When a device needs to advertise itself as being able * to send/receive integrity metadata it must use this function to * register the capability with the block layer. The template is a * blk_integrity struct with values appropriate for the underlying - * hardware. See Documentation/block/data-integrity.txt. + * hardware. If template is NULL the new profile is allocated but + * not filled out. See Documentation/block/data-integrity.txt. 
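The blk-core.c hunks above pull per-partition accounting out of __end_that_request_first() and end_that_request_last() into blk_account_io_completion() and blk_account_io_done(), both of which return early unless the queue's io-stat flag is set (the iostats toggle added in blk-sysfs.c below). A simplified sketch of that gated-accounting shape, with illustrative names only:

#include <stdbool.h>
#include <stdio.h>

struct disk_stats {
    unsigned long long sectors[2];  /* [0] = read, [1] = write */
    unsigned long ios[2];
    unsigned long ticks[2];
    bool io_stat;                   /* the per-queue "iostats" toggle */
};

static void account_completion(struct disk_stats *s, int rw, unsigned int bytes)
{
    if (!s->io_stat)                /* accounting switched off: cheap exit */
        return;
    s->sectors[rw] += bytes >> 9;   /* 512-byte sectors */
}

static void account_done(struct disk_stats *s, int rw, unsigned long duration)
{
    if (!s->io_stat)
        return;
    s->ios[rw]++;
    s->ticks[rw] += duration;
}

int main(void)
{
    struct disk_stats s = { { 0, 0 }, { 0, 0 }, { 0, 0 }, true };

    account_completion(&s, 1, 4096);    /* a 4 KiB write completed */
    account_done(&s, 1, 3);             /* and took 3 ticks        */
    printf("write: sectors=%llu ios=%lu ticks=%lu\n",
           s.sectors[1], s.ios[1], s.ticks[1]);
    return 0;
}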
*/ int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template) { struct blk_integrity *bi; BUG_ON(disk == NULL); - BUG_ON(template == NULL); if (disk->integrity == NULL) { bi = kmem_cache_alloc(integrity_cachep, - GFP_KERNEL | __GFP_ZERO); + GFP_KERNEL | __GFP_ZERO); if (!bi) return -1; @@ -346,13 +346,16 @@ int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template) bi = disk->integrity; /* Use the provided profile as template */ - bi->name = template->name; - bi->generate_fn = template->generate_fn; - bi->verify_fn = template->verify_fn; - bi->tuple_size = template->tuple_size; - bi->set_tag_fn = template->set_tag_fn; - bi->get_tag_fn = template->get_tag_fn; - bi->tag_size = template->tag_size; + if (template != NULL) { + bi->name = template->name; + bi->generate_fn = template->generate_fn; + bi->verify_fn = template->verify_fn; + bi->tuple_size = template->tuple_size; + bi->set_tag_fn = template->set_tag_fn; + bi->get_tag_fn = template->get_tag_fn; + bi->tag_size = template->tag_size; + } else + bi->name = "unsupported"; return 0; } diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index a29cb788e40..e29ddfc73cf 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -130,6 +130,27 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page) return queue_var_show(max_hw_sectors_kb, (page)); } +static ssize_t queue_nonrot_show(struct request_queue *q, char *page) +{ + return queue_var_show(!blk_queue_nonrot(q), page); +} + +static ssize_t queue_nonrot_store(struct request_queue *q, const char *page, + size_t count) +{ + unsigned long nm; + ssize_t ret = queue_var_store(&nm, page, count); + + spin_lock_irq(q->queue_lock); + if (nm) + queue_flag_clear(QUEUE_FLAG_NONROT, q); + else + queue_flag_set(QUEUE_FLAG_NONROT, q); + spin_unlock_irq(q->queue_lock); + + return ret; +} + static ssize_t queue_nomerges_show(struct request_queue *q, char *page) { return queue_var_show(blk_queue_nomerges(q), page); @@ -146,8 +167,8 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page, queue_flag_set(QUEUE_FLAG_NOMERGES, q); else queue_flag_clear(QUEUE_FLAG_NOMERGES, q); - spin_unlock_irq(q->queue_lock); + return ret; } @@ -176,6 +197,27 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count) return ret; } +static ssize_t queue_iostats_show(struct request_queue *q, char *page) +{ + return queue_var_show(blk_queue_io_stat(q), page); +} + +static ssize_t queue_iostats_store(struct request_queue *q, const char *page, + size_t count) +{ + unsigned long stats; + ssize_t ret = queue_var_store(&stats, page, count); + + spin_lock_irq(q->queue_lock); + if (stats) + queue_flag_set(QUEUE_FLAG_IO_STAT, q); + else + queue_flag_clear(QUEUE_FLAG_IO_STAT, q); + spin_unlock_irq(q->queue_lock); + + return ret; +} + static struct queue_sysfs_entry queue_requests_entry = { .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR }, .show = queue_requests_show, @@ -210,6 +252,12 @@ static struct queue_sysfs_entry queue_hw_sector_size_entry = { .show = queue_hw_sector_size_show, }; +static struct queue_sysfs_entry queue_nonrot_entry = { + .attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR }, + .show = queue_nonrot_show, + .store = queue_nonrot_store, +}; + static struct queue_sysfs_entry queue_nomerges_entry = { .attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR }, .show = queue_nomerges_show, @@ -222,6 +270,12 @@ static struct queue_sysfs_entry queue_rq_affinity_entry = { .store = 
queue_rq_affinity_store, }; +static struct queue_sysfs_entry queue_iostats_entry = { + .attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR }, + .show = queue_iostats_show, + .store = queue_iostats_store, +}; + static struct attribute *default_attrs[] = { &queue_requests_entry.attr, &queue_ra_entry.attr, @@ -229,8 +283,10 @@ static struct attribute *default_attrs[] = { &queue_max_sectors_entry.attr, &queue_iosched_entry.attr, &queue_hw_sector_size_entry.attr, + &queue_nonrot_entry.attr, &queue_nomerges_entry.attr, &queue_rq_affinity_entry.attr, + &queue_iostats_entry.attr, NULL, }; diff --git a/block/blktrace.c b/block/blktrace.c index b0a2cae886d..39cc3bfe56e 100644 --- a/block/blktrace.c +++ b/block/blktrace.c @@ -187,59 +187,12 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, static struct dentry *blk_tree_root; static DEFINE_MUTEX(blk_tree_mutex); -static unsigned int root_users; - -static inline void blk_remove_root(void) -{ - if (blk_tree_root) { - debugfs_remove(blk_tree_root); - blk_tree_root = NULL; - } -} - -static void blk_remove_tree(struct dentry *dir) -{ - mutex_lock(&blk_tree_mutex); - debugfs_remove(dir); - if (--root_users == 0) - blk_remove_root(); - mutex_unlock(&blk_tree_mutex); -} - -static struct dentry *blk_create_tree(const char *blk_name) -{ - struct dentry *dir = NULL; - int created = 0; - - mutex_lock(&blk_tree_mutex); - - if (!blk_tree_root) { - blk_tree_root = debugfs_create_dir("block", NULL); - if (!blk_tree_root) - goto err; - created = 1; - } - - dir = debugfs_create_dir(blk_name, blk_tree_root); - if (dir) - root_users++; - else { - /* Delete root only if we created it */ - if (created) - blk_remove_root(); - } - -err: - mutex_unlock(&blk_tree_mutex); - return dir; -} static void blk_trace_cleanup(struct blk_trace *bt) { - relay_close(bt->rchan); debugfs_remove(bt->msg_file); debugfs_remove(bt->dropped_file); - blk_remove_tree(bt->dir); + relay_close(bt->rchan); free_percpu(bt->sequence); free_percpu(bt->msg_data); kfree(bt); @@ -346,7 +299,18 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf, static int blk_remove_buf_file_callback(struct dentry *dentry) { + struct dentry *parent = dentry->d_parent; debugfs_remove(dentry); + + /* + * this will fail for all but the last file, but that is ok. what we + * care about is the top level buts->name directory going away, when + * the last trace file is gone. Then we don't have to rmdir() that + * manually on trace stop, so it nicely solves the issue with + * force killing of running traces. + */ + + debugfs_remove(parent); return 0; } @@ -404,7 +368,15 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, goto err; ret = -ENOENT; - dir = blk_create_tree(buts->name); + + if (!blk_tree_root) { + blk_tree_root = debugfs_create_dir("block", NULL); + if (!blk_tree_root) + return -ENOMEM; + } + + dir = debugfs_create_dir(buts->name, blk_tree_root); + if (!dir) goto err; @@ -458,8 +430,6 @@ probe_err: atomic_dec(&blk_probes_ref); mutex_unlock(&blk_probe_mutex); err: - if (dir) - blk_remove_tree(dir); if (bt) { if (bt->msg_file) debugfs_remove(bt->msg_file); diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index e8525fa7282..664ebfd092e 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -84,6 +84,11 @@ struct cfq_data { */ struct cfq_rb_root service_tree; unsigned int busy_queues; + /* + * Used to track any pending rt requests so we can pre-empt current + * non-RT cfqq in service when this value is non-zero. 
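The cfq-iosched.c changes that follow track real-time queues with a busy_rt_queues counter, bumped as queues enter and leave the service tree, and expire a non-RT queue whose slice is active whenever the counter is non-zero. A standalone sketch of that counter-based pre-emption check (names are illustrative, not CFQ's):

#include <stdbool.h>
#include <stdio.h>

struct sched_data {
    unsigned int busy_queues;
    unsigned int busy_rt_queues;    /* pending real-time queues */
};

struct queue {
    bool is_rt;                     /* real-time scheduling class */
    bool on_rr;                     /* currently on the service tree */
};

static void add_to_rr(struct sched_data *sd, struct queue *q)
{
    q->on_rr = true;
    sd->busy_queues++;
    if (q->is_rt)
        sd->busy_rt_queues++;
}

static void del_from_rr(struct sched_data *sd, struct queue *q)
{
    q->on_rr = false;
    sd->busy_queues--;
    if (q->is_rt)
        sd->busy_rt_queues--;
}

/* Expire the active queue early if it is not RT but an RT queue is waiting. */
static bool should_preempt_active(const struct sched_data *sd,
                                  const struct queue *active)
{
    return !active->is_rt && sd->busy_rt_queues > 0;
}

int main(void)
{
    struct sched_data sd = { 0, 0 };
    struct queue be = { .is_rt = false }, rt = { .is_rt = true };

    add_to_rr(&sd, &be);
    printf("preempt BE queue? %d\n", should_preempt_active(&sd, &be)); /* 0 */
    add_to_rr(&sd, &rt);
    printf("preempt BE queue? %d\n", should_preempt_active(&sd, &be)); /* 1 */
    del_from_rr(&sd, &rt);
    return 0;
}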
+ */ + unsigned int busy_rt_queues; int rq_in_driver; int sync_flight; @@ -562,6 +567,8 @@ static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) BUG_ON(cfq_cfqq_on_rr(cfqq)); cfq_mark_cfqq_on_rr(cfqq); cfqd->busy_queues++; + if (cfq_class_rt(cfqq)) + cfqd->busy_rt_queues++; cfq_resort_rr_list(cfqd, cfqq); } @@ -581,6 +588,8 @@ static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) BUG_ON(!cfqd->busy_queues); cfqd->busy_queues--; + if (cfq_class_rt(cfqq)) + cfqd->busy_rt_queues--; } /* @@ -1005,6 +1014,20 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) goto expire; /* + * If we have a RT cfqq waiting, then we pre-empt the current non-rt + * cfqq. + */ + if (!cfq_class_rt(cfqq) && cfqd->busy_rt_queues) { + /* + * We simulate this as cfqq timed out so that it gets to bank + * the remaining of its time slice. + */ + cfq_log_cfqq(cfqd, cfqq, "preempt"); + cfq_slice_expired(cfqd, 1); + goto new_queue; + } + + /* * The active queue has requests and isn't expired, allow it to * dispatch. */ @@ -1067,6 +1090,13 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq, if (RB_EMPTY_ROOT(&cfqq->sort_list)) break; + /* + * If there is a non-empty RT cfqq waiting for current + * cfqq's timeslice to complete, pre-empt this cfqq + */ + if (!cfq_class_rt(cfqq) && cfqd->busy_rt_queues) + break; + } while (dispatched < max_dispatch); /* @@ -1801,6 +1831,12 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, if (rq_is_meta(rq) && !cfqq->meta_pending) return 1; + /* + * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice. + */ + if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq)) + return 1; + if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq)) return 0; @@ -1870,7 +1906,8 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, /* * not the active queue - expire current slice if it is * idle and has expired it's mean thinktime or this new queue - * has some old slice time left and is of higher priority + * has some old slice time left and is of higher priority or + * this new queue is RT and the current one is BE */ cfq_preempt_queue(cfqd, cfqq); cfq_mark_cfqq_must_dispatch(cfqq); diff --git a/drivers/char/selection.c b/drivers/char/selection.c index f29fbe9b8ed..cb8ca569896 100644 --- a/drivers/char/selection.c +++ b/drivers/char/selection.c @@ -268,7 +268,7 @@ int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *t /* Allocate a new buffer before freeing the old one ... */ multiplier = use_unicode ? 
3 : 1; /* chars can take up to 3 bytes */ - bp = kmalloc((sel_end-sel_start)/2*multiplier+1, GFP_KERNEL); + bp = kmalloc(((sel_end-sel_start)/2+1)*multiplier, GFP_KERNEL); if (!bp) { printk(KERN_WARNING "selection: kmalloc() failed\n"); clear_selection(); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 9da58145287..6915fb82d0b 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -136,7 +136,7 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size) obj = kcalloc(1, sizeof(*obj), GFP_KERNEL); obj->dev = dev; - obj->filp = shmem_file_setup("drm mm object", size, 0); + obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); if (IS_ERR(obj->filp)) { kfree(obj); return NULL; diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 26474c92193..c986978ce76 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -31,7 +31,7 @@ char e1000_driver_name[] = "e1000"; static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; -#define DRV_VERSION "7.3.20-k3-NAPI" +#define DRV_VERSION "7.3.21-k3-NAPI" const char e1000_driver_version[] = DRV_VERSION; static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; @@ -3712,7 +3712,7 @@ static irqreturn_t e1000_intr(int irq, void *data) struct e1000_hw *hw = &adapter->hw; u32 rctl, icr = er32(ICR); - if (unlikely(!icr)) + if (unlikely((!icr) || test_bit(__E1000_RESETTING, &adapter->flags))) return IRQ_NONE; /* Not our interrupt */ /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c index f3706e415b4..f49a426ad68 100644 --- a/drivers/net/gianfar_mii.c +++ b/drivers/net/gianfar_mii.c @@ -234,6 +234,8 @@ static int gfar_mdio_probe(struct of_device *ofdev, if (NULL == new_bus) return -ENOMEM; + device_init_wakeup(&ofdev->dev, 1); + new_bus->name = "Gianfar MII Bus", new_bus->read = &gfar_mdio_read, new_bus->write = &gfar_mdio_write, diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c index f5e2e7235fc..13ca73f96ec 100644 --- a/drivers/net/igb/e1000_82575.c +++ b/drivers/net/igb/e1000_82575.c @@ -699,11 +699,18 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw) /* SGMII link check is done through the PCS register. */ if ((hw->phy.media_type != e1000_media_type_copper) || - (igb_sgmii_active_82575(hw))) + (igb_sgmii_active_82575(hw))) { ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed, &duplex); - else + /* + * Use this flag to determine if link needs to be checked or + * not. If we have link clear the flag so that we do not + * continue to check for link. 
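Back in the drivers/char/selection.c hunk, the allocation grows from (sel_end - sel_start)/2 * multiplier + 1 to ((sel_end - sel_start)/2 + 1) * multiplier bytes. Assuming two-byte screen cells (which is what the divide by two reflects), the inclusive selection holds n/2 + 1 characters and each may expand to multiplier bytes, so the old formula undercounts whenever multiplier is 3. A quick worked check with made-up offsets:

#include <stdio.h>

int main(void)
{
    unsigned int sel_start = 100, sel_end = 120;     /* made-up offsets   */
    unsigned int n = sel_end - sel_start;            /* 20 bytes of cells */
    unsigned int multiplier = 3;                     /* worst-case UTF-8  */

    unsigned int chars  = n / 2 + 1;                 /* 11 characters     */
    unsigned int needed = chars * multiplier;        /* 33 bytes          */

    unsigned int old_size = n / 2 * multiplier + 1;  /* 31: two bytes short */
    unsigned int new_size = (n / 2 + 1) * multiplier;/* 33: always enough   */

    printf("needed=%u old=%u new=%u\n", needed, old_size, new_size);
    return 0;
}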
+ */ + hw->mac.get_link_status = !hw->mac.serdes_has_link; + } else { ret_val = igb_check_for_copper_link(hw); + } return ret_val; } diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h index 5a27825cc48..aebef8e48e7 100644 --- a/drivers/net/igb/igb.h +++ b/drivers/net/igb/igb.h @@ -300,11 +300,10 @@ struct igb_adapter { #define IGB_FLAG_HAS_MSI (1 << 0) #define IGB_FLAG_MSI_ENABLE (1 << 1) -#define IGB_FLAG_HAS_DCA (1 << 2) -#define IGB_FLAG_DCA_ENABLED (1 << 3) -#define IGB_FLAG_IN_NETPOLL (1 << 5) -#define IGB_FLAG_QUAD_PORT_A (1 << 6) -#define IGB_FLAG_NEED_CTX_IDX (1 << 7) +#define IGB_FLAG_DCA_ENABLED (1 << 2) +#define IGB_FLAG_IN_NETPOLL (1 << 3) +#define IGB_FLAG_QUAD_PORT_A (1 << 4) +#define IGB_FLAG_NEED_CTX_IDX (1 << 5) enum e1000_state_t { __IGB_TESTING, diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index b82b0fb2056..a50db5398fa 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -206,10 +206,11 @@ static int __init igb_init_module(void) global_quad_port_a = 0; - ret = pci_register_driver(&igb_driver); #ifdef CONFIG_IGB_DCA dca_register_notify(&dca_notifier); #endif + + ret = pci_register_driver(&igb_driver); return ret; } @@ -1156,11 +1157,10 @@ static int __devinit igb_probe(struct pci_dev *pdev, /* set flags */ switch (hw->mac.type) { - case e1000_82576: case e1000_82575: - adapter->flags |= IGB_FLAG_HAS_DCA; adapter->flags |= IGB_FLAG_NEED_CTX_IDX; break; + case e1000_82576: default: break; } @@ -1310,8 +1310,7 @@ static int __devinit igb_probe(struct pci_dev *pdev, goto err_register; #ifdef CONFIG_IGB_DCA - if ((adapter->flags & IGB_FLAG_HAS_DCA) && - (dca_add_requester(&pdev->dev) == 0)) { + if (dca_add_requester(&pdev->dev) == 0) { adapter->flags |= IGB_FLAG_DCA_ENABLED; dev_info(&pdev->dev, "DCA enabled\n"); /* Always use CB2 mode, difference is masked @@ -1835,11 +1834,11 @@ static void igb_setup_rctl(struct igb_adapter *adapter) rctl |= E1000_RCTL_SECRC; /* - * disable store bad packets, long packet enable, and clear size bits. + * disable store bad packets and clear size bits. */ - rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_LPE | E1000_RCTL_SZ_256); + rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256); - if (adapter->netdev->mtu > ETH_DATA_LEN) + /* enable LPE when to prevent packets larger than max_frame_size */ rctl |= E1000_RCTL_LPE; /* Setup buffer sizes */ @@ -1865,7 +1864,7 @@ static void igb_setup_rctl(struct igb_adapter *adapter) */ /* allocations using alloc_page take too long for regular MTU * so only enable packet split for jumbo frames */ - if (rctl & E1000_RCTL_LPE) { + if (adapter->netdev->mtu > ETH_DATA_LEN) { adapter->rx_ps_hdr_size = IGB_RXBUFFER_128; srrctl |= adapter->rx_ps_hdr_size << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; @@ -3473,19 +3472,16 @@ static int __igb_notify_dca(struct device *dev, void *data) struct e1000_hw *hw = &adapter->hw; unsigned long event = *(unsigned long *)data; - if (!(adapter->flags & IGB_FLAG_HAS_DCA)) - goto out; - switch (event) { case DCA_PROVIDER_ADD: /* if already enabled, don't do it again */ if (adapter->flags & IGB_FLAG_DCA_ENABLED) break; - adapter->flags |= IGB_FLAG_DCA_ENABLED; /* Always use CB2 mode, difference is masked * in the CB driver. 
*/ wr32(E1000_DCA_CTRL, 2); if (dca_add_requester(dev) == 0) { + adapter->flags |= IGB_FLAG_DCA_ENABLED; dev_info(&adapter->pdev->dev, "DCA enabled\n"); igb_setup_dca(adapter); break; @@ -3502,7 +3498,7 @@ static int __igb_notify_dca(struct device *dev, void *data) } break; } -out: + return 0; } diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h index a75a31005fd..9c78c963b72 100644 --- a/drivers/net/netxen/netxen_nic.h +++ b/drivers/net/netxen/netxen_nic.h @@ -210,7 +210,7 @@ #define MAX_CMD_DESCRIPTORS_HOST 1024 #define MAX_RCV_DESCRIPTORS_1G 2048 #define MAX_RCV_DESCRIPTORS_10G 4096 -#define MAX_JUMBO_RCV_DESCRIPTORS 512 +#define MAX_JUMBO_RCV_DESCRIPTORS 1024 #define MAX_LRO_RCV_DESCRIPTORS 8 #define MAX_RCVSTATUS_DESCRIPTORS MAX_RCV_DESCRIPTORS #define MAX_JUMBO_RCV_DESC MAX_JUMBO_RCV_DESCRIPTORS diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index ca7c8d8050c..ffd37bea162 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c @@ -947,8 +947,10 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose) } for (i = 0; i < n; i++) { if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 || - netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) + netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) { + kfree(buf); return -EIO; + } buf[i].addr = addr; buf[i].data = val; diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c index 72fd9e97c19..b2dcdb5ed8b 100644 --- a/drivers/net/r6040.c +++ b/drivers/net/r6040.c @@ -438,7 +438,6 @@ static void r6040_down(struct net_device *dev) { struct r6040_private *lp = netdev_priv(dev); void __iomem *ioaddr = lp->base; - struct pci_dev *pdev = lp->pdev; int limit = 2048; u16 *adrp; u16 cmd; diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index 7673fd92eaf..ab0e09bf154 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c @@ -676,9 +676,8 @@ static int efx_init_port(struct efx_nic *efx) rc = efx->phy_op->init(efx); if (rc) return rc; - efx->phy_op->reconfigure(efx); - mutex_lock(&efx->mac_lock); + efx->phy_op->reconfigure(efx); rc = falcon_switch_mac(efx); mutex_unlock(&efx->mac_lock); if (rc) @@ -686,7 +685,7 @@ static int efx_init_port(struct efx_nic *efx) efx->mac_op->reconfigure(efx); efx->port_initialized = true; - efx->stats_enabled = true; + efx_stats_enable(efx); return 0; fail: @@ -735,6 +734,7 @@ static void efx_fini_port(struct efx_nic *efx) if (!efx->port_initialized) return; + efx_stats_disable(efx); efx->phy_op->fini(efx); efx->port_initialized = false; @@ -1361,6 +1361,20 @@ static int efx_net_stop(struct net_device *net_dev) return 0; } +void efx_stats_disable(struct efx_nic *efx) +{ + spin_lock(&efx->stats_lock); + ++efx->stats_disable_count; + spin_unlock(&efx->stats_lock); +} + +void efx_stats_enable(struct efx_nic *efx) +{ + spin_lock(&efx->stats_lock); + --efx->stats_disable_count; + spin_unlock(&efx->stats_lock); +} + /* Context: process, dev_base_lock or RTNL held, non-blocking. */ static struct net_device_stats *efx_net_stats(struct net_device *net_dev) { @@ -1369,12 +1383,12 @@ static struct net_device_stats *efx_net_stats(struct net_device *net_dev) struct net_device_stats *stats = &net_dev->stats; /* Update stats if possible, but do not wait if another thread - * is updating them (or resetting the NIC); slightly stale - * stats are acceptable. + * is updating them or if MAC stats fetches are temporarily + * disabled; slightly stale stats are acceptable. 
*/ if (!spin_trylock(&efx->stats_lock)) return stats; - if (efx->stats_enabled) { + if (!efx->stats_disable_count) { efx->mac_op->update_stats(efx); falcon_update_nic_stats(efx); } @@ -1622,16 +1636,12 @@ static void efx_unregister_netdev(struct efx_nic *efx) /* Tears down the entire software state and most of the hardware state * before reset. */ -void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd) +void efx_reset_down(struct efx_nic *efx, enum reset_type method, + struct ethtool_cmd *ecmd) { EFX_ASSERT_RESET_SERIALISED(efx); - /* The net_dev->get_stats handler is quite slow, and will fail - * if a fetch is pending over reset. Serialise against it. */ - spin_lock(&efx->stats_lock); - efx->stats_enabled = false; - spin_unlock(&efx->stats_lock); - + efx_stats_disable(efx); efx_stop_all(efx); mutex_lock(&efx->mac_lock); mutex_lock(&efx->spi_lock); @@ -1639,6 +1649,8 @@ void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd) efx->phy_op->get_settings(efx, ecmd); efx_fini_channels(efx); + if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) + efx->phy_op->fini(efx); } /* This function will always ensure that the locks acquired in @@ -1646,7 +1658,8 @@ void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd) * that we were unable to reinitialise the hardware, and the * driver should be disabled. If ok is false, then the rx and tx * engines are not restarted, pending a RESET_DISABLE. */ -int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok) +int efx_reset_up(struct efx_nic *efx, enum reset_type method, + struct ethtool_cmd *ecmd, bool ok) { int rc; @@ -1658,6 +1671,15 @@ int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok) ok = false; } + if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) { + if (ok) { + rc = efx->phy_op->init(efx); + if (rc) + ok = false; + } else + efx->port_initialized = false; + } + if (ok) { efx_init_channels(efx); @@ -1670,7 +1692,7 @@ int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok) if (ok) { efx_start_all(efx); - efx->stats_enabled = true; + efx_stats_enable(efx); } return rc; } @@ -1702,7 +1724,7 @@ static int efx_reset(struct efx_nic *efx) EFX_INFO(efx, "resetting (%d)\n", method); - efx_reset_down(efx, &ecmd); + efx_reset_down(efx, method, &ecmd); rc = falcon_reset_hw(efx, method); if (rc) { @@ -1721,10 +1743,10 @@ static int efx_reset(struct efx_nic *efx) /* Leave device stopped if necessary */ if (method == RESET_TYPE_DISABLE) { - efx_reset_up(efx, &ecmd, false); + efx_reset_up(efx, method, &ecmd, false); rc = -EIO; } else { - rc = efx_reset_up(efx, &ecmd, true); + rc = efx_reset_up(efx, method, &ecmd, true); } out_disable: @@ -1876,6 +1898,7 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type, efx->rx_checksum_enabled = true; spin_lock_init(&efx->netif_stop_lock); spin_lock_init(&efx->stats_lock); + efx->stats_disable_count = 1; mutex_init(&efx->mac_lock); efx->mac_op = &efx_dummy_mac_operations; efx->phy_op = &efx_dummy_phy_operations; diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h index 0dd7a532c78..55d0f131b0e 100644 --- a/drivers/net/sfc/efx.h +++ b/drivers/net/sfc/efx.h @@ -36,13 +36,16 @@ extern void efx_process_channel_now(struct efx_channel *channel); extern void efx_flush_queues(struct efx_nic *efx); /* Ports */ +extern void efx_stats_disable(struct efx_nic *efx); +extern void efx_stats_enable(struct efx_nic *efx); extern void efx_reconfigure_port(struct efx_nic *efx); extern void 
__efx_reconfigure_port(struct efx_nic *efx); /* Reset handling */ -extern void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd); -extern int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, - bool ok); +extern void efx_reset_down(struct efx_nic *efx, enum reset_type method, + struct ethtool_cmd *ecmd); +extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, + struct ethtool_cmd *ecmd, bool ok); /* Global */ extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type); diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c index 53d259e9018..7b5924c039b 100644 --- a/drivers/net/sfc/ethtool.c +++ b/drivers/net/sfc/ethtool.c @@ -219,9 +219,6 @@ int efx_ethtool_set_settings(struct net_device *net_dev, struct efx_nic *efx = netdev_priv(net_dev); int rc; - if (EFX_WORKAROUND_13963(efx) && !ecmd->autoneg) - return -EINVAL; - /* Falcon GMAC does not support 1000Mbps HD */ if (ecmd->speed == SPEED_1000 && ecmd->duplex != DUPLEX_FULL) { EFX_LOG(efx, "rejecting unsupported 1000Mbps HD" diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c index 5b9f2d9cc4e..d5378e60fcd 100644 --- a/drivers/net/sfc/falcon.c +++ b/drivers/net/sfc/falcon.c @@ -824,10 +824,6 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue, rx_ev_pause_frm ? " [PAUSE]" : ""); } #endif - - if (unlikely(rx_ev_eth_crc_err && EFX_WORKAROUND_10750(efx) && - efx->phy_type == PHY_TYPE_SFX7101)) - tenxpress_crc_err(efx); } /* Handle receive events that are not in-order. */ @@ -1887,7 +1883,7 @@ static int falcon_reset_macs(struct efx_nic *efx) /* MAC stats will fail whilst the TX fifo is draining. Serialise * the drain sequence with the statistics fetch */ - spin_lock(&efx->stats_lock); + efx_stats_disable(efx); falcon_read(efx, &reg, MAC0_CTRL_REG_KER); EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, 1); @@ -1917,7 +1913,7 @@ static int falcon_reset_macs(struct efx_nic *efx) udelay(10); } - spin_unlock(&efx->stats_lock); + efx_stats_enable(efx); /* If we've reset the EM block and the link is up, then * we'll have to kick the XAUI link so the PHY can recover */ @@ -2277,6 +2273,10 @@ int falcon_switch_mac(struct efx_nic *efx) struct efx_mac_operations *old_mac_op = efx->mac_op; efx_oword_t nic_stat; unsigned strap_val; + int rc = 0; + + /* Don't try to fetch MAC stats while we're switching MACs */ + efx_stats_disable(efx); /* Internal loopbacks override the phy speed setting */ if (efx->loopback_mode == LOOPBACK_GMAC) { @@ -2287,16 +2287,12 @@ int falcon_switch_mac(struct efx_nic *efx) efx->link_fd = true; } + WARN_ON(!mutex_is_locked(&efx->mac_lock)); efx->mac_op = (EFX_IS10G(efx) ? &falcon_xmac_operations : &falcon_gmac_operations); - if (old_mac_op == efx->mac_op) - return 0; - - WARN_ON(!mutex_is_locked(&efx->mac_lock)); - - /* Not all macs support a mac-level link state */ - efx->mac_up = true; + /* Always push the NIC_STAT_REG setting even if the mac hasn't + * changed, because this function is run post online reset */ falcon_read(efx, &nic_stat, NIC_STAT_REG); strap_val = EFX_IS10G(efx) ? 5 : 3; if (falcon_rev(efx) >= FALCON_REV_B0) { @@ -2309,9 +2305,17 @@ int falcon_switch_mac(struct efx_nic *efx) BUG_ON(EFX_OWORD_FIELD(nic_stat, STRAP_PINS) != strap_val); } + if (old_mac_op == efx->mac_op) + goto out; EFX_LOG(efx, "selected %cMAC\n", EFX_IS10G(efx) ?
'X' : 'G'); - return falcon_reset_macs(efx); + /* Not all macs support a mac-level link state */ + efx->mac_up = true; + + rc = falcon_reset_macs(efx); +out: + efx_stats_enable(efx); + return rc; } /* This call is responsible for hooking in the MAC and PHY operations */ diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c index f6a16428113..f9e2f95c3b4 100644 --- a/drivers/net/sfc/mdio_10g.c +++ b/drivers/net/sfc/mdio_10g.c @@ -15,6 +15,7 @@ #include "net_driver.h" #include "mdio_10g.h" #include "boards.h" +#include "workarounds.h" int mdio_clause45_reset_mmd(struct efx_nic *port, int mmd, int spins, int spintime) @@ -179,17 +180,12 @@ bool mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask) return false; else if (efx_phy_mode_disabled(efx->phy_mode)) return false; - else if (efx->loopback_mode == LOOPBACK_PHYXS) { + else if (efx->loopback_mode == LOOPBACK_PHYXS) mmd_mask &= ~(MDIO_MMDREG_DEVS_PHYXS | MDIO_MMDREG_DEVS_PCS | MDIO_MMDREG_DEVS_PMAPMD | MDIO_MMDREG_DEVS_AN); - if (!mmd_mask) { - reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS, - MDIO_PHYXS_STATUS2); - return !(reg & (1 << MDIO_PHYXS_STATUS2_RX_FAULT_LBN)); - } - } else if (efx->loopback_mode == LOOPBACK_PCS) + else if (efx->loopback_mode == LOOPBACK_PCS) mmd_mask &= ~(MDIO_MMDREG_DEVS_PCS | MDIO_MMDREG_DEVS_PMAPMD | MDIO_MMDREG_DEVS_AN); @@ -197,6 +193,13 @@ bool mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask) mmd_mask &= ~(MDIO_MMDREG_DEVS_PMAPMD | MDIO_MMDREG_DEVS_AN); + if (!mmd_mask) { + /* Use presence of XGMII faults in lieu of link state */ + reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS, + MDIO_PHYXS_STATUS2); + return !(reg & (1 << MDIO_PHYXS_STATUS2_RX_FAULT_LBN)); + } + while (mmd_mask) { if (mmd_mask & 1) { /* Double reads because link state is latched, and a @@ -263,7 +266,7 @@ void mdio_clause45_set_mmds_lpower(struct efx_nic *efx, } } -static u32 mdio_clause45_get_an(struct efx_nic *efx, u16 addr, u32 xnp) +static u32 mdio_clause45_get_an(struct efx_nic *efx, u16 addr) { int phy_id = efx->mii.phy_id; u32 result = 0; @@ -278,9 +281,6 @@ static u32 mdio_clause45_get_an(struct efx_nic *efx, u16 addr, u32 xnp) result |= ADVERTISED_100baseT_Half; if (reg & ADVERTISE_100FULL) result |= ADVERTISED_100baseT_Full; - if (reg & LPA_RESV) - result |= xnp; - return result; } @@ -310,7 +310,7 @@ void mdio_clause45_get_settings(struct efx_nic *efx, */ void mdio_clause45_get_settings_ext(struct efx_nic *efx, struct ethtool_cmd *ecmd, - u32 xnp, u32 xnp_lpa) + u32 npage_adv, u32 npage_lpa) { int phy_id = efx->mii.phy_id; int reg; @@ -361,8 +361,8 @@ void mdio_clause45_get_settings_ext(struct efx_nic *efx, ecmd->autoneg = AUTONEG_ENABLE; ecmd->advertising |= ADVERTISED_Autoneg | - mdio_clause45_get_an(efx, - MDIO_AN_ADVERTISE, xnp); + mdio_clause45_get_an(efx, MDIO_AN_ADVERTISE) | + npage_adv; } else ecmd->autoneg = AUTONEG_DISABLE; } else @@ -371,27 +371,30 @@ void mdio_clause45_get_settings_ext(struct efx_nic *efx, if (ecmd->autoneg) { /* If AN is complete, report best common mode, * otherwise report best advertised mode.
*/ - u32 common = ecmd->advertising; + u32 modes = 0; if (mdio_clause45_read(efx, phy_id, MDIO_MMD_AN, MDIO_MMDREG_STAT1) & - (1 << MDIO_AN_STATUS_AN_DONE_LBN)) { - common &= mdio_clause45_get_an(efx, MDIO_AN_LPA, - xnp_lpa); - } - if (common & ADVERTISED_10000baseT_Full) { + (1 << MDIO_AN_STATUS_AN_DONE_LBN)) + modes = (ecmd->advertising & + (mdio_clause45_get_an(efx, MDIO_AN_LPA) | + npage_lpa)); + if (modes == 0) + modes = ecmd->advertising; + + if (modes & ADVERTISED_10000baseT_Full) { ecmd->speed = SPEED_10000; ecmd->duplex = DUPLEX_FULL; - } else if (common & (ADVERTISED_1000baseT_Full | - ADVERTISED_1000baseT_Half)) { + } else if (modes & (ADVERTISED_1000baseT_Full | + ADVERTISED_1000baseT_Half)) { ecmd->speed = SPEED_1000; - ecmd->duplex = !!(common & ADVERTISED_1000baseT_Full); - } else if (common & (ADVERTISED_100baseT_Full | - ADVERTISED_100baseT_Half)) { + ecmd->duplex = !!(modes & ADVERTISED_1000baseT_Full); + } else if (modes & (ADVERTISED_100baseT_Full | + ADVERTISED_100baseT_Half)) { ecmd->speed = SPEED_100; - ecmd->duplex = !!(common & ADVERTISED_100baseT_Full); + ecmd->duplex = !!(modes & ADVERTISED_100baseT_Full); } else { ecmd->speed = SPEED_10; - ecmd->duplex = !!(common & ADVERTISED_10baseT_Full); + ecmd->duplex = !!(modes & ADVERTISED_10baseT_Full); } } else { /* Report forced settings */ @@ -415,7 +418,7 @@ int mdio_clause45_set_settings(struct efx_nic *efx, int phy_id = efx->mii.phy_id; struct ethtool_cmd prev; u32 required; - int ctrl1_bits, reg; + int reg; efx->phy_op->get_settings(efx, &prev); @@ -430,99 +433,83 @@ int mdio_clause45_set_settings(struct efx_nic *efx, if (prev.port != PORT_TP || ecmd->port != PORT_TP) return -EINVAL; - /* Check that PHY supports these settings and work out the - * basic control bits */ - if (ecmd->duplex) { + /* Check that PHY supports these settings */ + if (ecmd->autoneg) { + required = SUPPORTED_Autoneg; + } else if (ecmd->duplex) { switch (ecmd->speed) { - case SPEED_10: - ctrl1_bits = BMCR_FULLDPLX; - required = SUPPORTED_10baseT_Full; - break; - case SPEED_100: - ctrl1_bits = BMCR_SPEED100 | BMCR_FULLDPLX; - required = SUPPORTED_100baseT_Full; - break; - case SPEED_1000: - ctrl1_bits = BMCR_SPEED1000 | BMCR_FULLDPLX; - required = SUPPORTED_1000baseT_Full; - break; - case SPEED_10000: - ctrl1_bits = (BMCR_SPEED1000 | BMCR_SPEED100 | - BMCR_FULLDPLX); - required = SUPPORTED_10000baseT_Full; - break; - default: - return -EINVAL; + case SPEED_10: required = SUPPORTED_10baseT_Full; break; + case SPEED_100: required = SUPPORTED_100baseT_Full; break; + default: return -EINVAL; } } else { switch (ecmd->speed) { - case SPEED_10: - ctrl1_bits = 0; - required = SUPPORTED_10baseT_Half; - break; - case SPEED_100: - ctrl1_bits = BMCR_SPEED100; - required = SUPPORTED_100baseT_Half; - break; - case SPEED_1000: - ctrl1_bits = BMCR_SPEED1000; - required = SUPPORTED_1000baseT_Half; - break; - default: - return -EINVAL; + case SPEED_10: required = SUPPORTED_10baseT_Half; break; + case SPEED_100: required = SUPPORTED_100baseT_Half; break; + default: return -EINVAL; } } - if (ecmd->autoneg) - required |= SUPPORTED_Autoneg; required |= ecmd->advertising; if (required & ~prev.supported) return -EINVAL; - /* Set the basic control bits */ - reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD, - MDIO_MMDREG_CTRL1); - reg &= ~(BMCR_SPEED1000 | BMCR_SPEED100 | BMCR_FULLDPLX | 0x003c); - reg |= ctrl1_bits; - mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD, MDIO_MMDREG_CTRL1, - reg); - - /* Set the AN registers */ - if (ecmd->autoneg != prev.autoneg || 
- ecmd->advertising != prev.advertising) { - bool xnp = false; - - if (efx->phy_op->set_xnp_advertise) - xnp = efx->phy_op->set_xnp_advertise(efx, - ecmd->advertising); - - if (ecmd->autoneg) { - reg = 0; - if (ecmd->advertising & ADVERTISED_10baseT_Half) - reg |= ADVERTISE_10HALF; - if (ecmd->advertising & ADVERTISED_10baseT_Full) - reg |= ADVERTISE_10FULL; - if (ecmd->advertising & ADVERTISED_100baseT_Half) - reg |= ADVERTISE_100HALF; - if (ecmd->advertising & ADVERTISED_100baseT_Full) - reg |= ADVERTISE_100FULL; - if (xnp) - reg |= ADVERTISE_RESV; - mdio_clause45_write(efx, phy_id, MDIO_MMD_AN, - MDIO_AN_ADVERTISE, reg); - } + if (ecmd->autoneg) { + bool xnp = (ecmd->advertising & ADVERTISED_10000baseT_Full + || EFX_WORKAROUND_13204(efx)); + + /* Set up the base page */ + reg = ADVERTISE_CSMA; + if (ecmd->advertising & ADVERTISED_10baseT_Half) + reg |= ADVERTISE_10HALF; + if (ecmd->advertising & ADVERTISED_10baseT_Full) + reg |= ADVERTISE_10FULL; + if (ecmd->advertising & ADVERTISED_100baseT_Half) + reg |= ADVERTISE_100HALF; + if (ecmd->advertising & ADVERTISED_100baseT_Full) + reg |= ADVERTISE_100FULL; + if (xnp) + reg |= ADVERTISE_RESV; + else if (ecmd->advertising & (ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full)) + reg |= ADVERTISE_NPAGE; + reg |= efx_fc_advertise(efx->wanted_fc); + mdio_clause45_write(efx, phy_id, MDIO_MMD_AN, + MDIO_AN_ADVERTISE, reg); + + /* Set up the (extended) next page if necessary */ + if (efx->phy_op->set_npage_adv) + efx->phy_op->set_npage_adv(efx, ecmd->advertising); + /* Enable and restart AN */ reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN, MDIO_MMDREG_CTRL1); - if (ecmd->autoneg) - reg |= BMCR_ANENABLE | BMCR_ANRESTART; - else - reg &= ~BMCR_ANENABLE; + reg |= BMCR_ANENABLE; + if (!(EFX_WORKAROUND_15195(efx) && + LOOPBACK_MASK(efx) & efx->phy_op->loopbacks)) + reg |= BMCR_ANRESTART; if (xnp) reg |= 1 << MDIO_AN_CTRL_XNP_LBN; else reg &= ~(1 << MDIO_AN_CTRL_XNP_LBN); mdio_clause45_write(efx, phy_id, MDIO_MMD_AN, MDIO_MMDREG_CTRL1, reg); + } else { + /* Disable AN */ + mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_AN, + MDIO_MMDREG_CTRL1, + __ffs(BMCR_ANENABLE), false); + + /* Set the basic control bits */ + reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD, + MDIO_MMDREG_CTRL1); + reg &= ~(BMCR_SPEED1000 | BMCR_SPEED100 | BMCR_FULLDPLX | + 0x003c); + if (ecmd->speed == SPEED_100) + reg |= BMCR_SPEED100; + if (ecmd->duplex) + reg |= BMCR_FULLDPLX; + mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD, + MDIO_MMDREG_CTRL1, reg); } return 0; diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h index 09bf801d056..8ba49773ce7 100644 --- a/drivers/net/sfc/mdio_10g.h +++ b/drivers/net/sfc/mdio_10g.h @@ -155,7 +155,8 @@ #define MDIO_AN_XNP 22 #define MDIO_AN_LPA_XNP 25 -#define MDIO_AN_10GBT_ADVERTISE 32 +#define MDIO_AN_10GBT_CTRL 32 +#define MDIO_AN_10GBT_CTRL_ADV_10G_LBN 12 #define MDIO_AN_10GBT_STATUS (33) #define MDIO_AN_10GBT_STATUS_MS_FLT_LBN (15) /* MASTER/SLAVE config fault */ #define MDIO_AN_10GBT_STATUS_MS_LBN (14) /* MASTER/SLAVE config */ diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h index 5f255f75754..e019ad1fb9a 100644 --- a/drivers/net/sfc/net_driver.h +++ b/drivers/net/sfc/net_driver.h @@ -566,7 +566,7 @@ struct efx_mac_operations { * @poll: Poll for hardware state. Serialised by the mac_lock. * @get_settings: Get ethtool settings. Serialised by the mac_lock. * @set_settings: Set ethtool settings. Serialised by the mac_lock. 
- * @set_xnp_advertise: Set abilities advertised in Extended Next Page + * @set_npage_adv: Set abilities advertised in (Extended) Next Page * (only needed where AN bit is set in mmds) * @num_tests: Number of PHY-specific tests/results * @test_names: Names of the tests/results @@ -586,7 +586,7 @@ struct efx_phy_operations { struct ethtool_cmd *ecmd); int (*set_settings) (struct efx_nic *efx, struct ethtool_cmd *ecmd); - bool (*set_xnp_advertise) (struct efx_nic *efx, u32); + void (*set_npage_adv) (struct efx_nic *efx, u32); u32 num_tests; const char *const *test_names; int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags); @@ -754,8 +754,7 @@ union efx_multicast_hash { * &struct net_device_stats. * @stats_buffer: DMA buffer for statistics * @stats_lock: Statistics update lock. Serialises statistics fetches - * @stats_enabled: Temporarily disable statistics fetches. - * Serialised by @stats_lock + * @stats_disable_count: Nest count for disabling statistics fetches * @mac_op: MAC interface * @mac_address: Permanent MAC address * @phy_type: PHY type @@ -837,7 +836,7 @@ struct efx_nic { struct efx_mac_stats mac_stats; struct efx_buffer stats_buffer; spinlock_t stats_lock; - bool stats_enabled; + unsigned int stats_disable_count; struct efx_mac_operations *mac_op; unsigned char mac_address[ETH_ALEN]; diff --git a/drivers/net/sfc/phy.h b/drivers/net/sfc/phy.h index 58c493ef81b..07e855c148b 100644 --- a/drivers/net/sfc/phy.h +++ b/drivers/net/sfc/phy.h @@ -17,7 +17,6 @@ extern struct efx_phy_operations falcon_sfx7101_phy_ops; extern struct efx_phy_operations falcon_sft9001_phy_ops; extern void tenxpress_phy_blink(struct efx_nic *efx, bool blink); -extern void tenxpress_crc_err(struct efx_nic *efx); /**************************************************************************** * Exported functions from the driver for XFP optical PHYs diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c index dba0d64d50c..0a598084c51 100644 --- a/drivers/net/sfc/selftest.c +++ b/drivers/net/sfc/selftest.c @@ -665,6 +665,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, { enum efx_loopback_mode loopback_mode = efx->loopback_mode; int phy_mode = efx->phy_mode; + enum reset_type reset_method = RESET_TYPE_INVISIBLE; struct ethtool_cmd ecmd; struct efx_channel *channel; int rc_test = 0, rc_reset = 0, rc; @@ -718,21 +719,21 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, mutex_unlock(&efx->mac_lock); /* free up all consumers of SRAM (including all the queues) */ - efx_reset_down(efx, &ecmd); + efx_reset_down(efx, reset_method, &ecmd); rc = efx_test_chip(efx, tests); if (rc && !rc_test) rc_test = rc; /* reset the chip to recover from the register test */ - rc_reset = falcon_reset_hw(efx, RESET_TYPE_ALL); + rc_reset = falcon_reset_hw(efx, reset_method); /* Ensure that the phy is powered and out of loopback * for the bist and loopback tests */ efx->phy_mode &= ~PHY_MODE_LOW_POWER; efx->loopback_mode = LOOPBACK_NONE; - rc = efx_reset_up(efx, &ecmd, rc_reset == 0); + rc = efx_reset_up(efx, reset_method, &ecmd, rc_reset == 0); if (rc && !rc_reset) rc_reset = rc; diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c index 16b80acb999..cb25ae5b257 100644 --- a/drivers/net/sfc/sfe4001.c +++ b/drivers/net/sfc/sfe4001.c @@ -186,19 +186,22 @@ static int sfn4111t_reset(struct efx_nic *efx) { efx_oword_t reg; - /* GPIO pins are also used for I2C, so block that temporarily */ + /* GPIO 3 and the GPIO register are shared with I2C, so block that */ 
mutex_lock(&efx->i2c_adap.bus_lock); + /* Pull RST_N (GPIO 2) low then let it up again, setting the + * FLASH_CFG_1 strap (GPIO 3) appropriately. Only change the + * output enables; the output levels should always be 0 (low) + * and we rely on external pull-ups. */ falcon_read(efx, &reg, GPIO_CTL_REG_KER); EFX_SET_OWORD_FIELD(reg, GPIO2_OEN, true); - EFX_SET_OWORD_FIELD(reg, GPIO2_OUT, false); falcon_write(efx, &reg, GPIO_CTL_REG_KER); msleep(1000); - EFX_SET_OWORD_FIELD(reg, GPIO2_OUT, true); - EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, true); - EFX_SET_OWORD_FIELD(reg, GPIO3_OUT, - !(efx->phy_mode & PHY_MODE_SPECIAL)); + EFX_SET_OWORD_FIELD(reg, GPIO2_OEN, false); + EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, + !!(efx->phy_mode & PHY_MODE_SPECIAL)); falcon_write(efx, &reg, GPIO_CTL_REG_KER); + msleep(1); mutex_unlock(&efx->i2c_adap.bus_lock); @@ -232,12 +235,18 @@ static ssize_t set_phy_flash_cfg(struct device *dev, } else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) { err = -EBUSY; } else { + /* Reset the PHY, reconfigure the MAC and enable/disable + * MAC stats accordingly. */ efx->phy_mode = new_mode; + if (new_mode & PHY_MODE_SPECIAL) + efx_stats_disable(efx); if (efx->board_info.type == EFX_BOARD_SFE4001) err = sfe4001_poweron(efx); else err = sfn4111t_reset(efx); efx_reconfigure_port(efx); + if (!(new_mode & PHY_MODE_SPECIAL)) + efx_stats_enable(efx); } rtnl_unlock(); @@ -326,6 +335,11 @@ int sfe4001_init(struct efx_nic *efx) efx->board_info.monitor = sfe4001_check_hw; efx->board_info.fini = sfe4001_fini; + if (efx->phy_mode & PHY_MODE_SPECIAL) { + /* PHY won't generate a 156.25 MHz clock and MAC stats fetch + * will fail. */ + efx_stats_disable(efx); + } rc = sfe4001_poweron(efx); if (rc) goto fail_ioexp; @@ -372,17 +386,25 @@ static void sfn4111t_fini(struct efx_nic *efx) i2c_unregister_device(efx->board_info.hwmon_client); } -static struct i2c_board_info sfn4111t_hwmon_info = { +static struct i2c_board_info sfn4111t_a0_hwmon_info = { I2C_BOARD_INFO("max6647", 0x4e), .irq = -1, }; +static struct i2c_board_info sfn4111t_r5_hwmon_info = { + I2C_BOARD_INFO("max6646", 0x4d), + .irq = -1, +}; + int sfn4111t_init(struct efx_nic *efx) { int rc; efx->board_info.hwmon_client = - i2c_new_device(&efx->i2c_adap, &sfn4111t_hwmon_info); + i2c_new_device(&efx->i2c_adap, + (efx->board_info.minor < 5) ?
+ &sfn4111t_a0_hwmon_info : + &sfn4111t_r5_hwmon_info); if (!efx->board_info.hwmon_client) return -EIO; @@ -394,8 +416,10 @@ int sfn4111t_init(struct efx_nic *efx) if (rc) goto fail_hwmon; - if (efx->phy_mode & PHY_MODE_SPECIAL) + if (efx->phy_mode & PHY_MODE_SPECIAL) { + efx_stats_disable(efx); sfn4111t_reset(efx); + } return 0; diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c index 9ecb77da954..f0efd246962 100644 --- a/drivers/net/sfc/tenxpress.c +++ b/drivers/net/sfc/tenxpress.c @@ -67,6 +67,8 @@ #define PMA_PMD_EXT_CLK312_WIDTH 1 #define PMA_PMD_EXT_LPOWER_LBN 12 #define PMA_PMD_EXT_LPOWER_WIDTH 1 +#define PMA_PMD_EXT_ROBUST_LBN 14 +#define PMA_PMD_EXT_ROBUST_WIDTH 1 #define PMA_PMD_EXT_SSR_LBN 15 #define PMA_PMD_EXT_SSR_WIDTH 1 @@ -177,35 +179,24 @@ #define C22EXT_STATUS_LINK_LBN 2 #define C22EXT_STATUS_LINK_WIDTH 1 -#define C22EXT_MSTSLV_REG 49162 -#define C22EXT_MSTSLV_1000_HD_LBN 10 -#define C22EXT_MSTSLV_1000_HD_WIDTH 1 -#define C22EXT_MSTSLV_1000_FD_LBN 11 -#define C22EXT_MSTSLV_1000_FD_WIDTH 1 +#define C22EXT_MSTSLV_CTRL 49161 +#define C22EXT_MSTSLV_CTRL_ADV_1000_HD_LBN 8 +#define C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN 9 + +#define C22EXT_MSTSLV_STATUS 49162 +#define C22EXT_MSTSLV_STATUS_LP_1000_HD_LBN 10 +#define C22EXT_MSTSLV_STATUS_LP_1000_FD_LBN 11 /* Time to wait between powering down the LNPGA and turning off the power * rails */ #define LNPGA_PDOWN_WAIT (HZ / 5) -static int crc_error_reset_threshold = 100; -module_param(crc_error_reset_threshold, int, 0644); -MODULE_PARM_DESC(crc_error_reset_threshold, - "Max number of CRC errors before XAUI reset"); - struct tenxpress_phy_data { enum efx_loopback_mode loopback_mode; - atomic_t bad_crc_count; enum efx_phy_mode phy_mode; int bad_lp_tries; }; -void tenxpress_crc_err(struct efx_nic *efx) -{ - struct tenxpress_phy_data *phy_data = efx->phy_data; - if (phy_data != NULL) - atomic_inc(&phy_data->bad_crc_count); -} - static ssize_t show_phy_short_reach(struct device *dev, struct device_attribute *attr, char *buf) { @@ -284,7 +275,9 @@ static int tenxpress_init(struct efx_nic *efx) PMA_PMD_XCONTROL_REG); reg |= ((1 << PMA_PMD_EXT_GMII_EN_LBN) | (1 << PMA_PMD_EXT_CLK_OUT_LBN) | - (1 << PMA_PMD_EXT_CLK312_LBN)); + (1 << PMA_PMD_EXT_CLK312_LBN) | + (1 << PMA_PMD_EXT_ROBUST_LBN)); + mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg); mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_C22EXT, @@ -346,6 +339,7 @@ static int tenxpress_phy_init(struct efx_nic *efx) rc = tenxpress_init(efx); if (rc < 0) goto fail; + mdio_clause45_set_pause(efx); if (efx->phy_type == PHY_TYPE_SFT9001B) { rc = device_create_file(&efx->pci_dev->dev, @@ -376,8 +370,8 @@ static int tenxpress_special_reset(struct efx_nic *efx) /* The XGMAC clock is driven from the SFC7101/SFT9001 312MHz clock, so * a special software reset can glitch the XGMAC sufficiently for stats - * requests to fail. Since we don't often special_reset, just lock. */ - spin_lock(&efx->stats_lock); + * requests to fail. 
*/ + efx_stats_disable(efx); /* Initiate reset */ reg = mdio_clause45_read(efx, efx->mii.phy_id, @@ -392,17 +386,17 @@ static int tenxpress_special_reset(struct efx_nic *efx) rc = mdio_clause45_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS); if (rc < 0) - goto unlock; + goto out; /* Try and reconfigure the device */ rc = tenxpress_init(efx); if (rc < 0) - goto unlock; + goto out; /* Wait for the XGXS state machine to churn */ mdelay(10); -unlock: - spin_unlock(&efx->stats_lock); +out: + efx_stats_enable(efx); return rc; } @@ -520,7 +514,7 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx) { struct tenxpress_phy_data *phy_data = efx->phy_data; struct ethtool_cmd ecmd; - bool phy_mode_change, loop_reset, loop_toggle, loopback; + bool phy_mode_change, loop_reset; if (efx->phy_mode & (PHY_MODE_OFF | PHY_MODE_SPECIAL)) { phy_data->phy_mode = efx->phy_mode; @@ -531,12 +525,10 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx) phy_mode_change = (efx->phy_mode == PHY_MODE_NORMAL && phy_data->phy_mode != PHY_MODE_NORMAL); - loopback = LOOPBACK_MASK(efx) & efx->phy_op->loopbacks; - loop_toggle = LOOPBACK_CHANGED(phy_data, efx, efx->phy_op->loopbacks); loop_reset = (LOOPBACK_OUT_OF(phy_data, efx, efx->phy_op->loopbacks) || LOOPBACK_CHANGED(phy_data, efx, 1 << LOOPBACK_GPHY)); - if (loop_reset || loop_toggle || loopback || phy_mode_change) { + if (loop_reset || phy_mode_change) { int rc; efx->phy_op->get_settings(efx, &ecmd); @@ -551,20 +543,6 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx) falcon_reset_xaui(efx); } - if (efx->phy_type != PHY_TYPE_SFX7101) { - /* Only change autoneg once, on coming out or - * going into loopback */ - if (loop_toggle) - ecmd.autoneg = !loopback; - if (loopback) { - ecmd.duplex = DUPLEX_FULL; - if (efx->loopback_mode == LOOPBACK_GPHY) - ecmd.speed = SPEED_1000; - else - ecmd.speed = SPEED_10000; - } - } - rc = efx->phy_op->set_settings(efx, &ecmd); WARN_ON(rc); } @@ -623,13 +601,6 @@ static void tenxpress_phy_poll(struct efx_nic *efx) if (phy_data->phy_mode != PHY_MODE_NORMAL) return; - - if (EFX_WORKAROUND_10750(efx) && - atomic_read(&phy_data->bad_crc_count) > crc_error_reset_threshold) { - EFX_ERR(efx, "Resetting XAUI due to too many CRC errors\n"); - falcon_reset_xaui(efx); - atomic_set(&phy_data->bad_crc_count, 0); - } } static void tenxpress_phy_fini(struct efx_nic *efx) @@ -772,107 +743,76 @@ reset: return rc; } -static u32 tenxpress_get_xnp_lpa(struct efx_nic *efx) +static void +tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) { - int phy = efx->mii.phy_id; - u32 lpa = 0; + int phy_id = efx->mii.phy_id; + u32 adv = 0, lpa = 0; int reg; if (efx->phy_type != PHY_TYPE_SFX7101) { - reg = mdio_clause45_read(efx, phy, MDIO_MMD_C22EXT, - C22EXT_MSTSLV_REG); - if (reg & (1 << C22EXT_MSTSLV_1000_HD_LBN)) + reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_C22EXT, + C22EXT_MSTSLV_CTRL); + if (reg & (1 << C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN)) + adv |= ADVERTISED_1000baseT_Full; + reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_C22EXT, + C22EXT_MSTSLV_STATUS); + if (reg & (1 << C22EXT_MSTSLV_STATUS_LP_1000_HD_LBN)) lpa |= ADVERTISED_1000baseT_Half; - if (reg & (1 << C22EXT_MSTSLV_1000_FD_LBN)) + if (reg & (1 << C22EXT_MSTSLV_STATUS_LP_1000_FD_LBN)) lpa |= ADVERTISED_1000baseT_Full; } - reg = mdio_clause45_read(efx, phy, MDIO_MMD_AN, MDIO_AN_10GBT_STATUS); + reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN, + MDIO_AN_10GBT_CTRL); + if (reg & (1 << MDIO_AN_10GBT_CTRL_ADV_10G_LBN)) + adv |= ADVERTISED_10000baseT_Full; + reg = 
mdio_clause45_read(efx, phy_id, MDIO_MMD_AN, + MDIO_AN_10GBT_STATUS); if (reg & (1 << MDIO_AN_10GBT_STATUS_LP_10G_LBN)) lpa |= ADVERTISED_10000baseT_Full; - return lpa; -} -static void sfx7101_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) -{ - mdio_clause45_get_settings_ext(efx, ecmd, ADVERTISED_10000baseT_Full, - tenxpress_get_xnp_lpa(efx)); - ecmd->supported |= SUPPORTED_10000baseT_Full; - ecmd->advertising |= ADVERTISED_10000baseT_Full; + mdio_clause45_get_settings_ext(efx, ecmd, adv, lpa); + + if (efx->phy_type != PHY_TYPE_SFX7101) + ecmd->supported |= (SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full); + + /* In loopback, the PHY automatically brings up the correct interface, + * but doesn't advertise the correct speed. So override it */ + if (efx->loopback_mode == LOOPBACK_GPHY) + ecmd->speed = SPEED_1000; + else if (LOOPBACK_MASK(efx) & efx->phy_op->loopbacks) + ecmd->speed = SPEED_10000; } -static void sft9001_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) +static int tenxpress_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) { - int phy_id = efx->mii.phy_id; - u32 xnp_adv = 0; - int reg; - - reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD, - PMA_PMD_SPEED_ENABLE_REG); - if (EFX_WORKAROUND_13204(efx) && (reg & (1 << PMA_PMD_100TX_ADV_LBN))) - xnp_adv |= ADVERTISED_100baseT_Full; - if (reg & (1 << PMA_PMD_1000T_ADV_LBN)) - xnp_adv |= ADVERTISED_1000baseT_Full; - if (reg & (1 << PMA_PMD_10000T_ADV_LBN)) - xnp_adv |= ADVERTISED_10000baseT_Full; - - mdio_clause45_get_settings_ext(efx, ecmd, xnp_adv, - tenxpress_get_xnp_lpa(efx)); - - ecmd->supported |= (SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Full); + if (!ecmd->autoneg) + return -EINVAL; - /* Use the vendor defined C22ext register for duplex settings */ - if (ecmd->speed != SPEED_10000 && !ecmd->autoneg) { - reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_C22EXT, - GPHY_XCONTROL_REG); - ecmd->duplex = (reg & (1 << GPHY_DUPLEX_LBN) ? 
- DUPLEX_FULL : DUPLEX_HALF); - } + return mdio_clause45_set_settings(efx, ecmd); } -static int sft9001_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) +static void sfx7101_set_npage_adv(struct efx_nic *efx, u32 advertising) { - int phy_id = efx->mii.phy_id; - int rc; - - rc = mdio_clause45_set_settings(efx, ecmd); - if (rc) - return rc; - - if (ecmd->speed != SPEED_10000 && !ecmd->autoneg) - mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_C22EXT, - GPHY_XCONTROL_REG, GPHY_DUPLEX_LBN, - ecmd->duplex == DUPLEX_FULL); - - return rc; + mdio_clause45_set_flag(efx, efx->mii.phy_id, MDIO_MMD_AN, + MDIO_AN_10GBT_CTRL, + MDIO_AN_10GBT_CTRL_ADV_10G_LBN, + advertising & ADVERTISED_10000baseT_Full); } -static bool sft9001_set_xnp_advertise(struct efx_nic *efx, u32 advertising) +static void sft9001_set_npage_adv(struct efx_nic *efx, u32 advertising) { - int phy = efx->mii.phy_id; - int reg = mdio_clause45_read(efx, phy, MDIO_MMD_PMAPMD, - PMA_PMD_SPEED_ENABLE_REG); - bool enabled; - - reg &= ~((1 << 2) | (1 << 3)); - if (EFX_WORKAROUND_13204(efx) && - (advertising & ADVERTISED_100baseT_Full)) - reg |= 1 << PMA_PMD_100TX_ADV_LBN; - if (advertising & ADVERTISED_1000baseT_Full) - reg |= 1 << PMA_PMD_1000T_ADV_LBN; - if (advertising & ADVERTISED_10000baseT_Full) - reg |= 1 << PMA_PMD_10000T_ADV_LBN; - mdio_clause45_write(efx, phy, MDIO_MMD_PMAPMD, - PMA_PMD_SPEED_ENABLE_REG, reg); - - enabled = (advertising & - (ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full | - ADVERTISED_10000baseT_Full)); - if (EFX_WORKAROUND_13204(efx)) - enabled |= (advertising & ADVERTISED_100baseT_Full); - return enabled; + int phy_id = efx->mii.phy_id; + + mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_C22EXT, + C22EXT_MSTSLV_CTRL, + C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN, + advertising & ADVERTISED_1000baseT_Full); + mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_AN, + MDIO_AN_10GBT_CTRL, + MDIO_AN_10GBT_CTRL_ADV_10G_LBN, + advertising & ADVERTISED_10000baseT_Full); } struct efx_phy_operations falcon_sfx7101_phy_ops = { @@ -882,8 +822,9 @@ struct efx_phy_operations falcon_sfx7101_phy_ops = { .poll = tenxpress_phy_poll, .fini = tenxpress_phy_fini, .clear_interrupt = efx_port_dummy_op_void, - .get_settings = sfx7101_get_settings, - .set_settings = mdio_clause45_set_settings, + .get_settings = tenxpress_get_settings, + .set_settings = tenxpress_set_settings, + .set_npage_adv = sfx7101_set_npage_adv, .num_tests = ARRAY_SIZE(sfx7101_test_names), .test_names = sfx7101_test_names, .run_tests = sfx7101_run_tests, @@ -898,9 +839,9 @@ struct efx_phy_operations falcon_sft9001_phy_ops = { .poll = tenxpress_phy_poll, .fini = tenxpress_phy_fini, .clear_interrupt = efx_port_dummy_op_void, - .get_settings = sft9001_get_settings, - .set_settings = sft9001_set_settings, - .set_xnp_advertise = sft9001_set_xnp_advertise, + .get_settings = tenxpress_get_settings, + .set_settings = tenxpress_set_settings, + .set_npage_adv = sft9001_set_npage_adv, .num_tests = ARRAY_SIZE(sft9001_test_names), .test_names = sft9001_test_names, .run_tests = sft9001_run_tests, diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h index 82e03e1d737..78de68f4a95 100644 --- a/drivers/net/sfc/workarounds.h +++ b/drivers/net/sfc/workarounds.h @@ -18,8 +18,8 @@ #define EFX_WORKAROUND_ALWAYS(efx) 1 #define EFX_WORKAROUND_FALCON_A(efx) (falcon_rev(efx) <= FALCON_REV_A1) #define EFX_WORKAROUND_10G(efx) EFX_IS10G(efx) -#define EFX_WORKAROUND_SFX7101(efx) ((efx)->phy_type == PHY_TYPE_SFX7101) -#define EFX_WORKAROUND_SFT9001A(efx) ((efx)->phy_type == 
PHY_TYPE_SFT9001A) +#define EFX_WORKAROUND_SFT9001(efx) ((efx)->phy_type == PHY_TYPE_SFT9001A || \ + (efx)->phy_type == PHY_TYPE_SFT9001B) /* XAUI resets if link not detected */ #define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS @@ -29,8 +29,6 @@ #define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G /* TX pkt parser problem with <= 16 byte TXes */ #define EFX_WORKAROUND_9141 EFX_WORKAROUND_ALWAYS -/* Low rate CRC errors require XAUI reset */ -#define EFX_WORKAROUND_10750 EFX_WORKAROUND_SFX7101 /* TX_EV_PKT_ERR can be caused by a dangling TX descriptor * or a PCIe error (bug 11028) */ #define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS @@ -55,8 +53,8 @@ #define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A /* Need to send XNP pages for 100BaseT */ -#define EFX_WORKAROUND_13204 EFX_WORKAROUND_SFT9001A -/* Need to keep AN enabled */ -#define EFX_WORKAROUND_13963 EFX_WORKAROUND_SFT9001A +#define EFX_WORKAROUND_13204 EFX_WORKAROUND_SFT9001 +/* Don't restart AN in near-side loopback */ +#define EFX_WORKAROUND_15195 EFX_WORKAROUND_SFT9001 #endif /* EFX_WORKAROUNDS_H */ diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c index 607efeaf0bc..9a00e5566af 100644 --- a/drivers/net/skfp/skfddi.c +++ b/drivers/net/skfp/skfddi.c @@ -1003,9 +1003,9 @@ static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) break; case SKFP_CLR_STATS: /* Zero out the driver statistics */ if (!capable(CAP_NET_ADMIN)) { - memset(&lp->MacStat, 0, sizeof(lp->MacStat)); - } else { status = -EPERM; + } else { + memset(&lp->MacStat, 0, sizeof(lp->MacStat)); } break; default: diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 3668e81e474..994703cc0db 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -1403,9 +1403,6 @@ static int sky2_up(struct net_device *dev) } - if (netif_msg_ifup(sky2)) - printk(KERN_INFO PFX "%s: enabling interface\n", dev->name); - netif_carrier_off(dev); /* must be power of 2 */ @@ -1484,6 +1481,9 @@ static int sky2_up(struct net_device *dev) sky2_write32(hw, B0_IMSK, imask); sky2_set_multicast(dev); + + if (netif_msg_ifup(sky2)) + printk(KERN_INFO PFX "%s: enabling interface\n", dev->name); return 0; err_out: diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c index f513bdf1c88..783c1a7b869 100644 --- a/drivers/net/smsc911x.c +++ b/drivers/net/smsc911x.c @@ -953,7 +953,7 @@ smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktbytes) do { udelay(1); val = smsc911x_reg_read(pdata, RX_DP_CTRL); - } while (timeout-- && (val & RX_DP_CTRL_RX_FFWD_)); + } while (--timeout && (val & RX_DP_CTRL_RX_FFWD_)); if (unlikely(timeout == 0)) SMSC_WARNING(HW, "Timed out waiting for " diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c index c14a4c6452c..d801900a503 100644 --- a/drivers/net/smsc9420.c +++ b/drivers/net/smsc9420.c @@ -1378,6 +1378,7 @@ static int smsc9420_open(struct net_device *dev) /* test the IRQ connection to the ISR */ smsc_dbg(IFUP, "Testing ISR using IRQ %d", dev->irq); + pd->software_irq_signal = false; spin_lock_irqsave(&pd->int_lock, flags); /* configure interrupt deassertion timer and enable interrupts */ @@ -1393,8 +1394,6 @@ static int smsc9420_open(struct net_device *dev) smsc9420_pci_flush_write(pd); timeout = 1000; - pd->software_irq_signal = false; - smp_wmb(); while (timeout--) { if (pd->software_irq_signal) break; diff --git a/drivers/net/tulip/21142.c b/drivers/net/tulip/21142.c index 1210fb3748a..db7d5e11855 100644 --- a/drivers/net/tulip/21142.c +++ b/drivers/net/tulip/21142.c @@ -9,6 +9,11 @@ Please refer to 
Documentation/DocBook/tulip-user.{pdf,ps,html} for more information on this driver. + + DC21143 manual "21143 PCI/CardBus 10/100Mb/s Ethernet LAN Controller + Hardware Reference Manual" is currently available at : + http://developer.intel.com/design/network/manuals/278074.htm + Please submit bugs to http://bugzilla.kernel.org/ . */ @@ -32,7 +37,11 @@ void t21142_media_task(struct work_struct *work) int csr12 = ioread32(ioaddr + CSR12); int next_tick = 60*HZ; int new_csr6 = 0; + int csr14 = ioread32(ioaddr + CSR14); + /* CSR12[LS10,LS100] are not reliable during autonegotiation */ + if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000) + csr12 |= 6; if (tulip_debug > 2) printk(KERN_INFO"%s: 21143 negotiation status %8.8x, %s.\n", dev->name, csr12, medianame[dev->if_port]); @@ -76,7 +85,7 @@ void t21142_media_task(struct work_struct *work) new_csr6 = 0x83860000; dev->if_port = 3; iowrite32(0, ioaddr + CSR13); - iowrite32(0x0003FF7F, ioaddr + CSR14); + iowrite32(0x0003FFFF, ioaddr + CSR14); iowrite16(8, ioaddr + CSR15); iowrite32(1, ioaddr + CSR13); } @@ -132,10 +141,14 @@ void t21142_lnk_change(struct net_device *dev, int csr5) struct tulip_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->base_addr; int csr12 = ioread32(ioaddr + CSR12); + int csr14 = ioread32(ioaddr + CSR14); + /* CSR12[LS10,LS100] are not reliable during autonegotiation */ + if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000) + csr12 |= 6; if (tulip_debug > 1) printk(KERN_INFO"%s: 21143 link status interrupt %8.8x, CSR5 %x, " - "%8.8x.\n", dev->name, csr12, csr5, ioread32(ioaddr + CSR14)); + "%8.8x.\n", dev->name, csr12, csr5, csr14); /* If NWay finished and we have a negotiated partner capability. */ if (tp->nway && !tp->nwayset && (csr12 & 0x7000) == 0x5000) { @@ -143,7 +156,9 @@ void t21142_lnk_change(struct net_device *dev, int csr5) int negotiated = tp->sym_advertise & (csr12 >> 16); tp->lpar = csr12 >> 16; tp->nwayset = 1; - if (negotiated & 0x0100) dev->if_port = 5; + /* If partner cannot negotiate, it is 10Mbps Half Duplex */ + if (!(csr12 & 0x8000)) dev->if_port = 0; + else if (negotiated & 0x0100) dev->if_port = 5; else if (negotiated & 0x0080) dev->if_port = 3; else if (negotiated & 0x0040) dev->if_port = 4; else if (negotiated & 0x0020) dev->if_port = 0; @@ -214,7 +229,7 @@ void t21142_lnk_change(struct net_device *dev, int csr5) tp->timer.expires = RUN_AT(3*HZ); add_timer(&tp->timer); } else if (dev->if_port == 5) - iowrite32(ioread32(ioaddr + CSR14) & ~0x080, ioaddr + CSR14); + iowrite32(csr14 & ~0x080, ioaddr + CSR14); } else if (dev->if_port == 0 || dev->if_port == 4) { if ((csr12 & 4) == 0) printk(KERN_INFO"%s: 21143 10baseT link beat good.\n", diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index 11441225bf4..e87986867ba 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c @@ -1536,6 +1536,11 @@ static void adjust_link(struct net_device *dev) static int init_phy(struct net_device *dev) { struct ucc_geth_private *priv = netdev_priv(dev); + struct device_node *np = priv->node; + struct device_node *phy, *mdio; + const phandle *ph; + char bus_name[MII_BUS_ID_SIZE]; + const unsigned int *id; struct phy_device *phydev; char phy_id[BUS_ID_SIZE]; @@ -1543,8 +1548,18 @@ static int init_phy(struct net_device *dev) priv->oldspeed = 0; priv->oldduplex = -1; - snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, priv->ug_info->mdio_bus, - priv->ug_info->phy_address); + ph = of_get_property(np, "phy-handle", NULL); + phy = of_find_node_by_phandle(*ph); + mdio = of_get_parent(phy); + + id = 
of_get_property(phy, "reg", NULL); + + of_node_put(phy); + of_node_put(mdio); + + uec_mdio_bus_name(bus_name, mdio); + snprintf(phy_id, sizeof(phy_id), "%s:%02x", + bus_name, *id); phydev = phy_connect(dev, phy_id, &adjust_link, 0, priv->phy_interface); @@ -3748,6 +3763,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma ugeth->ug_info = ug_info; ugeth->dev = dev; + ugeth->node = np; return 0; } diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h index 8f699cb773e..16cbe42ba43 100644 --- a/drivers/net/ucc_geth.h +++ b/drivers/net/ucc_geth.h @@ -1186,6 +1186,8 @@ struct ucc_geth_private { int oldspeed; int oldduplex; int oldlink; + + struct device_node *node; }; void uec_set_ethtool_ops(struct net_device *netdev); diff --git a/drivers/net/ucc_geth_mii.c b/drivers/net/ucc_geth_mii.c index c001d261366..54635911305 100644 --- a/drivers/net/ucc_geth_mii.c +++ b/drivers/net/ucc_geth_mii.c @@ -156,7 +156,7 @@ static int uec_mdio_probe(struct of_device *ofdev, const struct of_device_id *ma if (err) goto reg_map_fail; - snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start); + uec_mdio_bus_name(new_bus->id, np); new_bus->irq = kmalloc(32 * sizeof(int), GFP_KERNEL); @@ -283,3 +283,13 @@ void uec_mdio_exit(void) { of_unregister_platform_driver(&uec_mdio_driver); } + +void uec_mdio_bus_name(char *name, struct device_node *np) +{ + const u32 *reg; + + reg = of_get_property(np, "reg", NULL); + + snprintf(name, MII_BUS_ID_SIZE, "%s@%x", np->name, reg ? *reg : 0); +} + diff --git a/drivers/net/ucc_geth_mii.h b/drivers/net/ucc_geth_mii.h index 1e45b2028a5..840cf80235b 100644 --- a/drivers/net/ucc_geth_mii.h +++ b/drivers/net/ucc_geth_mii.h @@ -97,4 +97,5 @@ int uec_mdio_read(struct mii_bus *bus, int mii_id, int regnum); int uec_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value); int __init uec_mdio_init(void); void uec_mdio_exit(void); +void uec_mdio_bus_name(char *name, struct device_node *np); #endif /* __UEC_MII_H */ diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 63ef2a8905f..c68808336c8 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -287,7 +287,7 @@ static void try_fill_recv_maxbufs(struct virtnet_info *vi) skb_put(skb, MAX_PACKET_LEN); hdr = skb_vnet_hdr(skb); - sg_init_one(sg, hdr, sizeof(*hdr)); + sg_set_buf(sg, hdr, sizeof(*hdr)); if (vi->big_packets) { for (i = 0; i < MAX_SKB_FRAGS; i++) { @@ -488,9 +488,9 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb) /* Encode metadata header at front. 
*/ if (vi->mergeable_rx_bufs) - sg_init_one(sg, mhdr, sizeof(*mhdr)); + sg_set_buf(sg, mhdr, sizeof(*mhdr)); else - sg_init_one(sg, hdr, sizeof(*hdr)); + sg_set_buf(sg, hdr, sizeof(*hdr)); num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1; diff --git a/drivers/net/wimax/i2400m/debugfs.c b/drivers/net/wimax/i2400m/debugfs.c index 62663298597..9b81af3f80a 100644 --- a/drivers/net/wimax/i2400m/debugfs.c +++ b/drivers/net/wimax/i2400m/debugfs.c @@ -234,20 +234,6 @@ struct dentry *debugfs_create_i2400m_reset( &fops_i2400m_reset); } -/* - * Debug levels control; see debug.h - */ -struct d_level D_LEVEL[] = { - D_SUBMODULE_DEFINE(control), - D_SUBMODULE_DEFINE(driver), - D_SUBMODULE_DEFINE(debugfs), - D_SUBMODULE_DEFINE(fw), - D_SUBMODULE_DEFINE(netdev), - D_SUBMODULE_DEFINE(rfkill), - D_SUBMODULE_DEFINE(rx), - D_SUBMODULE_DEFINE(tx), -}; -size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL); #define __debugfs_register(prefix, name, parent) \ do { \ diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c index 5f98047e18c..e80a0b65a75 100644 --- a/drivers/net/wimax/i2400m/driver.c +++ b/drivers/net/wimax/i2400m/driver.c @@ -707,6 +707,22 @@ void i2400m_release(struct i2400m *i2400m) EXPORT_SYMBOL_GPL(i2400m_release); +/* + * Debug levels control; see debug.h + */ +struct d_level D_LEVEL[] = { + D_SUBMODULE_DEFINE(control), + D_SUBMODULE_DEFINE(driver), + D_SUBMODULE_DEFINE(debugfs), + D_SUBMODULE_DEFINE(fw), + D_SUBMODULE_DEFINE(netdev), + D_SUBMODULE_DEFINE(rfkill), + D_SUBMODULE_DEFINE(rx), + D_SUBMODULE_DEFINE(tx), +}; +size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL); + + static int __init i2400m_driver_init(void) { diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c index 8ef87356e08..a533ed60bb4 100644 --- a/drivers/net/wireless/ath5k/base.c +++ b/drivers/net/wireless/ath5k/base.c @@ -1028,6 +1028,8 @@ ath5k_setup_bands(struct ieee80211_hw *hw) * it's done by reseting the chip. To accomplish this we must * first cleanup any pending DMA, then restart stuff after a la * ath5k_init. + * + * Called with sc->lock. 
*/ static int ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan) @@ -2814,11 +2816,17 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed) { struct ath5k_softc *sc = hw->priv; struct ieee80211_conf *conf = &hw->conf; + int ret; + + mutex_lock(&sc->lock); sc->bintval = conf->beacon_int; sc->power_level = conf->power_level; - return ath5k_chan_set(sc, conf->channel); + ret = ath5k_chan_set(sc, conf->channel); + + mutex_unlock(&sc->lock); + return ret; } static int diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index 0dc8eed1640..b35c8813bef 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -1719,6 +1719,10 @@ static int iwl_read_ucode(struct iwl_priv *priv) priv->ucode_data_backup.len = data_size; iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup); + if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr || + !priv->ucode_data_backup.v_addr) + goto err_pci_alloc; + /* Initialization instructions and data */ if (init_size && init_data_size) { priv->ucode_init.len = init_size; diff --git a/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c b/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c index 4e75e8e7fa9..78df281b297 100644 --- a/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c +++ b/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c @@ -285,7 +285,10 @@ static void rtl8225_rf_set_tx_power(struct ieee80211_hw *dev, int channel) ofdm_power = priv->channels[channel - 1].hw_value >> 4; cck_power = min(cck_power, (u8)11); - ofdm_power = min(ofdm_power, (u8)35); + if (ofdm_power > (u8)15) + ofdm_power = 25; + else + ofdm_power += 10; rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK, rtl8225_tx_gain_cck_ofdm[cck_power / 6] >> 1); @@ -536,7 +539,10 @@ static void rtl8225z2_rf_set_tx_power(struct ieee80211_hw *dev, int channel) cck_power += priv->txpwr_base & 0xF; cck_power = min(cck_power, (u8)35); - ofdm_power = min(ofdm_power, (u8)15); + if (ofdm_power > (u8)15) + ofdm_power = 25; + else + ofdm_power += 10; ofdm_power += priv->txpwr_base >> 4; ofdm_power = min(ofdm_power, (u8)35); diff --git a/drivers/regulator/bq24022.c b/drivers/regulator/bq24022.c index 366565aba86..c175e38a4cd 100644 --- a/drivers/regulator/bq24022.c +++ b/drivers/regulator/bq24022.c @@ -152,11 +152,7 @@ static void __exit bq24022_exit(void) platform_driver_unregister(&bq24022_driver); } -/* - * make sure this is probed before gpio_vbus and pda_power, - * but after asic3 or other GPIO expander drivers. 
- */ -subsys_initcall(bq24022_init); +module_init(bq24022_init); module_exit(bq24022_exit); MODULE_AUTHOR("Philipp Zabel"); diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c index 7aa35248181..5056e23e441 100644 --- a/drivers/regulator/wm8350-regulator.c +++ b/drivers/regulator/wm8350-regulator.c @@ -1435,7 +1435,7 @@ int wm8350_register_led(struct wm8350 *wm8350, int lednum, int dcdc, int isink, struct platform_device *pdev; int ret; - if (lednum > ARRAY_SIZE(wm8350->pmic.led) || lednum < 0) { + if (lednum >= ARRAY_SIZE(wm8350->pmic.led) || lednum < 0) { dev_err(wm8350->dev, "Invalid LED index %d\n", lednum); return -ENODEV; } diff --git a/drivers/serial/jsm/jsm_tty.c b/drivers/serial/jsm/jsm_tty.c index 3547558d2ca..324c74d2f66 100644 --- a/drivers/serial/jsm/jsm_tty.c +++ b/drivers/serial/jsm/jsm_tty.c @@ -161,6 +161,11 @@ static void jsm_tty_stop_rx(struct uart_port *port) channel->ch_bd->bd_ops->disable_receiver(channel); } +static void jsm_tty_enable_ms(struct uart_port *port) +{ + /* Nothing needed */ +} + static void jsm_tty_break(struct uart_port *port, int break_state) { unsigned long lock_flags; @@ -345,6 +350,7 @@ static struct uart_ops jsm_ops = { .start_tx = jsm_tty_start_tx, .send_xchar = jsm_tty_send_xchar, .stop_rx = jsm_tty_stop_rx, + .enable_ms = jsm_tty_enable_ms, .break_ctl = jsm_tty_break, .startup = jsm_tty_open, .shutdown = jsm_tty_close, diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 2ba8f95516a..efa4b363ce7 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -498,7 +498,7 @@ static ssize_t store_target_kb(struct sys_device *dev, if (!capable(CAP_SYS_ADMIN)) return -EPERM; - target_bytes = memparse(buf, &endchar); + target_bytes = simple_strtoull(buf, &endchar, 0) * 1024; balloon_set_new_target(target_bytes >> PAGE_SHIFT); @@ -508,8 +508,39 @@ static ssize_t store_target_kb(struct sys_device *dev, static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR, show_target_kb, store_target_kb); + +static ssize_t show_target(struct sys_device *dev, struct sysdev_attribute *attr, + char *buf) +{ + return sprintf(buf, "%llu\n", + (u64)balloon_stats.target_pages << PAGE_SHIFT); +} + +static ssize_t store_target(struct sys_device *dev, + struct sysdev_attribute *attr, + const char *buf, + size_t count) +{ + char *endchar; + unsigned long long target_bytes; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + target_bytes = memparse(buf, &endchar); + + balloon_set_new_target(target_bytes >> PAGE_SHIFT); + + return count; +} + +static SYSDEV_ATTR(target, S_IRUGO | S_IWUSR, + show_target, store_target); + + static struct sysdev_attribute *balloon_attrs[] = { &attr_target_kb, + &attr_target, }; static struct attribute *balloon_info_attrs[] = { diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c index 77ebc3c263d..549b0144da1 100644 --- a/fs/bio-integrity.c +++ b/fs/bio-integrity.c @@ -140,7 +140,6 @@ int bio_integrity_add_page(struct bio *bio, struct page *page, iv = bip_vec_idx(bip, bip->bip_vcnt); BUG_ON(iv == NULL); - BUG_ON(iv->bv_page != NULL); iv->bv_page = page; iv->bv_len = len; @@ -465,7 +464,7 @@ static int bio_integrity_verify(struct bio *bio) if (ret) { kunmap_atomic(kaddr, KM_USER0); - break; + return ret; } sectors = bv->bv_len / bi->sector_size; @@ -493,18 +492,13 @@ static void bio_integrity_verify_fn(struct work_struct *work) struct bio_integrity_payload *bip = container_of(work, struct bio_integrity_payload, bip_work); struct bio *bio = bip->bip_bio; - int error = bip->bip_error; + int error; - if 
(bio_integrity_verify(bio)) { - clear_bit(BIO_UPTODATE, &bio->bi_flags); - error = -EIO; - } + error = bio_integrity_verify(bio); /* Restore original bio completion handler */ bio->bi_end_io = bip->bip_end_io; - - if (bio->bi_end_io) - bio->bi_end_io(bio, error); + bio_endio(bio, error); } /** @@ -525,7 +519,17 @@ void bio_integrity_endio(struct bio *bio, int error) BUG_ON(bip->bip_bio != bio); - bip->bip_error = error; + /* In case of an I/O error there is no point in verifying the + * integrity metadata. Restore original bio end_io handler + * and run it. + */ + if (error) { + bio->bi_end_io = bip->bip_end_io; + bio_endio(bio, error); + + return; + } + INIT_WORK(&bip->bip_work, bio_integrity_verify_fn); queue_work(kintegrityd_wq, &bip->bip_work); } diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index 5235c67e759..c8f8d5904f5 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c @@ -538,6 +538,7 @@ static int dev_ifsioc(unsigned int fd, unsigned int cmd, unsigned long arg) * cannot be fixed without breaking all existing apps. */ case TUNSETIFF: + case TUNGETIFF: case SIOCGIFFLAGS: case SIOCGIFMETRIC: case SIOCGIFMTU: @@ -1982,6 +1983,11 @@ COMPATIBLE_IOCTL(TUNSETNOCSUM) COMPATIBLE_IOCTL(TUNSETDEBUG) COMPATIBLE_IOCTL(TUNSETPERSIST) COMPATIBLE_IOCTL(TUNSETOWNER) +COMPATIBLE_IOCTL(TUNSETLINK) +COMPATIBLE_IOCTL(TUNSETGROUP) +COMPATIBLE_IOCTL(TUNGETFEATURES) +COMPATIBLE_IOCTL(TUNSETOFFLOAD) +COMPATIBLE_IOCTL(TUNSETTXFILTER) /* Big V */ COMPATIBLE_IOCTL(VT_SETMODE) COMPATIBLE_IOCTL(VT_GETMODE) @@ -2573,6 +2579,7 @@ HANDLE_IOCTL(SIOCGIFPFLAGS, dev_ifsioc) HANDLE_IOCTL(SIOCGIFTXQLEN, dev_ifsioc) HANDLE_IOCTL(SIOCSIFTXQLEN, dev_ifsioc) HANDLE_IOCTL(TUNSETIFF, dev_ifsioc) +HANDLE_IOCTL(TUNGETIFF, dev_ifsioc) HANDLE_IOCTL(SIOCETHTOOL, ethtool_ioctl) HANDLE_IOCTL(SIOCBONDENSLAVE, bond_ioctl) HANDLE_IOCTL(SIOCBONDRELEASE, bond_ioctl) diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c index 69a3d19ca9f..4db4ffa1eda 100644 --- a/fs/ext3/namei.c +++ b/fs/ext3/namei.c @@ -1358,7 +1358,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry, struct fake_dirent *fde; blocksize = dir->i_sb->s_blocksize; - dxtrace(printk("Creating index\n")); + dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino)); retval = ext3_journal_get_write_access(handle, bh); if (retval) { ext3_std_error(dir->i_sb, retval); @@ -1367,6 +1367,19 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry, } root = (struct dx_root *) bh->b_data; + /* The 0th block becomes the root, move the dirents out */ + fde = &root->dotdot; + de = (struct ext3_dir_entry_2 *)((char *)fde + + ext3_rec_len_from_disk(fde->rec_len)); + if ((char *) de >= (((char *) root) + blocksize)) { + ext3_error(dir->i_sb, __func__, + "invalid rec_len for '..' 
in inode %lu", + dir->i_ino); + brelse(bh); + return -EIO; + } + len = ((char *) root) + blocksize - (char *) de; + bh2 = ext3_append (handle, dir, &block, &retval); if (!(bh2)) { brelse(bh); @@ -1375,11 +1388,6 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry, EXT3_I(dir)->i_flags |= EXT3_INDEX_FL; data1 = bh2->b_data; - /* The 0th block becomes the root, move the dirents out */ - fde = &root->dotdot; - de = (struct ext3_dir_entry_2 *)((char *)fde + - ext3_rec_len_from_disk(fde->rec_len)); - len = ((char *) root) + blocksize - (char *) de; memcpy (data1, de, len); de = (struct ext3_dir_entry_2 *) data1; top = data1 + len; diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index 6bba06b09dd..9a50b8052dc 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -684,15 +684,15 @@ ext4_fsblk_t ext4_count_free_blocks(struct super_block *sb) gdp = ext4_get_group_desc(sb, i, NULL); if (!gdp) continue; - desc_count += le16_to_cpu(gdp->bg_free_blocks_count); + desc_count += ext4_free_blks_count(sb, gdp); brelse(bitmap_bh); bitmap_bh = ext4_read_block_bitmap(sb, i); if (bitmap_bh == NULL) continue; x = ext4_count_free(bitmap_bh, sb->s_blocksize); - printk(KERN_DEBUG "group %lu: stored = %d, counted = %u\n", - i, le16_to_cpu(gdp->bg_free_blocks_count), x); + printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n", + i, ext4_free_blks_count(sb, gdp), x); bitmap_count += x; } brelse(bitmap_bh); diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index c668e4377d7..aafc9eba1c2 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1206,8 +1206,11 @@ static inline void ext4_r_blocks_count_set(struct ext4_super_block *es, static inline loff_t ext4_isize(struct ext4_inode *raw_inode) { - return ((loff_t)le32_to_cpu(raw_inode->i_size_high) << 32) | - le32_to_cpu(raw_inode->i_size_lo); + if (S_ISREG(le16_to_cpu(raw_inode->i_mode))) + return ((loff_t)le32_to_cpu(raw_inode->i_size_high) << 32) | + le32_to_cpu(raw_inode->i_size_lo); + else + return (loff_t) le32_to_cpu(raw_inode->i_size_lo); } static inline void ext4_isize_set(struct ext4_inode *raw_inode, loff_t i_size) diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 54bf0623a9a..e2eab196875 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -3048,7 +3048,7 @@ retry: WARN_ON(ret <= 0); printk(KERN_ERR "%s: ext4_ext_get_blocks " "returned error inode#%lu, block=%u, " - "max_blocks=%lu", __func__, + "max_blocks=%u", __func__, inode->i_ino, block, max_blocks); #endif ext4_mark_inode_dirty(handle, inode); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index a6444cee0c7..03ba20be132 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -360,9 +360,9 @@ static int ext4_block_to_path(struct inode *inode, final = ptrs; } else { ext4_warning(inode->i_sb, "ext4_block_to_path", - "block %lu > max", + "block %lu > max in inode %lu", i_block + direct_blocks + - indirect_blocks + double_blocks); + indirect_blocks + double_blocks, inode->i_ino); } if (boundary) *boundary = final - 1 - (i_block & (ptrs - 1)); @@ -2821,9 +2821,6 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block) filemap_write_and_wait(mapping); } - BUG_ON(!EXT4_JOURNAL(inode) && - EXT4_I(inode)->i_state & EXT4_STATE_JDATA); - if (EXT4_JOURNAL(inode) && EXT4_I(inode)->i_state & EXT4_STATE_JDATA) { /* * This is a REALLY heavyweight approach, but the use of @@ -3622,7 +3619,7 @@ static void ext4_free_data(handle_t *handle, struct inode *inode, * block pointed to itself, it would have been detached when * the block was cleared. 
Check for this instead of OOPSing. */ - if (bh2jh(this_bh)) + if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh)) ext4_handle_dirty_metadata(handle, inode, this_bh); else ext4_error(inode->i_sb, __func__, diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 918aec0c8a1..deba54f6cbe 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -3025,7 +3025,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, goto out_err; ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group, - gdp->bg_free_blocks_count); + ext4_free_blks_count(sb, gdp)); err = ext4_journal_get_write_access(handle, gdp_bh); if (err) diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index fec0b4c2f5f..ba702bd7910 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -1368,7 +1368,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry, struct fake_dirent *fde; blocksize = dir->i_sb->s_blocksize; - dxtrace(printk(KERN_DEBUG "Creating index\n")); + dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino)); retval = ext4_journal_get_write_access(handle, bh); if (retval) { ext4_std_error(dir->i_sb, retval); @@ -1377,6 +1377,20 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry, } root = (struct dx_root *) bh->b_data; + /* The 0th block becomes the root, move the dirents out */ + fde = &root->dotdot; + de = (struct ext4_dir_entry_2 *)((char *)fde + + ext4_rec_len_from_disk(fde->rec_len)); + if ((char *) de >= (((char *) root) + blocksize)) { + ext4_error(dir->i_sb, __func__, + "invalid rec_len for '..' in inode %lu", + dir->i_ino); + brelse(bh); + return -EIO; + } + len = ((char *) root) + blocksize - (char *) de; + + /* Allocate new block for the 0th block's dirents */ bh2 = ext4_append(handle, dir, &block, &retval); if (!(bh2)) { brelse(bh); @@ -1385,11 +1399,6 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry, EXT4_I(dir)->i_flags |= EXT4_INDEX_FL; data1 = bh2->b_data; - /* The 0th block becomes the root, move the dirents out */ - fde = &root->dotdot; - de = (struct ext4_dir_entry_2 *)((char *)fde + - ext4_rec_len_from_disk(fde->rec_len)); - len = ((char *) root) + blocksize - (char *) de; memcpy (data1, de, len); de = (struct ext4_dir_entry_2 *) data1; top = data1 + len; diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index c328be5d688..c06886abd65 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -861,12 +861,13 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input) gdp = (struct ext4_group_desc *)((char *)primary->b_data + gdb_off * EXT4_DESC_SIZE(sb)); + memset(gdp, 0, EXT4_DESC_SIZE(sb)); ext4_block_bitmap_set(sb, gdp, input->block_bitmap); /* LV FIXME */ ext4_inode_bitmap_set(sb, gdp, input->inode_bitmap); /* LV FIXME */ ext4_inode_table_set(sb, gdp, input->inode_table); /* LV FIXME */ ext4_free_blks_set(sb, gdp, input->free_blocks_count); ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb)); - gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED); + gdp->bg_flags = cpu_to_le16(EXT4_BG_INODE_ZEROED); gdp->bg_checksum = ext4_group_desc_csum(sbi, input->group, gdp); /* diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 56675306ed8..eb343008ede 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -37,10 +37,10 @@ #include <linux/proc_fs.h> #include <linux/debugfs.h> #include <linux/seq_file.h> +#include <linux/math64.h> #include <asm/uaccess.h> #include <asm/page.h> -#include <asm/div64.h> EXPORT_SYMBOL(jbd2_journal_start); EXPORT_SYMBOL(jbd2_journal_restart); @@ -846,8 +846,8 @@ static 
int jbd2_seq_info_show(struct seq_file *seq, void *v) jiffies_to_msecs(s->stats->u.run.rs_flushing / s->stats->ts_tid)); seq_printf(seq, " %ums logging transaction\n", jiffies_to_msecs(s->stats->u.run.rs_logging / s->stats->ts_tid)); - seq_printf(seq, " %luus average transaction commit time\n", - do_div(s->journal->j_average_commit_time, 1000)); + seq_printf(seq, " %lluus average transaction commit time\n", + div_u64(s->journal->j_average_commit_time, 1000)); seq_printf(seq, " %lu handles per transaction\n", s->stats->u.run.rs_handle_count / s->stats->ts_tid); seq_printf(seq, " %lu blocks per transaction\n", diff --git a/include/linux/Kbuild b/include/linux/Kbuild index 12e9a2957ca..2124c063a7e 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -41,6 +41,7 @@ header-y += baycom.h header-y += bfs_fs.h header-y += blkpg.h header-y += bpqether.h +header-y += bsg.h header-y += can.h header-y += cdk.h header-y += chio.h diff --git a/include/linux/bio.h b/include/linux/bio.h index 18462c5b8ff..0942765cf8c 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -144,7 +144,7 @@ struct bio { * bit 1 -- rw-ahead when set * bit 2 -- barrier * Insert a serialization point in the IO queue, forcing previously - * submitted IO to be completed before this oen is issued. + * submitted IO to be completed before this one is issued. * bit 3 -- synchronous I/O hint: the block layer will unplug immediately * Note that this does NOT indicate that the IO itself is sync, just * that the block layer will not postpone issue of this IO by plugging. @@ -163,12 +163,33 @@ struct bio { #define BIO_RW 0 /* Must match RW in req flags (blkdev.h) */ #define BIO_RW_AHEAD 1 /* Must match FAILFAST in req flags */ #define BIO_RW_BARRIER 2 -#define BIO_RW_SYNC 3 -#define BIO_RW_META 4 -#define BIO_RW_DISCARD 5 -#define BIO_RW_FAILFAST_DEV 6 -#define BIO_RW_FAILFAST_TRANSPORT 7 -#define BIO_RW_FAILFAST_DRIVER 8 +#define BIO_RW_SYNCIO 3 +#define BIO_RW_UNPLUG 4 +#define BIO_RW_META 5 +#define BIO_RW_DISCARD 6 +#define BIO_RW_FAILFAST_DEV 7 +#define BIO_RW_FAILFAST_TRANSPORT 8 +#define BIO_RW_FAILFAST_DRIVER 9 + +#define BIO_RW_SYNC (BIO_RW_SYNCIO | BIO_RW_UNPLUG) + +#define bio_rw_flagged(bio, flag) ((bio)->bi_rw & (1 << (flag))) + +/* + * Old defines, these should eventually be replaced by direct usage of + * bio_rw_flagged() + */ +#define bio_barrier(bio) bio_rw_flagged(bio, BIO_RW_BARRIER) +#define bio_sync(bio) bio_rw_flagged(bio, BIO_RW_SYNCIO) +#define bio_unplug(bio) bio_rw_flagged(bio, BIO_RW_UNPLUG) +#define bio_failfast_dev(bio) bio_rw_flagged(bio, BIO_RW_FAILFAST_DEV) +#define bio_failfast_transport(bio) \ + bio_rw_flagged(bio, BIO_RW_FAILFAST_TRANSPORT) +#define bio_failfast_driver(bio) \ + bio_rw_flagged(bio, BIO_RW_FAILFAST_DRIVER) +#define bio_rw_ahead(bio) bio_rw_flagged(bio, BIO_RW_AHEAD) +#define bio_rw_meta(bio) bio_rw_flagged(bio, BIO_RW_META) +#define bio_discard(bio) bio_rw_flagged(bio, BIO_RW_DISCARD) /* * upper 16 bits of bi_rw define the io priority of this bio @@ -193,15 +214,6 @@ struct bio { #define bio_offset(bio) bio_iovec((bio))->bv_offset #define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_idx) #define bio_sectors(bio) ((bio)->bi_size >> 9) -#define bio_barrier(bio) ((bio)->bi_rw & (1 << BIO_RW_BARRIER)) -#define bio_sync(bio) ((bio)->bi_rw & (1 << BIO_RW_SYNC)) -#define bio_failfast_dev(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST_DEV)) -#define bio_failfast_transport(bio) \ - ((bio)->bi_rw & (1 << BIO_RW_FAILFAST_TRANSPORT)) -#define bio_failfast_driver(bio) ((bio)->bi_rw & (1 
<< BIO_RW_FAILFAST_DRIVER)) -#define bio_rw_ahead(bio) ((bio)->bi_rw & (1 << BIO_RW_AHEAD)) -#define bio_rw_meta(bio) ((bio)->bi_rw & (1 << BIO_RW_META)) -#define bio_discard(bio) ((bio)->bi_rw & (1 << BIO_RW_DISCARD)) #define bio_empty_barrier(bio) (bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio)) static inline unsigned int bio_cur_sectors(struct bio *bio) @@ -312,7 +324,6 @@ struct bio_integrity_payload { void *bip_buf; /* generated integrity data */ bio_end_io_t *bip_end_io; /* saved I/O completion fn */ - int bip_error; /* saved I/O error */ unsigned int bip_size; unsigned short bip_pool; /* pool the ivec came from */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 044467ef7b1..d08c4b8219a 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -108,6 +108,7 @@ enum rq_flag_bits { __REQ_RW_META, /* metadata io request */ __REQ_COPY_USER, /* contains copies of user pages */ __REQ_INTEGRITY, /* integrity metadata has been remapped */ + __REQ_UNPLUG, /* unplug queue on submission */ __REQ_NR_BITS, /* stops here */ }; @@ -134,6 +135,7 @@ enum rq_flag_bits { #define REQ_RW_META (1 << __REQ_RW_META) #define REQ_COPY_USER (1 << __REQ_COPY_USER) #define REQ_INTEGRITY (1 << __REQ_INTEGRITY) +#define REQ_UNPLUG (1 << __REQ_UNPLUG) #define BLK_MAX_CDB 16 @@ -449,6 +451,11 @@ struct request_queue #define QUEUE_FLAG_STACKABLE 13 /* supports request stacking */ #define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */ #define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ +#define QUEUE_FLAG_IO_STAT 15 /* do IO stats */ + +#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ + (1 << QUEUE_FLAG_CLUSTER) | \ + 1 << QUEUE_FLAG_STACKABLE) static inline int queue_is_locked(struct request_queue *q) { @@ -565,6 +572,7 @@ enum { #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) +#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) #define blk_queue_flushing(q) ((q)->ordseq) #define blk_queue_stackable(q) \ test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index b45109c61fb..b28b37eb11c 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -308,7 +308,8 @@ void buffer_assertion_failure(struct buffer_head *bh); int val = (expr); \ if (!val) { \ printk(KERN_ERR \ - "EXT3-fs unexpected failure: %s;\n",# expr); \ + "JBD2 unexpected failure: %s: %s;\n", \ + __func__, #expr); \ printk(KERN_ERR why "\n"); \ } \ val; \ diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index f44bb5c77a7..d0a043153cc 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -182,7 +182,7 @@ static inline int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo) size = 2048; if (nr_pcpus >= 32) size = 4096; - if (sizeof(rwlock_t) != 0) { + if (sizeof(spinlock_t) != 0) { #ifdef CONFIG_NUMA if (size * sizeof(spinlock_t) > PAGE_SIZE) hashinfo->ehash_locks = vmalloc(size * sizeof(spinlock_t)); diff --git a/ipc/shm.c b/ipc/shm.c index a9e09ad2263..c0a021f7f41 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -368,14 +368,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) file = hugetlb_file_setup(name, size); shp->mlock_user = current_user(); } else { - int acctflag = VM_ACCOUNT; + int acctflag = 0; /* * Do not allow no accounting for 
OVERCOMMIT_NEVER, even * if it's asked for. */ if ((shmflg & SHM_NORESERVE) && sysctl_overcommit_memory != OVERCOMMIT_NEVER) - acctflag = 0; + acctflag = VM_NORESERVE; file = shmem_file_setup(name, size, acctflag); } error = PTR_ERR(file); diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 2f32969c09d..7dcf6e9f2b0 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -17,6 +17,7 @@ #include <linux/clocksource.h> #include <linux/kallsyms.h> #include <linux/seq_file.h> +#include <linux/suspend.h> #include <linux/debugfs.h> #include <linux/hardirq.h> #include <linux/kthread.h> @@ -1965,6 +1966,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, #ifdef CONFIG_FUNCTION_GRAPH_TRACER static atomic_t ftrace_graph_active; +static struct notifier_block ftrace_suspend_notifier; int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) { @@ -2043,6 +2045,27 @@ static int start_graph_tracing(void) return ret; } +/* + * Hibernation protection. + * The state of the current task is too much unstable during + * suspend/restore to disk. We want to protect against that. + */ +static int +ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state, + void *unused) +{ + switch (state) { + case PM_HIBERNATION_PREPARE: + pause_graph_tracing(); + break; + + case PM_POST_HIBERNATION: + unpause_graph_tracing(); + break; + } + return NOTIFY_DONE; +} + int register_ftrace_graph(trace_func_graph_ret_t retfunc, trace_func_graph_ent_t entryfunc) { @@ -2050,6 +2073,9 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc, mutex_lock(&ftrace_sysctl_lock); + ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call; + register_pm_notifier(&ftrace_suspend_notifier); + atomic_inc(&ftrace_graph_active); ret = start_graph_tracing(); if (ret) { @@ -2075,6 +2101,7 @@ void unregister_ftrace_graph(void) ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; ftrace_graph_entry = ftrace_graph_entry_stub; ftrace_shutdown(FTRACE_STOP_FUNC_RET); + unregister_pm_notifier(&ftrace_suspend_notifier); mutex_unlock(&ftrace_sysctl_lock); } diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 8b0daf0662e..bd38c5cfd8a 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -246,7 +246,7 @@ static inline int test_time_stamp(u64 delta) return 0; } -#define BUF_PAGE_SIZE (PAGE_SIZE - sizeof(struct buffer_data_page)) +#define BUF_PAGE_SIZE (PAGE_SIZE - offsetof(struct buffer_data_page, data)) /* * head_page == tail_page && head == tail then buffer is empty. @@ -1025,12 +1025,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, } if (next_page == head_page) { - if (!(buffer->flags & RB_FL_OVERWRITE)) { - /* reset write */ - if (tail <= BUF_PAGE_SIZE) - local_set(&tail_page->write, tail); + if (!(buffer->flags & RB_FL_OVERWRITE)) goto out_unlock; - } /* tail_page has not moved yet? 
*/ if (tail_page == cpu_buffer->tail_page) { @@ -1105,6 +1101,10 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, return event; out_unlock: + /* reset write */ + if (tail <= BUF_PAGE_SIZE) + local_set(&tail_page->write, tail); + __raw_spin_unlock(&cpu_buffer->lock); local_irq_restore(flags); return NULL; @@ -2174,6 +2174,9 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) cpu_buffer->overrun = 0; cpu_buffer->entries = 0; + + cpu_buffer->write_stamp = 0; + cpu_buffer->read_stamp = 0; } /** diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index c580233add9..17bb88d86ac 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -40,7 +40,7 @@ #define TRACE_BUFFER_FLAGS (RB_FL_OVERWRITE) -unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX; +unsigned long __read_mostly tracing_max_latency; unsigned long __read_mostly tracing_thresh; /* @@ -3736,7 +3736,7 @@ static struct notifier_block trace_die_notifier = { * it if we decide to change what log level the ftrace dump * should be at. */ -#define KERN_TRACE KERN_INFO +#define KERN_TRACE KERN_EMERG static void trace_printk_seq(struct trace_seq *s) @@ -3770,6 +3770,7 @@ void ftrace_dump(void) dump_ran = 1; /* No turning back! */ + tracing_off(); ftrace_kill(); for_each_tracing_cpu(cpu) { diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 7c2e326bbc8..62a78d94353 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c @@ -380,6 +380,7 @@ static void stop_irqsoff_tracer(struct trace_array *tr) static void __irqsoff_tracer_init(struct trace_array *tr) { + tracing_max_latency = 0; irqsoff_trace = tr; /* make sure that the tracer is visible */ smp_wmb(); diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 43586b689e3..42ae1e77b6b 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -333,6 +333,7 @@ static void stop_wakeup_tracer(struct trace_array *tr) static int wakeup_tracer_init(struct trace_array *tr) { + tracing_max_latency = 0; wakeup_trace = tr; start_wakeup_tracer(tr); return 0; diff --git a/mm/mmap.c b/mm/mmap.c index d3fa10a726c..214b6a258ee 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -658,6 +658,9 @@ again: remove_next = 1 + (end > next->vm_end); validate_mm(mm); } +/* Flags that can be inherited from an existing mapping when merging */ +#define VM_MERGEABLE_FLAGS (VM_CAN_NONLINEAR) + /* * If the vma has a ->close operation then the driver probably needs to release * per-vma resources, so we don't attempt to merge those. @@ -665,7 +668,7 @@ again: remove_next = 1 + (end > next->vm_end); static inline int is_mergeable_vma(struct vm_area_struct *vma, struct file *file, unsigned long vm_flags) { - if (vma->vm_flags != vm_flags) + if ((vma->vm_flags ^ vm_flags) & ~VM_MERGEABLE_FLAGS) return 0; if (vma->vm_file != file) return 0; @@ -1087,6 +1090,15 @@ int vma_wants_writenotify(struct vm_area_struct *vma) mapping_cap_account_dirty(vma->vm_file->f_mapping); } +/* + * We account for memory if it's a private writeable mapping, + * and VM_NORESERVE wasn't set. 
+ */ +static inline int accountable_mapping(unsigned int vm_flags) +{ + return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE; +} + unsigned long mmap_region(struct file *file, unsigned long addr, unsigned long len, unsigned long flags, unsigned int vm_flags, unsigned long pgoff, @@ -1114,23 +1126,24 @@ munmap_back: if (!may_expand_vm(mm, len >> PAGE_SHIFT)) return -ENOMEM; - if (flags & MAP_NORESERVE) + /* + * Set 'VM_NORESERVE' if we should not account for the + * memory use of this mapping. We only honor MAP_NORESERVE + * if we're allowed to overcommit memory. + */ + if ((flags & MAP_NORESERVE) && sysctl_overcommit_memory != OVERCOMMIT_NEVER) + vm_flags |= VM_NORESERVE; + if (!accountable) vm_flags |= VM_NORESERVE; - if (accountable && (!(flags & MAP_NORESERVE) || - sysctl_overcommit_memory == OVERCOMMIT_NEVER)) { - if (vm_flags & VM_SHARED) { - /* Check memory availability in shmem_file_setup? */ - vm_flags |= VM_ACCOUNT; - } else if (vm_flags & VM_WRITE) { - /* - * Private writable mapping: check memory availability - */ - charged = len >> PAGE_SHIFT; - if (security_vm_enough_memory(charged)) - return -ENOMEM; - vm_flags |= VM_ACCOUNT; - } + /* + * Private writable mapping: check memory availability + */ + if (accountable_mapping(vm_flags)) { + charged = len >> PAGE_SHIFT; + if (security_vm_enough_memory(charged)) + return -ENOMEM; + vm_flags |= VM_ACCOUNT; } /* @@ -1181,14 +1194,6 @@ munmap_back: goto free_vma; } - /* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform - * shmem_zero_setup (perhaps called through /dev/zero's ->mmap) - * that memory reservation must be checked; but that reservation - * belongs to shared memory object, not to vma: so now clear it. - */ - if ((vm_flags & (VM_SHARED|VM_ACCOUNT)) == (VM_SHARED|VM_ACCOUNT)) - vma->vm_flags &= ~VM_ACCOUNT; - /* Can addr have changed?? * * Answer: Yes, several device drivers can do it in their diff --git a/mm/shmem.c b/mm/shmem.c index 5d0de96c978..19d566ccdee 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2628,7 +2628,7 @@ struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags) goto close_file; #ifdef CONFIG_SHMEM - SHMEM_I(inode)->flags = flags & VM_ACCOUNT; + SHMEM_I(inode)->flags = (flags & VM_NORESERVE) ? 
0 : VM_ACCOUNT; #endif d_instantiate(dentry, inode); inode->i_size = size; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 2e5f2ca3bdc..da74b844f4e 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2212,10 +2212,10 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data, return 0; next_skb: - block_limit = skb_headlen(st->cur_skb); + block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; if (abs_offset < block_limit) { - *data = st->cur_skb->data + abs_offset; + *data = st->cur_skb->data + (abs_offset - st->stepped_offset); return block_limit - abs_offset; } @@ -2250,13 +2250,14 @@ next_skb: st->frag_data = NULL; } - if (st->cur_skb->next) { - st->cur_skb = st->cur_skb->next; + if (st->root_skb == st->cur_skb && + skb_shinfo(st->root_skb)->frag_list) { + st->cur_skb = skb_shinfo(st->root_skb)->frag_list; st->frag_idx = 0; goto next_skb; - } else if (st->root_skb == st->cur_skb && - skb_shinfo(st->root_skb)->frag_list) { - st->cur_skb = skb_shinfo(st->root_skb)->frag_list; + } else if (st->cur_skb->next) { + st->cur_skb = st->cur_skb->next; + st->frag_idx = 0; goto next_skb; } diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 42a0f3dd3fd..d722013c1ca 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -1268,6 +1268,9 @@ __be32 __init root_nfs_parse_addr(char *name) static int __init ip_auto_config(void) { __be32 addr; +#ifdef IPCONFIG_DYNAMIC + int retries = CONF_OPEN_RETRIES; +#endif #ifdef CONFIG_PROC_FS proc_net_fops_create(&init_net, "pnp", S_IRUGO, &pnp_seq_fops); @@ -1304,9 +1307,6 @@ static int __init ip_auto_config(void) #endif ic_first_dev->next) { #ifdef IPCONFIG_DYNAMIC - - int retries = CONF_OPEN_RETRIES; - if (ic_dynamic() < 0) { ic_close_devs(); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 0cd71b84e48..76b148bcb0d 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -524,7 +524,8 @@ static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, struct tcp_splice_state *tss = rd_desc->arg.data; int ret; - ret = skb_splice_bits(skb, offset, tss->pipe, rd_desc->count, tss->flags); + ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len), + tss->flags); if (ret > 0) rd_desc->count -= ret; return ret; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index cf5ab0581eb..b7faffe5c02 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -120,8 +120,11 @@ EXPORT_SYMBOL(sysctl_udp_wmem_min); atomic_t udp_memory_allocated; EXPORT_SYMBOL(udp_memory_allocated); +#define PORTS_PER_CHAIN (65536 / UDP_HTABLE_SIZE) + static int udp_lib_lport_inuse(struct net *net, __u16 num, const struct udp_hslot *hslot, + unsigned long *bitmap, struct sock *sk, int (*saddr_comp)(const struct sock *sk1, const struct sock *sk2)) @@ -132,12 +135,17 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num, sk_nulls_for_each(sk2, node, &hslot->head) if (net_eq(sock_net(sk2), net) && sk2 != sk && - sk2->sk_hash == num && + (bitmap || sk2->sk_hash == num) && (!sk2->sk_reuse || !sk->sk_reuse) && (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && - (*saddr_comp)(sk, sk2)) - return 1; + (*saddr_comp)(sk, sk2)) { + if (bitmap) + __set_bit(sk2->sk_hash / UDP_HTABLE_SIZE, + bitmap); + else + return 1; + } return 0; } @@ -160,32 +168,47 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum, if (!snum) { int low, high, remaining; unsigned rand; - unsigned short first; + unsigned short first, last; + DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN); inet_get_local_port_range(&low, &high); 
remaining = (high - low) + 1; rand = net_random(); - snum = first = rand % remaining + low; - rand |= 1; - for (;;) { - hslot = &udptable->hash[udp_hashfn(net, snum)]; + first = (((u64)rand * remaining) >> 32) + low; + /* + * force rand to be an odd multiple of UDP_HTABLE_SIZE + */ + rand = (rand | 1) * UDP_HTABLE_SIZE; + for (last = first + UDP_HTABLE_SIZE; first != last; first++) { + hslot = &udptable->hash[udp_hashfn(net, first)]; + bitmap_zero(bitmap, PORTS_PER_CHAIN); spin_lock_bh(&hslot->lock); - if (!udp_lib_lport_inuse(net, snum, hslot, sk, saddr_comp)) - break; - spin_unlock_bh(&hslot->lock); + udp_lib_lport_inuse(net, snum, hslot, bitmap, sk, + saddr_comp); + + snum = first; + /* + * Iterate on all possible values of snum for this hash. + * Using steps of an odd multiple of UDP_HTABLE_SIZE + * give us randomization and full range coverage. + */ do { - snum = snum + rand; - } while (snum < low || snum > high); - if (snum == first) - goto fail; + if (low <= snum && snum <= high && + !test_bit(snum / UDP_HTABLE_SIZE, bitmap)) + goto found; + snum += rand; + } while (snum != first); + spin_unlock_bh(&hslot->lock); } + goto fail; } else { hslot = &udptable->hash[udp_hashfn(net, snum)]; spin_lock_bh(&hslot->lock); - if (udp_lib_lport_inuse(net, snum, hslot, sk, saddr_comp)) + if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, saddr_comp)) goto fail_unlock; } +found: inet_sk(sk)->num = snum; sk->sk_hash = snum; if (sk_unhashed(sk)) { diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index e92ad8455c6..f9afb452249 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -4250,7 +4250,7 @@ static struct addrconf_sysctl_table .procname = "mc_forwarding", .data = &ipv6_devconf.mc_forwarding, .maxlen = sizeof(int), - .mode = 0644, + .mode = 0444, .proc_handler = proc_dointvec, }, #endif diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 4f433847d95..36dff880718 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -443,10 +443,10 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info, if (xfrm_decode_session_reverse(skb, &fl2, AF_INET6)) goto relookup_failed; - if (ip6_dst_lookup(sk, &dst2, &fl)) + if (ip6_dst_lookup(sk, &dst2, &fl2)) goto relookup_failed; - err = xfrm_lookup(net, &dst2, &fl, sk, XFRM_LOOKUP_ICMP); + err = xfrm_lookup(net, &dst2, &fl2, sk, XFRM_LOOKUP_ICMP); switch (err) { case 0: dst_release(dst); diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index 936f48946e2..f171e8dbac9 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -255,6 +255,7 @@ int ip6_mc_input(struct sk_buff *skb) * IPv6 multicast router mode is now supported ;) */ if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding && + !(ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) && likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) { /* * Okay, we try to forward - split and duplicate @@ -316,7 +317,6 @@ int ip6_mc_input(struct sk_buff *skb) } if (skb2) { - skb2->dev = skb2->dst->dev; ip6_mr_input(skb2); } } diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 3c51b2d827f..228be551e9c 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -48,6 +48,7 @@ #include <linux/pim.h> #include <net/addrconf.h> #include <linux/netfilter_ipv6.h> +#include <net/ip6_checksum.h> /* Big lock, protecting vif table, mrt cache and mroute socket state. Note that the changes are semaphored via rtnl_lock. 
@@ -365,7 +366,9 @@ static int pim6_rcv(struct sk_buff *skb) pim = (struct pimreghdr *)skb_transport_header(skb); if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) || (pim->flags & PIM_NULL_REGISTER) || - (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 && + (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, + sizeof(*pim), IPPROTO_PIM, + csum_partial((void *)pim, sizeof(*pim), 0)) && csum_fold(skb_checksum(skb, 0, skb->len, 0)))) goto drop; @@ -392,7 +395,7 @@ static int pim6_rcv(struct sk_buff *skb) skb_pull(skb, (u8 *)encap - skb->data); skb_reset_network_header(skb); skb->dev = reg_dev; - skb->protocol = htons(ETH_P_IP); + skb->protocol = htons(ETH_P_IPV6); skb->ip_summed = 0; skb->pkt_type = PACKET_HOST; dst_release(skb->dst); @@ -481,6 +484,7 @@ static int mif6_delete(struct net *net, int vifi) { struct mif_device *v; struct net_device *dev; + struct inet6_dev *in6_dev; if (vifi < 0 || vifi >= net->ipv6.maxvif) return -EADDRNOTAVAIL; @@ -513,6 +517,10 @@ static int mif6_delete(struct net *net, int vifi) dev_set_allmulti(dev, -1); + in6_dev = __in6_dev_get(dev); + if (in6_dev) + in6_dev->cnf.mc_forwarding--; + if (v->flags & MIFF_REGISTER) unregister_netdevice(dev); @@ -622,6 +630,7 @@ static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock) int vifi = vifc->mif6c_mifi; struct mif_device *v = &net->ipv6.vif6_table[vifi]; struct net_device *dev; + struct inet6_dev *in6_dev; int err; /* Is vif busy ? */ @@ -662,6 +671,10 @@ static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock) return -EINVAL; } + in6_dev = __in6_dev_get(dev); + if (in6_dev) + in6_dev->cnf.mc_forwarding++; + /* * Fill in the VIF structures */ @@ -838,8 +851,6 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi, skb->dst = dst_clone(pkt->dst); skb->ip_summed = CHECKSUM_UNNECESSARY; - - skb_pull(skb, sizeof(struct ipv6hdr)); } if (net->ipv6.mroute6_sk == NULL) { @@ -1222,8 +1233,10 @@ static int ip6mr_sk_init(struct sock *sk) rtnl_lock(); write_lock_bh(&mrt_lock); - if (likely(net->ipv6.mroute6_sk == NULL)) + if (likely(net->ipv6.mroute6_sk == NULL)) { net->ipv6.mroute6_sk = sk; + net->ipv6.devconf_all->mc_forwarding++; + } else err = -EADDRINUSE; write_unlock_bh(&mrt_lock); @@ -1242,6 +1255,7 @@ int ip6mr_sk_done(struct sock *sk) if (sk == net->ipv6.mroute6_sk) { write_lock_bh(&mrt_lock); net->ipv6.mroute6_sk = NULL; + net->ipv6.devconf_all->mc_forwarding--; write_unlock_bh(&mrt_lock); mroute_clean_tables(net); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index c4a59824ac2..9c574235c90 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -794,7 +794,7 @@ void ip6_route_input(struct sk_buff *skb) .proto = iph->nexthdr, }; - if (rt6_need_strict(&iph->daddr)) + if (rt6_need_strict(&iph->daddr) && skb->dev->type != ARPHRD_PIMREG) flags |= RT6_LOOKUP_F_IFACE; skb->dst = fib6_rule_lookup(net, &fl, flags, ip6_pol_route_input); diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 5f94db2f3e9..9454d4ae46d 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -77,6 +77,7 @@ #include <linux/poll.h> #include <linux/module.h> #include <linux/init.h> +#include <linux/mutex.h> #ifdef CONFIG_INET #include <net/inet_common.h> @@ -175,6 +176,7 @@ struct packet_sock { #endif struct packet_type prot_hook; spinlock_t bind_lock; + struct mutex pg_vec_lock; unsigned int running:1, /* prot_hook is attached*/ auxdata:1, origdev:1; @@ -1069,6 +1071,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol) */ 
spin_lock_init(&po->bind_lock); + mutex_init(&po->pg_vec_lock); po->prot_hook.func = packet_rcv; if (sock->type == SOCK_PACKET) @@ -1865,6 +1868,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing synchronize_net(); err = -EBUSY; + mutex_lock(&po->pg_vec_lock); if (closing || atomic_read(&po->mapped) == 0) { err = 0; #define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; }) @@ -1886,6 +1890,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing if (atomic_read(&po->mapped)) printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n", atomic_read(&po->mapped)); } + mutex_unlock(&po->pg_vec_lock); spin_lock(&po->bind_lock); if (was_running && !po->running) { @@ -1918,7 +1923,7 @@ static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_st size = vma->vm_end - vma->vm_start; - lock_sock(sk); + mutex_lock(&po->pg_vec_lock); if (po->pg_vec == NULL) goto out; if (size != po->pg_vec_len*po->pg_vec_pages*PAGE_SIZE) @@ -1941,7 +1946,7 @@ static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_st err = 0; out: - release_sock(sk); + mutex_unlock(&po->pg_vec_lock); return err; } #endif diff --git a/net/wimax/debugfs.c b/net/wimax/debugfs.c index 87cf4430079..94d216a4640 100644 --- a/net/wimax/debugfs.c +++ b/net/wimax/debugfs.c @@ -28,17 +28,6 @@ #include "debug-levels.h" -/* Debug framework control of debug levels */ -struct d_level D_LEVEL[] = { - D_SUBMODULE_DEFINE(debugfs), - D_SUBMODULE_DEFINE(id_table), - D_SUBMODULE_DEFINE(op_msg), - D_SUBMODULE_DEFINE(op_reset), - D_SUBMODULE_DEFINE(op_rfkill), - D_SUBMODULE_DEFINE(stack), -}; -size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL); - #define __debugfs_register(prefix, name, parent) \ do { \ result = d_level_register_debugfs(prefix, name, parent); \ diff --git a/net/wimax/stack.c b/net/wimax/stack.c index d4da92f8981..3869c032788 100644 --- a/net/wimax/stack.c +++ b/net/wimax/stack.c @@ -516,6 +516,19 @@ void wimax_dev_rm(struct wimax_dev *wimax_dev) } EXPORT_SYMBOL_GPL(wimax_dev_rm); + +/* Debug framework control of debug levels */ +struct d_level D_LEVEL[] = { + D_SUBMODULE_DEFINE(debugfs), + D_SUBMODULE_DEFINE(id_table), + D_SUBMODULE_DEFINE(op_msg), + D_SUBMODULE_DEFINE(op_reset), + D_SUBMODULE_DEFINE(op_rfkill), + D_SUBMODULE_DEFINE(stack), +}; +size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL); + + struct genl_family wimax_gnl_family = { .id = GENL_ID_GENERATE, .name = "WiMAX", diff --git a/net/wireless/reg.c b/net/wireless/reg.c index bc494cef210..85c9034c59b 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -498,6 +498,7 @@ static struct ieee80211_regdomain *country_ie_2_rd( * calculate the number of reg rules we will need. We will need one * for each channel subband */ while (country_ie_len >= 3) { + int end_channel = 0; struct ieee80211_country_ie_triplet *triplet = (struct ieee80211_country_ie_triplet *) country_ie; int cur_sub_max_channel = 0, cur_channel = 0; @@ -509,9 +510,25 @@ static struct ieee80211_regdomain *country_ie_2_rd( continue; } + /* 2 GHz */ + if (triplet->chans.first_channel <= 14) + end_channel = triplet->chans.first_channel + + triplet->chans.num_channels; + else + /* + * 5 GHz -- For example in country IEs if the first + * channel given is 36 and the number of channels is 4 + * then the individual channel numbers defined for the + * 5 GHz PHY by these parameters are: 36, 40, 44, and 48 + * and not 36, 37, 38, 39. 
+ * + * See: http://tinyurl.com/11d-clarification + */ + end_channel = triplet->chans.first_channel + + (4 * (triplet->chans.num_channels - 1)); + cur_channel = triplet->chans.first_channel; - cur_sub_max_channel = ieee80211_channel_to_frequency( - cur_channel + triplet->chans.num_channels); + cur_sub_max_channel = end_channel; /* Basic sanity check */ if (cur_sub_max_channel < cur_channel) @@ -590,15 +607,6 @@ static struct ieee80211_regdomain *country_ie_2_rd( end_channel = triplet->chans.first_channel + triplet->chans.num_channels; else - /* - * 5 GHz -- For example in country IEs if the first - * channel given is 36 and the number of channels is 4 - * then the individual channel numbers defined for the - * 5 GHz PHY by these parameters are: 36, 40, 44, and 48 - * and not 36, 37, 38, 39. - * - * See: http://tinyurl.com/11d-clarification - */ end_channel = triplet->chans.first_channel + (4 * (triplet->chans.num_channels - 1)); @@ -1276,7 +1284,7 @@ static void reg_country_ie_process_debug( if (intersected_rd) { printk(KERN_DEBUG "cfg80211: We intersect both of these " "and get:\n"); - print_regdomain_info(rd); + print_regdomain_info(intersected_rd); return; } printk(KERN_DEBUG "cfg80211: Intersection between both failed\n");
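
A note on the country-IE arithmetic in the net/wireless/reg.c hunk above: 2 GHz triplets enumerate channels one by one, while 5 GHz channels are defined in steps of four, so a triplet with first_channel 36 and num_channels 4 covers channels 36, 40, 44 and 48. The following is a standalone user-space sketch of that end-channel calculation, included only for illustration (it is not part of the patch; the function name is made up):

/*
 * End-channel arithmetic as used in country_ie_2_rd(): 2 GHz channels
 * are numbered consecutively, 5 GHz channels are spaced 4 apart.
 */
#include <stdio.h>

static int country_ie_end_channel(int first_channel, int num_channels)
{
	if (first_channel <= 14)	/* 2 GHz band */
		return first_channel + num_channels;
	/* 5 GHz band */
	return first_channel + 4 * (num_channels - 1);
}

int main(void)
{
	/* 5 GHz: first=36, n=4 -> 36, 40, 44, 48 -> prints 48 */
	printf("%d\n", country_ie_end_channel(36, 4));
	/* 2 GHz: first=1, n=13 -> prints 14 */
	printf("%d\n", country_ie_end_channel(1, 13));
	return 0;
}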
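
Similarly, the udp_lib_get_port() rework in net/ipv4/udp.c relies on the observation spelled out in its comment: stepping a 16-bit port number by an odd multiple of the hash-table size stays within a single hash chain and visits every port of that chain exactly once before wrapping back to the start, which is what lets the in-use bitmap be computed once per chain. Below is a minimal user-space sketch of that property, assuming a power-of-two table size of 128; the constant names are illustrative, not the kernel's:

/*
 * Stepping a 16-bit value by an odd multiple of HTABLE_SIZE keeps
 * (value % HTABLE_SIZE) constant and, because an odd number is coprime
 * with the power-of-two chain length, walks all PORTS_PER_CHAIN slots
 * of that chain before returning to the starting value.
 */
#include <stdbool.h>
#include <stdio.h>

#define HTABLE_SIZE	128			/* assumed, power of two */
#define PORTS_PER_CHAIN	(65536 / HTABLE_SIZE)

int main(void)
{
	unsigned short first = 40000, snum = first;
	unsigned int step = (12345u | 1) * HTABLE_SIZE;	/* odd multiple */
	bool seen[PORTS_PER_CHAIN] = { false };
	int visited = 0;

	do {
		if (!seen[snum / HTABLE_SIZE]) {
			seen[snum / HTABLE_SIZE] = true;
			visited++;
		}
		snum += step;	/* wraps mod 65536, chain is preserved */
	} while (snum != first);

	/* prints "visited 512 of 512 chain slots" */
	printf("visited %d of %d chain slots\n", visited, PORTS_PER_CHAIN);
	return 0;
}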