355 files changed, 3645 insertions, 2438 deletions
diff --git a/MAINTAINERS b/MAINTAINERS index aa9bbd1c0e2..eeecb504494 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2813,38 +2813,19 @@ F: Documentation/gpio.txt F: drivers/gpio/ F: include/linux/gpio* +GRE DEMULTIPLEXER DRIVER +M: Dmitry Kozlov <xeb@mail.ru> +L: netdev@vger.kernel.org +S: Maintained +F: net/ipv4/gre.c +F: include/net/gre.h + GRETH 10/100/1G Ethernet MAC device driver M: Kristoffer Glembo <kristoffer@gaisler.com> L: netdev@vger.kernel.org S: Maintained F: drivers/net/greth* -HARD DRIVE ACTIVE PROTECTION SYSTEM (HDAPS) DRIVER -M: Frank Seidel <frank@f-seidel.de> -L: platform-driver-x86@vger.kernel.org -W: http://www.kernel.org/pub/linux/kernel/people/fseidel/hdaps/ -S: Maintained -F: drivers/platform/x86/hdaps.c - -HWPOISON MEMORY FAILURE HANDLING -M: Andi Kleen <andi@firstfloor.org> -L: linux-mm@kvack.org -T: git git://git.kernel.org/pub/scm/linux/kernel/git/ak/linux-mce-2.6.git hwpoison -S: Maintained -F: mm/memory-failure.c -F: mm/hwpoison-inject.c - -HYPERVISOR VIRTUAL CONSOLE DRIVER -L: linuxppc-dev@lists.ozlabs.org -S: Odd Fixes -F: drivers/tty/hvc/ - -iSCSI BOOT FIRMWARE TABLE (iBFT) DRIVER -M: Peter Jones <pjones@redhat.com> -M: Konrad Rzeszutek Wilk <konrad@kernel.org> -S: Maintained -F: drivers/firmware/iscsi_ibft* - GSPCA FINEPIX SUBDRIVER M: Frank Zago <frank@zago.net> L: linux-media@vger.kernel.org @@ -2895,6 +2876,26 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git S: Maintained F: drivers/media/video/gspca/ +HARD DRIVE ACTIVE PROTECTION SYSTEM (HDAPS) DRIVER +M: Frank Seidel <frank@f-seidel.de> +L: platform-driver-x86@vger.kernel.org +W: http://www.kernel.org/pub/linux/kernel/people/fseidel/hdaps/ +S: Maintained +F: drivers/platform/x86/hdaps.c + +HWPOISON MEMORY FAILURE HANDLING +M: Andi Kleen <andi@firstfloor.org> +L: linux-mm@kvack.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/ak/linux-mce-2.6.git hwpoison +S: Maintained +F: mm/memory-failure.c +F: mm/hwpoison-inject.c + +HYPERVISOR VIRTUAL CONSOLE DRIVER +L: linuxppc-dev@lists.ozlabs.org +S: Odd Fixes +F: drivers/tty/hvc/ + HARDWARE MONITORING M: Jean Delvare <khali@linux-fr.org> M: Guenter Roeck <guenter.roeck@ericsson.com> @@ -3478,6 +3479,12 @@ F: Documentation/isapnp.txt F: drivers/pnp/isapnp/ F: include/linux/isapnp.h +iSCSI BOOT FIRMWARE TABLE (iBFT) DRIVER +M: Peter Jones <pjones@redhat.com> +M: Konrad Rzeszutek Wilk <konrad@kernel.org> +S: Maintained +F: drivers/firmware/iscsi_ibft* + ISCSI M: Mike Christie <michaelc@cs.wisc.edu> L: open-iscsi@googlegroups.com @@ -4989,6 +4996,13 @@ F: Documentation/pps/ F: drivers/pps/ F: include/linux/pps*.h +PPTP DRIVER +M: Dmitry Kozlov <xeb@mail.ru> +L: netdev@vger.kernel.org +S: Maintained +F: drivers/net/pptp.c +W: http://sourceforge.net/projects/accel-pptp + PREEMPTIBLE KERNEL M: Robert Love <rml@tech9.net> L: kpreempt-tech@lists.sourceforge.net @@ -7024,20 +7038,6 @@ M: "Maciej W. 
Rozycki" <macro@linux-mips.org> S: Maintained F: drivers/tty/serial/zs.* -GRE DEMULTIPLEXER DRIVER -M: Dmitry Kozlov <xeb@mail.ru> -L: netdev@vger.kernel.org -S: Maintained -F: net/ipv4/gre.c -F: include/net/gre.h - -PPTP DRIVER -M: Dmitry Kozlov <xeb@mail.ru> -L: netdev@vger.kernel.org -S: Maintained -F: drivers/net/pptp.c -W: http://sourceforge.net/projects/accel-pptp - THE REST M: Linus Torvalds <torvalds@linux-foundation.org> L: linux-kernel@vger.kernel.org @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 6 SUBLEVEL = 39 -EXTRAVERSION = -rc6 +EXTRAVERSION = NAME = Flesh-Eating Bats with Fangs # *DOCUMENTATION* diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h index 058937bf5a7..b1834166922 100644 --- a/arch/alpha/include/asm/unistd.h +++ b/arch/alpha/include/asm/unistd.h @@ -452,10 +452,14 @@ #define __NR_fanotify_init 494 #define __NR_fanotify_mark 495 #define __NR_prlimit64 496 +#define __NR_name_to_handle_at 497 +#define __NR_open_by_handle_at 498 +#define __NR_clock_adjtime 499 +#define __NR_syncfs 500 #ifdef __KERNEL__ -#define NR_SYSCALLS 497 +#define NR_SYSCALLS 501 #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_OLD_READDIR diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S index a6a1de9db16..15f999d41c7 100644 --- a/arch/alpha/kernel/systbls.S +++ b/arch/alpha/kernel/systbls.S @@ -498,23 +498,27 @@ sys_call_table: .quad sys_ni_syscall /* sys_timerfd */ .quad sys_eventfd .quad sys_recvmmsg - .quad sys_fallocate /* 480 */ + .quad sys_fallocate /* 480 */ .quad sys_timerfd_create .quad sys_timerfd_settime .quad sys_timerfd_gettime .quad sys_signalfd4 - .quad sys_eventfd2 /* 485 */ + .quad sys_eventfd2 /* 485 */ .quad sys_epoll_create1 .quad sys_dup3 .quad sys_pipe2 .quad sys_inotify_init1 - .quad sys_preadv /* 490 */ + .quad sys_preadv /* 490 */ .quad sys_pwritev .quad sys_rt_tgsigqueueinfo .quad sys_perf_event_open .quad sys_fanotify_init - .quad sys_fanotify_mark /* 495 */ + .quad sys_fanotify_mark /* 495 */ .quad sys_prlimit64 + .quad sys_name_to_handle_at + .quad sys_open_by_handle_at + .quad sys_clock_adjtime + .quad sys_syncfs /* 500 */ .size sys_call_table, . 
- sys_call_table .type sys_call_table, @object diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c index 918e8e0b72f..818e74ed45d 100644 --- a/arch/alpha/kernel/time.c +++ b/arch/alpha/kernel/time.c @@ -375,8 +375,7 @@ static struct clocksource clocksource_rpcc = { static inline void register_rpcc_clocksource(long cycle_freq) { - clocksource_calc_mult_shift(&clocksource_rpcc, cycle_freq, 4); - clocksource_register(&clocksource_rpcc); + clocksource_register_hz(&clocksource_rpcc, cycle_freq); } #else /* !CONFIG_SMP */ static inline void register_rpcc_clocksource(long cycle_freq) diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 8ebbb511c78..0c6852d9350 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -74,7 +74,7 @@ ZTEXTADDR := $(CONFIG_ZBOOT_ROM_TEXT) ZBSSADDR := $(CONFIG_ZBOOT_ROM_BSS) else ZTEXTADDR := 0 -ZBSSADDR := ALIGN(4) +ZBSSADDR := ALIGN(8) endif SEDFLAGS = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/ diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index adf583cd0c3..49f5b2eaaa8 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -179,15 +179,14 @@ not_angel: bl cache_on restart: adr r0, LC0 - ldmia r0, {r1, r2, r3, r5, r6, r9, r11, r12} - ldr sp, [r0, #32] + ldmia r0, {r1, r2, r3, r6, r9, r11, r12} + ldr sp, [r0, #28] /* * We might be running at a different address. We need * to fix up various pointers. */ sub r0, r0, r1 @ calculate the delta offset - add r5, r5, r0 @ _start add r6, r6, r0 @ _edata #ifndef CONFIG_ZBOOT_ROM @@ -206,31 +205,40 @@ restart: adr r0, LC0 /* * Check to see if we will overwrite ourselves. * r4 = final kernel address - * r5 = start of this image * r9 = size of decompressed image * r10 = end of this image, including bss/stack/malloc space if non XIP * We basically want: - * r4 >= r10 -> OK - * r4 + image length <= r5 -> OK + * r4 - 16k page directory >= r10 -> OK + * r4 + image length <= current position (pc) -> OK */ + add r10, r10, #16384 cmp r4, r10 bhs wont_overwrite add r10, r4, r9 - cmp r10, r5 + ARM( cmp r10, pc ) + THUMB( mov lr, pc ) + THUMB( cmp r10, lr ) bls wont_overwrite /* * Relocate ourselves past the end of the decompressed kernel. - * r5 = start of this image * r6 = _edata * r10 = end of the decompressed kernel * Because we always copy ahead, we need to do it from the end and go * backward in case the source and destination overlap. */ - /* Round up to next 256-byte boundary. */ - add r10, r10, #256 + /* + * Bump to the next 256-byte boundary with the size of + * the relocation code added. This avoids overwriting + * ourself when the offset is small. + */ + add r10, r10, #((reloc_code_end - restart + 256) & ~255) bic r10, r10, #255 + /* Get start of code we want to copy and align it down. */ + adr r5, restart + bic r5, r5, #31 + sub r9, r6, r5 @ size to copy add r9, r9, #31 @ rounded up to a multiple bic r9, r9, #31 @ ... of 32 bytes @@ -245,6 +253,11 @@ restart: adr r0, LC0 /* Preserve offset to relocated code. 
*/ sub r6, r9, r6 +#ifndef CONFIG_ZBOOT_ROM + /* cache_clean_flush may use the stack, so relocate it */ + add sp, sp, r6 +#endif + bl cache_clean_flush adr r0, BSYM(restart) @@ -333,7 +346,6 @@ not_relocated: mov r0, #0 LC0: .word LC0 @ r1 .word __bss_start @ r2 .word _end @ r3 - .word _start @ r5 .word _edata @ r6 .word _image_size @ r9 .word _got_start @ r11 @@ -1062,6 +1074,7 @@ memdump: mov r12, r0 #endif .ltorg +reloc_code_end: .align .section ".stack", "aw", %nobits diff --git a/arch/arm/boot/compressed/vmlinux.lds.in b/arch/arm/boot/compressed/vmlinux.lds.in index 5309909d728..ea80abe7884 100644 --- a/arch/arm/boot/compressed/vmlinux.lds.in +++ b/arch/arm/boot/compressed/vmlinux.lds.in @@ -54,6 +54,7 @@ SECTIONS .bss : { *(.bss) } _end = .; + . = ALIGN(8); /* the stack must be 64-bit aligned */ .stack : { *(.stack) } .stab 0 : { *(.stab) } diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index 885be097769..832888d0c20 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h @@ -159,7 +159,7 @@ extern unsigned int user_debug; #include <mach/barriers.h> #elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) #define mb() do { dsb(); outer_sync(); } while (0) -#define rmb() dmb() +#define rmb() dsb() #define wmb() mb() #else #include <asm/memory.h> diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 2bf27f364d0..8182f45ca49 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -767,12 +767,20 @@ long arch_ptrace(struct task_struct *child, long request, #ifdef CONFIG_HAVE_HW_BREAKPOINT case PTRACE_GETHBPREGS: + if (ptrace_get_breakpoints(child) < 0) + return -ESRCH; + ret = ptrace_gethbpregs(child, addr, (unsigned long __user *)data); + ptrace_put_breakpoints(child); break; case PTRACE_SETHBPREGS: + if (ptrace_get_breakpoints(child) < 0) + return -ESRCH; + ret = ptrace_sethbpregs(child, addr, (unsigned long __user *)data); + ptrace_put_breakpoints(child); break; #endif diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index cb839831764..0340224cf73 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -597,19 +597,13 @@ setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, return err; } -static inline void setup_syscall_restart(struct pt_regs *regs) -{ - regs->ARM_r0 = regs->ARM_ORIG_r0; - regs->ARM_pc -= thumb_mode(regs) ? 2 : 4; -} - /* * OK, we're invoking a handler */ static int handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, - struct pt_regs * regs, int syscall) + struct pt_regs * regs) { struct thread_info *thread = current_thread_info(); struct task_struct *tsk = current; @@ -617,26 +611,6 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, int ret; /* - * If we were from a system call, check for system call restarting... 
- */ - if (syscall) { - switch (regs->ARM_r0) { - case -ERESTART_RESTARTBLOCK: - case -ERESTARTNOHAND: - regs->ARM_r0 = -EINTR; - break; - case -ERESTARTSYS: - if (!(ka->sa.sa_flags & SA_RESTART)) { - regs->ARM_r0 = -EINTR; - break; - } - /* fallthrough */ - case -ERESTARTNOINTR: - setup_syscall_restart(regs); - } - } - - /* * translate the signal */ if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap) @@ -685,6 +659,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, */ static void do_signal(struct pt_regs *regs, int syscall) { + unsigned int retval = 0, continue_addr = 0, restart_addr = 0; struct k_sigaction ka; siginfo_t info; int signr; @@ -698,18 +673,61 @@ static void do_signal(struct pt_regs *regs, int syscall) if (!user_mode(regs)) return; + /* + * If we were from a system call, check for system call restarting... + */ + if (syscall) { + continue_addr = regs->ARM_pc; + restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4); + retval = regs->ARM_r0; + + /* + * Prepare for system call restart. We do this here so that a + * debugger will see the already changed PSW. + */ + switch (retval) { + case -ERESTARTNOHAND: + case -ERESTARTSYS: + case -ERESTARTNOINTR: + regs->ARM_r0 = regs->ARM_ORIG_r0; + regs->ARM_pc = restart_addr; + break; + case -ERESTART_RESTARTBLOCK: + regs->ARM_r0 = -EINTR; + break; + } + } + if (try_to_freeze()) goto no_signal; + /* + * Get the signal to deliver. When running under ptrace, at this + * point the debugger may change all our registers ... + */ signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { sigset_t *oldset; + /* + * Depending on the signal settings we may need to revert the + * decision to restart the system call. But skip this if a + * debugger has chosen to restart at a different PC. + */ + if (regs->ARM_pc == restart_addr) { + if (retval == -ERESTARTNOHAND + || (retval == -ERESTARTSYS + && !(ka.sa.sa_flags & SA_RESTART))) { + regs->ARM_r0 = -EINTR; + regs->ARM_pc = continue_addr; + } + } + if (test_thread_flag(TIF_RESTORE_SIGMASK)) oldset = &current->saved_sigmask; else oldset = &current->blocked; - if (handle_signal(signr, &ka, &info, oldset, regs, syscall) == 0) { + if (handle_signal(signr, &ka, &info, oldset, regs) == 0) { /* * A signal was successfully delivered; the saved * sigmask will have been stored in the signal frame, @@ -723,11 +741,14 @@ static void do_signal(struct pt_regs *regs, int syscall) } no_signal: - /* - * No signal to deliver to the process - restart the syscall. - */ if (syscall) { - if (regs->ARM_r0 == -ERESTART_RESTARTBLOCK) { + /* + * Handle restarting a different system call. As above, + * if a debugger has chosen to restart at a different PC, + * ignore the restart. + */ + if (retval == -ERESTART_RESTARTBLOCK + && regs->ARM_pc == continue_addr) { if (thumb_mode(regs)) { regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE; regs->ARM_pc -= 2; @@ -750,11 +771,6 @@ static void do_signal(struct pt_regs *regs, int syscall) #endif } } - if (regs->ARM_r0 == -ERESTARTNOHAND || - regs->ARM_r0 == -ERESTARTSYS || - regs->ARM_r0 == -ERESTARTNOINTR) { - setup_syscall_restart(regs); - } /* If there's no signal to deliver, we just put the saved sigmask * back.
diff --git a/arch/arm/mach-omap2/clkt34xx_dpll3m2.c b/arch/arm/mach-omap2/clkt34xx_dpll3m2.c index b2b1e37bb6b..d6e34dd9e7e 100644 --- a/arch/arm/mach-omap2/clkt34xx_dpll3m2.c +++ b/arch/arm/mach-omap2/clkt34xx_dpll3m2.c @@ -115,6 +115,7 @@ int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate) sdrc_cs0->rfr_ctrl, sdrc_cs0->actim_ctrla, sdrc_cs0->actim_ctrlb, sdrc_cs0->mr, 0, 0, 0, 0); + clk->rate = rate; return 0; } diff --git a/arch/arm/mach-realview/include/mach/barriers.h b/arch/arm/mach-realview/include/mach/barriers.h index 0c5d749d7b5..9a732195aa1 100644 --- a/arch/arm/mach-realview/include/mach/barriers.h +++ b/arch/arm/mach-realview/include/mach/barriers.h @@ -4,5 +4,5 @@ * operation to deadlock the system. */ #define mb() dsb() -#define rmb() dmb() +#define rmb() dsb() #define wmb() mb() diff --git a/arch/arm/mach-tegra/include/mach/barriers.h b/arch/arm/mach-tegra/include/mach/barriers.h index cc115174899..425b42e91ef 100644 --- a/arch/arm/mach-tegra/include/mach/barriers.h +++ b/arch/arm/mach-tegra/include/mach/barriers.h @@ -23,7 +23,7 @@ #include <asm/outercache.h> -#define rmb() dmb() +#define rmb() dsb() #define wmb() do { dsb(); outer_sync(); } while (0) #define mb() wmb() diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index e5f6fc42834..e591513bb53 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -392,7 +392,7 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn) * Convert start_pfn/end_pfn to a struct page pointer. */ start_pg = pfn_to_page(start_pfn - 1) + 1; - end_pg = pfn_to_page(end_pfn); + end_pg = pfn_to_page(end_pfn - 1) + 1; /* * Convert to physical addresses, and @@ -426,6 +426,14 @@ static void __init free_unused_memmap(struct meminfo *mi) bank_start = bank_pfn_start(bank); +#ifdef CONFIG_SPARSEMEM + /* + * Take care not to free memmap entries that don't exist + * due to SPARSEMEM sections which aren't present. + */ + bank_start = min(bank_start, + ALIGN(prev_bank_end, PAGES_PER_SECTION)); +#endif /* * If we had a previous bank, and there is a space * between the current bank and the previous, free it. 
@@ -440,6 +448,12 @@ static void __init free_unused_memmap(struct meminfo *mi) */ prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES); } + +#ifdef CONFIG_SPARSEMEM + if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION)) + free_memmap(prev_bank_end, + ALIGN(prev_bank_end, PAGES_PER_SECTION)); +#endif } static void __init free_highpages(void) diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c index 8a51fd58f65..34fc31ee908 100644 --- a/arch/arm/plat-omap/iommu.c +++ b/arch/arm/plat-omap/iommu.c @@ -793,6 +793,8 @@ static irqreturn_t iommu_fault_handler(int irq, void *data) clk_enable(obj->clk); errs = iommu_report_fault(obj, &da); clk_disable(obj->clk); + if (errs == 0) + return IRQ_HANDLED; /* Fault callback or TLB/PTE Dynamic loading */ if (obj->isr && !obj->isr(obj, da, errs, obj->isr_priv)) diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 8e256cc5dcd..351c80fbba7 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -997,9 +997,6 @@ config IRQ_GT641XX config IRQ_GIC bool -config IRQ_CPU_OCTEON - bool - config MIPS_BOARDS_GEN bool @@ -1359,8 +1356,6 @@ config CPU_SB1 config CPU_CAVIUM_OCTEON bool "Cavium Octeon processor" depends on SYS_HAS_CPU_CAVIUM_OCTEON - select IRQ_CPU - select IRQ_CPU_OCTEON select CPU_HAS_PREFETCH select CPU_SUPPORTS_64BIT_KERNEL select SYS_SUPPORTS_SMP diff --git a/arch/mips/alchemy/devboards/db1x00/board_setup.c b/arch/mips/alchemy/devboards/db1x00/board_setup.c index 05f120ff90f..5c956fe8760 100644 --- a/arch/mips/alchemy/devboards/db1x00/board_setup.c +++ b/arch/mips/alchemy/devboards/db1x00/board_setup.c @@ -127,13 +127,10 @@ const char *get_system_type(void) void __init board_setup(void) { unsigned long bcsr1, bcsr2; - u32 pin_func; bcsr1 = DB1000_BCSR_PHYS_ADDR; bcsr2 = DB1000_BCSR_PHYS_ADDR + DB1000_BCSR_HEXLED_OFS; - pin_func = 0; - #ifdef CONFIG_MIPS_DB1000 printk(KERN_INFO "AMD Alchemy Au1000/Db1000 Board\n"); #endif @@ -164,12 +161,16 @@ void __init board_setup(void) /* Not valid for Au1550 */ #if defined(CONFIG_IRDA) && \ (defined(CONFIG_SOC_AU1000) || defined(CONFIG_SOC_AU1100)) - /* Set IRFIRSEL instead of GPIO15 */ - pin_func = au_readl(SYS_PINFUNC) | SYS_PF_IRF; - au_writel(pin_func, SYS_PINFUNC); - /* Power off until the driver is in use */ - bcsr_mod(BCSR_RESETS, BCSR_RESETS_IRDA_MODE_MASK, - BCSR_RESETS_IRDA_MODE_OFF); + { + u32 pin_func; + + /* Set IRFIRSEL instead of GPIO15 */ + pin_func = au_readl(SYS_PINFUNC) | SYS_PF_IRF; + au_writel(pin_func, SYS_PINFUNC); + /* Power off until the driver is in use */ + bcsr_mod(BCSR_RESETS, BCSR_RESETS_IRDA_MODE_MASK, + BCSR_RESETS_IRDA_MODE_OFF); + } #endif bcsr_write(BCSR_PCMCIA, 0); /* turn off PCMCIA power */ @@ -177,31 +178,35 @@ void __init board_setup(void) alchemy_gpio1_input_enable(); #ifdef CONFIG_MIPS_MIRAGE - /* GPIO[20] is output */ - alchemy_gpio_direction_output(20, 0); + { + u32 pin_func; - /* Set GPIO[210:208] instead of SSI_0 */ - pin_func = au_readl(SYS_PINFUNC) | SYS_PF_S0; + /* GPIO[20] is output */ + alchemy_gpio_direction_output(20, 0); - /* Set GPIO[215:211] for LEDs */ - pin_func |= 5 << 2; + /* Set GPIO[210:208] instead of SSI_0 */ + pin_func = au_readl(SYS_PINFUNC) | SYS_PF_S0; - /* Set GPIO[214:213] for more LEDs */ - pin_func |= 5 << 12; + /* Set GPIO[215:211] for LEDs */ + pin_func |= 5 << 2; - /* Set GPIO[207:200] instead of PCMCIA/LCD */ - pin_func |= SYS_PF_LCD | SYS_PF_PC; - au_writel(pin_func, SYS_PINFUNC); + /* Set GPIO[214:213] for more LEDs */ + pin_func |= 5 << 12; - /* - * Enable speaker amplifier. 
This should - * be part of the audio driver. - */ - alchemy_gpio_direction_output(209, 1); + /* Set GPIO[207:200] instead of PCMCIA/LCD */ + pin_func |= SYS_PF_LCD | SYS_PF_PC; + au_writel(pin_func, SYS_PINFUNC); - pm_power_off = mirage_power_off; - _machine_halt = mirage_power_off; - _machine_restart = (void(*)(char *))mips_softreset; + /* + * Enable speaker amplifier. This should + * be part of the audio driver. + */ + alchemy_gpio_direction_output(209, 1); + + pm_power_off = mirage_power_off; + _machine_halt = mirage_power_off; + _machine_restart = (void(*)(char *))mips_softreset; + } #endif #ifdef CONFIG_MIPS_BOSPORUS diff --git a/arch/mips/alchemy/xxs1500/init.c b/arch/mips/alchemy/xxs1500/init.c index 15125c2fda7..34a90a4bb6f 100644 --- a/arch/mips/alchemy/xxs1500/init.c +++ b/arch/mips/alchemy/xxs1500/init.c @@ -51,10 +51,9 @@ void __init prom_init(void) prom_init_cmdline(); memsize_str = prom_getenv("memsize"); - if (!memsize_str) + if (!memsize_str || strict_strtoul(memsize_str, 0, &memsize)) memsize = 0x04000000; - else - strict_strtoul(memsize_str, 0, &memsize); + add_memory_region(0, memsize, BOOT_MEM_RAM); } diff --git a/arch/mips/ar7/gpio.c b/arch/mips/ar7/gpio.c index 425dfa5d6e1..bb571bcdb8f 100644 --- a/arch/mips/ar7/gpio.c +++ b/arch/mips/ar7/gpio.c @@ -325,9 +325,7 @@ int __init ar7_gpio_init(void) size = 0x1f; } - gpch->regs = ioremap_nocache(AR7_REGS_GPIO, - AR7_REGS_GPIO + 0x10); - + gpch->regs = ioremap_nocache(AR7_REGS_GPIO, size); if (!gpch->regs) { printk(KERN_ERR "%s: failed to ioremap regs\n", gpch->chip.label); diff --git a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c index 88c9d963be8..9a6243676e2 100644 --- a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c +++ b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c @@ -16,8 +16,8 @@ int main(int argc, char *argv[]) { + unsigned long long vmlinux_size, vmlinux_load_addr, vmlinuz_load_addr; struct stat sb; - uint64_t vmlinux_size, vmlinux_load_addr, vmlinuz_load_addr; if (argc != 3) { fprintf(stderr, "Usage: %s <pathname> <vmlinux_load_addr>\n", diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig index caae2285816..cad555ebeca 100644 --- a/arch/mips/cavium-octeon/Kconfig +++ b/arch/mips/cavium-octeon/Kconfig @@ -1,11 +1,7 @@ -config CAVIUM_OCTEON_SPECIFIC_OPTIONS - bool "Enable Octeon specific options" - depends on CPU_CAVIUM_OCTEON - default "y" +if CPU_CAVIUM_OCTEON config CAVIUM_CN63XXP1 bool "Enable CN63XXP1 errata worarounds" - depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS default "n" help The CN63XXP1 chip requires build time workarounds to @@ -16,7 +12,6 @@ config CAVIUM_CN63XXP1 config CAVIUM_OCTEON_2ND_KERNEL bool "Build the kernel to be used as a 2nd kernel on the same chip" - depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS default "n" help This option configures this kernel to be linked at a different @@ -26,7 +21,6 @@ config CAVIUM_OCTEON_2ND_KERNEL config CAVIUM_OCTEON_HW_FIX_UNALIGNED bool "Enable hardware fixups of unaligned loads and stores" - depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS default "y" help Configure the Octeon hardware to automatically fix unaligned loads @@ -38,7 +32,6 @@ config CAVIUM_OCTEON_HW_FIX_UNALIGNED config CAVIUM_OCTEON_CVMSEG_SIZE int "Number of L1 cache lines reserved for CVMSEG memory" - depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS range 0 54 default 1 help @@ -50,7 +43,6 @@ config CAVIUM_OCTEON_CVMSEG_SIZE config CAVIUM_OCTEON_LOCK_L2 bool "Lock often used kernel code in the L2" - depends on 
CAVIUM_OCTEON_SPECIFIC_OPTIONS default "y" help Enable locking parts of the kernel into the L2 cache. @@ -93,7 +85,6 @@ config CAVIUM_OCTEON_LOCK_L2_MEMCPY config ARCH_SPARSEMEM_ENABLE def_bool y select SPARSEMEM_STATIC - depends on CPU_CAVIUM_OCTEON config CAVIUM_OCTEON_HELPER def_bool y @@ -107,6 +98,8 @@ config NEED_SG_DMA_LENGTH config SWIOTLB def_bool y - depends on CPU_CAVIUM_OCTEON select IOMMU_HELPER select NEED_SG_DMA_LENGTH + + +endif # CPU_CAVIUM_OCTEON diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h index 650ac9ba734..b4db69fbc40 100644 --- a/arch/mips/include/asm/cache.h +++ b/arch/mips/include/asm/cache.h @@ -17,6 +17,6 @@ #define SMP_CACHE_SHIFT L1_CACHE_SHIFT #define SMP_CACHE_BYTES L1_CACHE_BYTES -#define __read_mostly __attribute__((__section__(".data.read_mostly"))) +#define __read_mostly __attribute__((__section__(".data..read_mostly"))) #endif /* _ASM_CACHE_H */ diff --git a/arch/mips/include/asm/cevt-r4k.h b/arch/mips/include/asm/cevt-r4k.h index fa4328f9124..65f9bdd02f1 100644 --- a/arch/mips/include/asm/cevt-r4k.h +++ b/arch/mips/include/asm/cevt-r4k.h @@ -14,6 +14,9 @@ #ifndef __ASM_CEVT_R4K_H #define __ASM_CEVT_R4K_H +#include <linux/clockchips.h> +#include <asm/time.h> + DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device); void mips_event_handler(struct clock_event_device *dev); diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h index 655f849bd08..7aa37ddfca4 100644 --- a/arch/mips/include/asm/dma-mapping.h +++ b/arch/mips/include/asm/dma-mapping.h @@ -5,7 +5,9 @@ #include <asm/cache.h> #include <asm-generic/dma-coherent.h> +#ifndef CONFIG_SGI_IP27 /* Kludge to fix 2.6.39 build for IP27 */ #include <dma-coherence.h> +#endif extern struct dma_map_ops *mips_dma_map_ops; diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h index f5e85601532..c565b7c3f0b 100644 --- a/arch/mips/include/asm/hugetlb.h +++ b/arch/mips/include/asm/hugetlb.h @@ -70,6 +70,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { + flush_tlb_mm(vma->vm_mm); } static inline int huge_pte_none(pte_t pte) diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h b/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h index 32978d32561..ed72e6a26b7 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h @@ -88,7 +88,7 @@ struct bcm_tag { char kernel_crc[CRC_LEN]; /* 228-235: Unused at present */ char reserved1[8]; - /* 236-239: CRC32 of header excluding tagVersion */ + /* 236-239: CRC32 of header excluding last 20 bytes */ char header_crc[CRC_LEN]; /* 240-255: Unused at present */ char reserved2[16]; diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c index 9ce9f64cb76..2d8e447cb82 100644 --- a/arch/mips/jazz/jazzdma.c +++ b/arch/mips/jazz/jazzdma.c @@ -211,7 +211,7 @@ EXPORT_SYMBOL(vdma_free); */ int vdma_remap(unsigned long laddr, unsigned long paddr, unsigned long size) { - int first, pages, npages; + int first, pages; if (laddr > 0xffffff) { if (vdma_debug) @@ -228,8 +228,7 @@ int vdma_remap(unsigned long laddr, unsigned long paddr, unsigned long size) return -EINVAL; /* invalid physical address */ } - npages = pages = - (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1; + pages = (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1; first = laddr >> 12; if (vdma_debug) printk("vdma_remap: first=%x, 
pages=%x\n", first, pages); diff --git a/arch/mips/jz4740/dma.c b/arch/mips/jz4740/dma.c index 5ebe75a6835..d7feb898692 100644 --- a/arch/mips/jz4740/dma.c +++ b/arch/mips/jz4740/dma.c @@ -242,9 +242,7 @@ EXPORT_SYMBOL_GPL(jz4740_dma_get_residue); static void jz4740_dma_chan_irq(struct jz4740_dma_chan *dma) { - uint32_t status; - - status = jz4740_dma_read(JZ_REG_DMA_STATUS_CTRL(dma->id)); + (void) jz4740_dma_read(JZ_REG_DMA_STATUS_CTRL(dma->id)); jz4740_dma_write_mask(JZ_REG_DMA_STATUS_CTRL(dma->id), 0, JZ_DMA_STATUS_CTRL_ENABLE | JZ_DMA_STATUS_CTRL_TRANSFER_DONE); diff --git a/arch/mips/jz4740/time.c b/arch/mips/jz4740/time.c index fe01678d94f..eaa853a54af 100644 --- a/arch/mips/jz4740/time.c +++ b/arch/mips/jz4740/time.c @@ -89,7 +89,7 @@ static int jz4740_clockevent_set_next(unsigned long evt, static struct clock_event_device jz4740_clockevent = { .name = "jz4740-timer", - .features = CLOCK_EVT_FEAT_PERIODIC, + .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, .set_next_event = jz4740_clockevent_set_next, .set_mode = jz4740_clockevent_set_mode, .rating = 200, diff --git a/arch/mips/jz4740/timer.c b/arch/mips/jz4740/timer.c index b2c01512905..654d5c3900b 100644 --- a/arch/mips/jz4740/timer.c +++ b/arch/mips/jz4740/timer.c @@ -27,11 +27,13 @@ void jz4740_timer_enable_watchdog(void) { writel(BIT(16), jz4740_timer_base + JZ_REG_TIMER_STOP_CLEAR); } +EXPORT_SYMBOL_GPL(jz4740_timer_enable_watchdog); void jz4740_timer_disable_watchdog(void) { writel(BIT(16), jz4740_timer_base + JZ_REG_TIMER_STOP_SET); } +EXPORT_SYMBOL_GPL(jz4740_timer_disable_watchdog); void __init jz4740_timer_init(void) { diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 94ca2b018af..feb8021a305 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -23,6 +23,7 @@ #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ #define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */ +#define JUMP_RANGE_MASK ((1UL << 28) - 1) #define INSN_NOP 0x00000000 /* nop */ #define INSN_JAL(addr) \ @@ -44,12 +45,12 @@ static inline void ftrace_dyn_arch_init_insns(void) /* jal (ftrace_caller + 8), jump over the first two instruction */ buf = (u32 *)&insn_jal_ftrace_caller; - uasm_i_jal(&buf, (FTRACE_ADDR + 8)); + uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK); #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* j ftrace_graph_caller */ buf = (u32 *)&insn_j_ftrace_graph_caller; - uasm_i_j(&buf, (unsigned long)ftrace_graph_caller); + uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK); #endif } diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index d21c388c011..584e6b55c86 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -540,8 +540,8 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit) secure_computing(regs->regs[2]); if (unlikely(current->audit_context) && entryexit) - audit_syscall_exit(AUDITSC_RESULT(regs->regs[2]), - regs->regs[2]); + audit_syscall_exit(AUDITSC_RESULT(regs->regs[7]), + -regs->regs[2]); if (!(current->ptrace & PT_PTRACED)) goto out; diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 7f5468b38d4..7f1377eb22d 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -565,7 +565,7 @@ einval: li v0, -ENOSYS sys sys_ioprio_get 2 /* 4315 */ sys sys_utimensat 4 sys sys_signalfd 3 - sys sys_ni_syscall 0 + sys sys_ni_syscall 0 /* was timerfd */ sys sys_eventfd 1 sys sys_fallocate 6 /* 4320 */ sys sys_timerfd_create 2 diff --git 
a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index a2e1fcbc41d..7c0ef7f128b 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -404,7 +404,7 @@ sys_call_table: PTR sys_ioprio_get PTR sys_utimensat /* 5275 */ PTR sys_signalfd - PTR sys_ni_syscall + PTR sys_ni_syscall /* was timerfd */ PTR sys_eventfd PTR sys_fallocate PTR sys_timerfd_create /* 5280 */ diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index b2c7624995b..de6c5563bea 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -403,7 +403,7 @@ EXPORT(sysn32_call_table) PTR sys_ioprio_get PTR compat_sys_utimensat PTR compat_sys_signalfd /* 6280 */ - PTR sys_ni_syscall + PTR sys_ni_syscall /* was timerfd */ PTR sys_eventfd PTR sys_fallocate PTR sys_timerfd_create diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 049a9c8c49a..b0541dda883 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -522,7 +522,7 @@ sys_call_table: PTR sys_ioprio_get /* 4315 */ PTR compat_sys_utimensat PTR compat_sys_signalfd - PTR sys_ni_syscall + PTR sys_ni_syscall /* was timerfd */ PTR sys_eventfd PTR sys32_fallocate /* 4320 */ PTR sys_timerfd_create diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 71350f7f2d8..e9b3af27d84 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -374,7 +374,8 @@ void __noreturn die(const char *str, struct pt_regs *regs) unsigned long dvpret = dvpe(); #endif /* CONFIG_MIPS_MT_SMTC */ - notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV); + if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP) + sig = 0; console_verbose(); spin_lock_irq(&die_lock); @@ -383,9 +384,6 @@ void __noreturn die(const char *str, struct pt_regs *regs) mips_mt_regdump(dvpret); #endif /* CONFIG_MIPS_MT_SMTC */ - if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP) - sig = 0; - printk("%s[#%d]:\n", str, ++die_counter); show_registers(regs); add_taint(TAINT_DIE); diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 832afbb8758..e4b0b0bec03 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S @@ -74,6 +74,7 @@ SECTIONS INIT_TASK_DATA(PAGE_SIZE) NOSAVE_DATA CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) + READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) DATA_DATA CONSTRUCTORS } diff --git a/arch/mips/loongson/common/env.c b/arch/mips/loongson/common/env.c index 11b193f848f..d93830ad611 100644 --- a/arch/mips/loongson/common/env.c +++ b/arch/mips/loongson/common/env.c @@ -29,9 +29,10 @@ unsigned long memsize, highmemsize; #define parse_even_earlier(res, option, p) \ do { \ - int ret; \ + unsigned int tmp __maybe_unused; \ + \ if (strncmp(option, (char *)p, strlen(option)) == 0) \ - ret = strict_strtol((char *)p + strlen(option"="), 10, &res); \ + tmp = strict_strtol((char *)p + strlen(option"="), 10, &res); \ } while (0) void __init prom_init_env(void) diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index b4923a75cb4..71bddf8f7d2 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -1075,7 +1075,6 @@ static int __cpuinit probe_scache(void) unsigned long flags, addr, begin, end, pow2; unsigned int config = read_c0_config(); struct cpuinfo_mips *c = &current_cpu_data; - int tmp; if (config & CONF_SC) return 0; @@ -1108,7 +1107,6 @@ static int __cpuinit probe_scache(void) /* Now search for the wrap
around point. */ pow2 = (128 * 1024); - tmp = 0; for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) { cache_op(Index_Load_Tag_SD, addr); __asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */ diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 5ef294fbb6e..f5734c2c809 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -1151,8 +1151,8 @@ static void __cpuinit build_r4000_tlb_refill_handler(void) struct uasm_reloc *r = relocs; u32 *f; unsigned int final_len; - struct mips_huge_tlb_info htlb_info; - enum vmalloc64_mode vmalloc_mode; + struct mips_huge_tlb_info htlb_info __maybe_unused; + enum vmalloc64_mode vmalloc_mode __maybe_unused; memset(tlb_handler, 0, sizeof(tlb_handler)); memset(labels, 0, sizeof(labels)); diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c index 414f0c99b19..31180c321a1 100644 --- a/arch/mips/mti-malta/malta-init.c +++ b/arch/mips/mti-malta/malta-init.c @@ -193,8 +193,6 @@ extern struct plat_smp_ops msmtc_smp_ops; void __init prom_init(void) { - int result; - prom_argc = fw_arg0; _prom_argv = (int *) fw_arg1; _prom_envp = (int *) fw_arg2; @@ -360,20 +358,14 @@ void __init prom_init(void) #ifdef CONFIG_SERIAL_8250_CONSOLE console_config(); #endif - /* Early detection of CMP support */ - result = gcmp_probe(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ); - #ifdef CONFIG_MIPS_CMP - if (result) + /* Early detection of CMP support */ + if (gcmp_probe(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ)) register_smp_ops(&cmp_smp_ops); + else #endif #ifdef CONFIG_MIPS_MT_SMP -#ifdef CONFIG_MIPS_CMP - if (!result) register_smp_ops(&vsmp_smp_ops); -#else - register_smp_ops(&vsmp_smp_ops); -#endif #endif #ifdef CONFIG_MIPS_MT_SMTC register_smp_ops(&msmtc_smp_ops); diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c index 9027061f0ea..e85c977328d 100644 --- a/arch/mips/mti-malta/malta-int.c +++ b/arch/mips/mti-malta/malta-int.c @@ -56,7 +56,6 @@ static DEFINE_RAW_SPINLOCK(mips_irq_lock); static inline int mips_pcibios_iack(void) { int irq; - u32 dummy; /* * Determine highest priority pending interrupt by performing @@ -83,7 +82,7 @@ static inline int mips_pcibios_iack(void) BONITO_PCIMAP_CFG = 0x20000; /* Flush Bonito register block */ - dummy = BONITO_PCIMAP_CFG; + (void) BONITO_PCIMAP_CFG; iob(); /* sync */ irq = __raw_readl((u32 *)_pcictrl_bonito_pcicfg); diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c b/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c index f9b9dcdfa9d..98fd0099d96 100644 --- a/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c +++ b/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c @@ -97,7 +97,7 @@ static int msp_per_irq_set_affinity(struct irq_data *d, static struct irq_chip msp_per_irq_controller = { .name = "MSP_PER", - .irq_enable = unmask_per_irq. 
+ .irq_enable = unmask_per_irq, .irq_disable = mask_per_irq, .irq_ack = msp_per_irq_ack, #ifdef CONFIG_SMP diff --git a/arch/mips/power/hibernate.S b/arch/mips/power/hibernate.S index dbb5c7b4b70..f8a751c0328 100644 --- a/arch/mips/power/hibernate.S +++ b/arch/mips/power/hibernate.S @@ -35,7 +35,7 @@ LEAF(swsusp_arch_resume) 0: PTR_L t1, PBE_ADDRESS(t0) /* source */ PTR_L t2, PBE_ORIG_ADDRESS(t0) /* destination */ - PTR_ADDIU t3, t1, PAGE_SIZE + PTR_ADDU t3, t1, PAGE_SIZE 1: REG_L t8, (t1) REG_S t8, (t2) diff --git a/arch/mips/rb532/gpio.c b/arch/mips/rb532/gpio.c index 37de05d595e..6c47dfeb7be 100644 --- a/arch/mips/rb532/gpio.c +++ b/arch/mips/rb532/gpio.c @@ -185,7 +185,7 @@ int __init rb532_gpio_init(void) struct resource *r; r = rb532_gpio_reg0_res; - rb532_gpio_chip->regbase = ioremap_nocache(r->start, r->end - r->start); + rb532_gpio_chip->regbase = ioremap_nocache(r->start, resource_size(r)); if (!rb532_gpio_chip->regbase) { printk(KERN_ERR "rb532: cannot remap GPIO register 0\n"); diff --git a/arch/mips/sgi-ip22/ip22-platform.c b/arch/mips/sgi-ip22/ip22-platform.c index deddbf0ebe5..698904daf90 100644 --- a/arch/mips/sgi-ip22/ip22-platform.c +++ b/arch/mips/sgi-ip22/ip22-platform.c @@ -132,7 +132,7 @@ static struct platform_device eth1_device = { */ static int __init sgiseeq_devinit(void) { - unsigned int tmp; + unsigned int pbdma __maybe_unused; int res, i; eth0_pd.hpc = hpc3c0; @@ -151,7 +151,7 @@ static int __init sgiseeq_devinit(void) /* Second HPC is missing? */ if (ip22_is_fullhouse() || - get_dbe(tmp, (unsigned int *)&hpc3c1->pbdma[1])) + get_dbe(pbdma, (unsigned int *)&hpc3c1->pbdma[1])) return 0; sgimc->giopar |= SGIMC_GIOPAR_MASTEREXP1 | SGIMC_GIOPAR_EXP164 | diff --git a/arch/mips/sgi-ip22/ip22-time.c b/arch/mips/sgi-ip22/ip22-time.c index 603fc91c103..1a94c989418 100644 --- a/arch/mips/sgi-ip22/ip22-time.c +++ b/arch/mips/sgi-ip22/ip22-time.c @@ -32,7 +32,7 @@ static unsigned long dosample(void) { u32 ct0, ct1; - u8 msb, lsb; + u8 msb; /* Start the counter. */ sgint->tcword = (SGINT_TCWORD_CNT2 | SGINT_TCWORD_CALL | @@ -46,7 +46,7 @@ static unsigned long dosample(void) /* Latch and spin until top byte of counter2 is zero */ do { writeb(SGINT_TCWORD_CNT2 | SGINT_TCWORD_CLAT, &sgint->tcword); - lsb = readb(&sgint->tcnt2); + (void) readb(&sgint->tcnt2); msb = readb(&sgint->tcnt2); ct1 = read_c0_count(); } while (msb); diff --git a/arch/mips/sgi-ip27/ip27-hubio.c b/arch/mips/sgi-ip27/ip27-hubio.c index a1fa4abb3f6..cd0d5b06cd8 100644 --- a/arch/mips/sgi-ip27/ip27-hubio.c +++ b/arch/mips/sgi-ip27/ip27-hubio.c @@ -29,7 +29,6 @@ unsigned long hub_pio_map(cnodeid_t cnode, xwidgetnum_t widget, unsigned long xtalk_addr, size_t size) { nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode); - volatile hubreg_t junk; unsigned i; /* use small-window mapping if possible */ @@ -64,7 +63,7 @@ unsigned long hub_pio_map(cnodeid_t cnode, xwidgetnum_t widget, * after we write it. 
*/ IIO_ITTE_PUT(nasid, i, HUB_PIO_MAP_TO_MEM, widget, xtalk_addr); - junk = HUB_L(IIO_ITTE_GET(nasid, i)); + (void) HUB_L(IIO_ITTE_GET(nasid, i)); return NODE_BWIN_BASE(nasid, widget) + (xtalk_addr % BWIN_SIZE); } diff --git a/arch/mips/sgi-ip27/ip27-klnuma.c b/arch/mips/sgi-ip27/ip27-klnuma.c index c3d30a88daf..1d1919a44e8 100644 --- a/arch/mips/sgi-ip27/ip27-klnuma.c +++ b/arch/mips/sgi-ip27/ip27-klnuma.c @@ -54,11 +54,8 @@ void __init setup_replication_mask(void) static __init void set_ktext_source(nasid_t client_nasid, nasid_t server_nasid) { - cnodeid_t client_cnode; kern_vars_t *kvp; - client_cnode = NASID_TO_COMPACT_NODEID(client_nasid); - kvp = &hub_data(client_nasid)->kern_vars; KERN_VARS_ADDR(client_nasid) = (unsigned long)kvp; diff --git a/arch/mips/sni/time.c b/arch/mips/sni/time.c index c76151b5656..0904d4d30cb 100644 --- a/arch/mips/sni/time.c +++ b/arch/mips/sni/time.c @@ -95,7 +95,7 @@ static void __init sni_a20r_timer_setup(void) static __init unsigned long dosample(void) { u32 ct0, ct1; - volatile u8 msb, lsb; + volatile u8 msb; /* Start the counter. */ outb_p(0x34, 0x43); @@ -108,7 +108,7 @@ static __init unsigned long dosample(void) /* Latch and spin until top byte of counter0 is zero */ do { outb(0x00, 0x43); - lsb = inb(0x40); + (void) inb(0x40); msb = inb(0x40); ct1 = read_c0_count(); } while (msb); diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 55613e33e26..a6ae1cfad86 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -933,12 +933,16 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, if (data && !(data & DABR_TRANSLATION)) return -EIO; #ifdef CONFIG_HAVE_HW_BREAKPOINT + if (ptrace_get_breakpoints(task) < 0) + return -ESRCH; + bp = thread->ptrace_bps[0]; if ((!data) || !(data & (DABR_DATA_WRITE | DABR_DATA_READ))) { if (bp) { unregister_hw_breakpoint(bp); thread->ptrace_bps[0] = NULL; } + ptrace_put_breakpoints(task); return 0; } if (bp) { @@ -948,9 +952,12 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, (DABR_DATA_WRITE | DABR_DATA_READ), &attr.bp_type); ret = modify_user_hw_breakpoint(bp, &attr); - if (ret) + if (ret) { + ptrace_put_breakpoints(task); return ret; + } thread->ptrace_bps[0] = bp; + ptrace_put_breakpoints(task); thread->dabr = data; return 0; } @@ -965,9 +972,12 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, ptrace_triggered, task); if (IS_ERR(bp)) { thread->ptrace_bps[0] = NULL; + ptrace_put_breakpoints(task); return PTR_ERR(bp); } + ptrace_put_breakpoints(task); + #endif /* CONFIG_HAVE_HW_BREAKPOINT */ /* Move contents to the DABR register */ diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c index 188272934cf..104faa8aa23 100644 --- a/arch/powerpc/platforms/83xx/suspend.c +++ b/arch/powerpc/platforms/83xx/suspend.c @@ -318,17 +318,20 @@ static const struct platform_suspend_ops mpc83xx_suspend_ops = { .end = mpc83xx_suspend_end, }; +static struct of_device_id pmc_match[]; static int pmc_probe(struct platform_device *ofdev) { + const struct of_device_id *match; struct device_node *np = ofdev->dev.of_node; struct resource res; struct pmc_type *type; int ret = 0; - if (!ofdev->dev.of_match) + match = of_match_device(pmc_match, &ofdev->dev); + if (!match) return -EINVAL; - type = ofdev->dev.of_match->data; + type = match->data; if (!of_device_is_available(np)) return -ENODEV; diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index d5679dc1e20..01cd2f08951 
100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c @@ -304,8 +304,10 @@ static int __devinit fsl_msi_setup_hwirq(struct fsl_msi *msi, return 0; } +static const struct of_device_id fsl_of_msi_ids[]; static int __devinit fsl_of_msi_probe(struct platform_device *dev) { + const struct of_device_id *match; struct fsl_msi *msi; struct resource res; int err, i, j, irq_index, count; @@ -316,9 +318,10 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev) u32 offset; static const u32 all_avail[] = { 0, NR_MSI_IRQS }; - if (!dev->dev.of_match) + match = of_match_device(fsl_of_msi_ids, &dev->dev); + if (!match) return -EINVAL; - features = dev->dev.of_match->data; + features = match->data; printk(KERN_DEBUG "Setting up Freescale MSI support\n"); diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h index 72b2e2f2d32..7e91c58072e 100644 --- a/arch/s390/include/asm/diag.h +++ b/arch/s390/include/asm/diag.h @@ -9,9 +9,22 @@ #define _ASM_S390_DIAG_H /* - * Diagnose 10: Release pages + * Diagnose 10: Release page range */ -extern void diag10(unsigned long addr); +static inline void diag10_range(unsigned long start_pfn, unsigned long num_pfn) +{ + unsigned long start_addr, end_addr; + + start_addr = start_pfn << PAGE_SHIFT; + end_addr = (start_pfn + num_pfn - 1) << PAGE_SHIFT; + + asm volatile( + "0: diag %0,%1,0x10\n" + "1:\n" + EX_TABLE(0b, 1b) + EX_TABLE(1b, 1b) + : : "a" (start_addr), "a" (end_addr)); +} /* * Diagnose 14: Input spool file manipulation diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index a6f0e7cc9cd..8c277caa8d3 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h @@ -23,7 +23,7 @@ static inline int init_new_context(struct task_struct *tsk, #ifdef CONFIG_64BIT mm->context.asce_bits |= _ASCE_TYPE_REGION3; #endif - if (current->mm->context.alloc_pgste) { + if (current->mm && current->mm->context.alloc_pgste) { /* * alloc_pgste indicates, that any NEW context will be created * with extended page tables. The old context is unchanged. 
The diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c index c032d11da8a..8237fc07ac7 100644 --- a/arch/s390/kernel/diag.c +++ b/arch/s390/kernel/diag.c @@ -9,27 +9,6 @@ #include <asm/diag.h> /* - * Diagnose 10: Release pages - */ -void diag10(unsigned long addr) -{ - if (addr >= 0x7ff00000) - return; - asm volatile( -#ifdef CONFIG_64BIT - " sam31\n" - " diag %0,%0,0x10\n" - "0: sam64\n" -#else - " diag %0,%0,0x10\n" - "0:\n" -#endif - EX_TABLE(0b, 0b) - : : "a" (addr)); -} -EXPORT_SYMBOL(diag10); - -/* * Diagnose 14: Input spool file manipulation */ int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode) diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index c83726c9fe0..3d4a78fc1ad 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c @@ -672,6 +672,7 @@ static struct insn opcode_b2[] = { { "rp", 0x77, INSTR_S_RD }, { "stcke", 0x78, INSTR_S_RD }, { "sacf", 0x79, INSTR_S_RD }, + { "spp", 0x80, INSTR_S_RD }, { "stsi", 0x7d, INSTR_S_RD }, { "srnm", 0x99, INSTR_S_RD }, { "stfpc", 0x9c, INSTR_S_RD }, diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 648f64239a9..1b67fc6ebdc 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -836,7 +836,7 @@ restart_base: stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on basr %r14,0 l %r14,restart_addr-.(%r14) - br %r14 # branch to start_secondary + basr %r14,%r14 # branch to start_secondary restart_addr: .long start_secondary .align 8 diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 9d3603d6c51..9fd86456349 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S @@ -841,7 +841,7 @@ restart_base: mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1) xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on - jg start_secondary + brasl %r14,start_secondary .align 8 restart_vtime: .long 0x7fffffff,0xffffffff diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c index c66ffd8dbbb..1f1dba9dcf5 100644 --- a/arch/s390/mm/cmm.c +++ b/arch/s390/mm/cmm.c @@ -91,7 +91,7 @@ static long cmm_alloc_pages(long nr, long *counter, } else free_page((unsigned long) npa); } - diag10(addr); + diag10_range(addr >> PAGE_SHIFT, 1); pa->pages[pa->index++] = addr; (*counter)++; spin_unlock(&cmm_lock); diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c index 4952872d6f0..33cbd373cce 100644 --- a/arch/s390/oprofile/hwsampler.c +++ b/arch/s390/oprofile/hwsampler.c @@ -1021,20 +1021,14 @@ deallocate_exit: return rc; } -long hwsampler_query_min_interval(void) +unsigned long hwsampler_query_min_interval(void) { - if (min_sampler_rate) - return min_sampler_rate; - else - return -EINVAL; + return min_sampler_rate; } -long hwsampler_query_max_interval(void) +unsigned long hwsampler_query_max_interval(void) { - if (max_sampler_rate) - return max_sampler_rate; - else - return -EINVAL; + return max_sampler_rate; } unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu) diff --git a/arch/s390/oprofile/hwsampler.h b/arch/s390/oprofile/hwsampler.h index 8c72b59316b..1912f3bb190 100644 --- a/arch/s390/oprofile/hwsampler.h +++ b/arch/s390/oprofile/hwsampler.h @@ -102,8 +102,8 @@ int hwsampler_setup(void); int hwsampler_shutdown(void); int hwsampler_allocate(unsigned long sdbt, unsigned long sdb); int hwsampler_deallocate(void); -long hwsampler_query_min_interval(void); -long hwsampler_query_max_interval(void); +unsigned long hwsampler_query_min_interval(void); +unsigned long 
hwsampler_query_max_interval(void); int hwsampler_start_all(unsigned long interval); int hwsampler_stop_all(void); int hwsampler_deactivate(unsigned int cpu); diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c index c63d7e58352..5995e9bc72d 100644 --- a/arch/s390/oprofile/init.c +++ b/arch/s390/oprofile/init.c @@ -145,15 +145,11 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops) * create hwsampler files only if hwsampler_setup() succeeds. */ oprofile_min_interval = hwsampler_query_min_interval(); - if (oprofile_min_interval < 0) { - oprofile_min_interval = 0; + if (oprofile_min_interval == 0) return -ENODEV; - } oprofile_max_interval = hwsampler_query_max_interval(); - if (oprofile_max_interval < 0) { - oprofile_max_interval = 0; + if (oprofile_max_interval == 0) return -ENODEV; - } if (oprofile_timer_init(ops)) return -ENODEV; diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c index 2130ca674e9..3d7b209b217 100644 --- a/arch/sh/kernel/ptrace_32.c +++ b/arch/sh/kernel/ptrace_32.c @@ -117,7 +117,11 @@ void user_enable_single_step(struct task_struct *child) set_tsk_thread_flag(child, TIF_SINGLESTEP); + if (ptrace_get_breakpoints(child) < 0) + return; + set_single_step(child, pc); + ptrace_put_breakpoints(child); } void user_disable_single_step(struct task_struct *child) diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c index f679c57644d..1e34f29e58b 100644 --- a/arch/sparc/kernel/apc.c +++ b/arch/sparc/kernel/apc.c @@ -165,7 +165,7 @@ static int __devinit apc_probe(struct platform_device *op) return 0; } -static struct of_device_id __initdata apc_match[] = { +static struct of_device_id apc_match[] = { { .name = APC_OBPNAME, }, diff --git a/arch/sparc/kernel/pci_sabre.c b/arch/sparc/kernel/pci_sabre.c index 948068a083f..d1840dbdaa2 100644 --- a/arch/sparc/kernel/pci_sabre.c +++ b/arch/sparc/kernel/pci_sabre.c @@ -452,8 +452,10 @@ static void __devinit sabre_pbm_init(struct pci_pbm_info *pbm, sabre_scan_bus(pbm, &op->dev); } +static const struct of_device_id sabre_match[]; static int __devinit sabre_probe(struct platform_device *op) { + const struct of_device_id *match; const struct linux_prom64_registers *pr_regs; struct device_node *dp = op->dev.of_node; struct pci_pbm_info *pbm; @@ -463,7 +465,8 @@ static int __devinit sabre_probe(struct platform_device *op) const u32 *vdma; u64 clear_irq; - hummingbird_p = op->dev.of_match && (op->dev.of_match->data != NULL); + match = of_match_device(sabre_match, &op->dev); + hummingbird_p = match && (match->data != NULL); if (!hummingbird_p) { struct device_node *cpu_dp; diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c index fecfcb2063c..283fbc329a4 100644 --- a/arch/sparc/kernel/pci_schizo.c +++ b/arch/sparc/kernel/pci_schizo.c @@ -1458,11 +1458,15 @@ out_err: return err; } +static const struct of_device_id schizo_match[]; static int __devinit schizo_probe(struct platform_device *op) { - if (!op->dev.of_match) + const struct of_device_id *match; + + match = of_match_device(schizo_match, &op->dev); + if (!match) return -EINVAL; - return __schizo_init(op, (unsigned long) op->dev.of_match->data); + return __schizo_init(op, (unsigned long)match->data); } /* The ordering of this table is very important. 
Some Tomatillo diff --git a/arch/sparc/kernel/pmc.c b/arch/sparc/kernel/pmc.c index 93d7b4465f8..6a585d39358 100644 --- a/arch/sparc/kernel/pmc.c +++ b/arch/sparc/kernel/pmc.c @@ -69,7 +69,7 @@ static int __devinit pmc_probe(struct platform_device *op) return 0; } -static struct of_device_id __initdata pmc_match[] = { +static struct of_device_id pmc_match[] = { { .name = PMC_OBPNAME, }, diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c index 91c10fb7085..850a1360c0d 100644 --- a/arch/sparc/kernel/smp_32.c +++ b/arch/sparc/kernel/smp_32.c @@ -53,6 +53,7 @@ cpumask_t smp_commenced_mask = CPU_MASK_NONE; void __cpuinit smp_store_cpu_info(int id) { int cpu_node; + int mid; cpu_data(id).udelay_val = loops_per_jiffy; @@ -60,10 +61,13 @@ void __cpuinit smp_store_cpu_info(int id) cpu_data(id).clock_tick = prom_getintdefault(cpu_node, "clock-frequency", 0); cpu_data(id).prom_node = cpu_node; - cpu_data(id).mid = cpu_get_hwmid(cpu_node); + mid = cpu_get_hwmid(cpu_node); - if (cpu_data(id).mid < 0) - panic("No MID found for CPU%d at node 0x%08d", id, cpu_node); + if (mid < 0) { + printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08d", id, cpu_node); + mid = 0; + } + cpu_data(id).mid = mid; } void __init smp_cpus_done(unsigned int max_cpus) diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c index 4e236391b63..96046a4024c 100644 --- a/arch/sparc/kernel/time_32.c +++ b/arch/sparc/kernel/time_32.c @@ -168,7 +168,7 @@ static int __devinit clock_probe(struct platform_device *op) return 0; } -static struct of_device_id __initdata clock_match[] = { +static struct of_device_id clock_match[] = { { .name = "eeprom", }, diff --git a/arch/sparc/lib/checksum_32.S b/arch/sparc/lib/checksum_32.S index 3632cb34e91..0084c3361e1 100644 --- a/arch/sparc/lib/checksum_32.S +++ b/arch/sparc/lib/checksum_32.S @@ -289,10 +289,16 @@ cc_end_cruft: /* Also, handle the alignment code out of band. */ cc_dword_align: - cmp %g1, 6 - bl,a ccte + cmp %g1, 16 + bge 1f + srl %g1, 1, %o3 +2: cmp %o3, 0 + be,a ccte andcc %g1, 0xf, %o3 - andcc %o0, 0x1, %g0 + andcc %o3, %o0, %g0 ! Check %o0 only (%o1 has the same last 2 bits) + be,a 2b + srl %o3, 1, %o3 +1: andcc %o0, 0x1, %g0 bne ccslow andcc %o0, 0x2, %g0 be 1f diff --git a/arch/um/os-Linux/util.c b/arch/um/os-Linux/util.c index 6ea77979531..42827cafa6a 100644 --- a/arch/um/os-Linux/util.c +++ b/arch/um/os-Linux/util.c @@ -5,6 +5,7 @@ #include <stdio.h> #include <stdlib.h> +#include <unistd.h> #include <errno.h> #include <signal.h> #include <string.h> @@ -75,6 +76,26 @@ void setup_hostinfo(char *buf, int len) host.release, host.version, host.machine); } +/* + * We cannot use glibc's abort(). It makes use of tgkill() which + * has no effect within UML's kernel threads. + * After that glibc would execute an invalid instruction to kill + * the calling process and UML crashes with SIGSEGV. 
+ */ +static inline void __attribute__ ((noreturn)) uml_abort(void) +{ + sigset_t sig; + + fflush(NULL); + + if (!sigemptyset(&sig) && !sigaddset(&sig, SIGABRT)) + sigprocmask(SIG_UNBLOCK, &sig, 0); + + for (;;) + if (kill(getpid(), SIGABRT) < 0) + exit(127); +} + void os_dump_core(void) { int pid; @@ -116,5 +137,5 @@ void os_dump_core(void) while ((pid = waitpid(-1, NULL, WNOHANG | __WALL)) > 0) os_kill_ptraced_process(pid, 0); - abort(); + uml_abort(); } diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h index d87988bacf3..34595d5e103 100644 --- a/arch/x86/include/asm/apicdef.h +++ b/arch/x86/include/asm/apicdef.h @@ -78,6 +78,7 @@ #define APIC_DEST_LOGICAL 0x00800 #define APIC_DEST_PHYSICAL 0x00000 #define APIC_DM_FIXED 0x00000 +#define APIC_DM_FIXED_MASK 0x00700 #define APIC_DM_LOWEST 0x00100 #define APIC_DM_SMI 0x00200 #define APIC_DM_REMRD 0x00300 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 7db7723d1f3..d56187c6b83 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -299,6 +299,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, /* Install a pte for a particular vaddr in kernel space. */ void set_pte_vaddr(unsigned long vaddr, pte_t pte); +extern void native_pagetable_reserve(u64 start, u64 end); #ifdef CONFIG_X86_32 extern void native_pagetable_setup_start(pgd_t *base); extern void native_pagetable_setup_done(pgd_t *base); diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h index 3e094af443c..130f1eeee5f 100644 --- a/arch/x86/include/asm/uv/uv_bau.h +++ b/arch/x86/include/asm/uv/uv_bau.h @@ -94,6 +94,8 @@ /* after this # consecutive successes, bump up the throttle if it was lowered */ #define COMPLETE_THRESHOLD 5 +#define UV_LB_SUBNODEID 0x10 + /* * number of entries in the destination side payload queue */ @@ -124,7 +126,7 @@ * The distribution specification (32 bytes) is interpreted as a 256-bit * distribution vector. Adjacent bits correspond to consecutive even numbered * nodeIDs. The result of adding the index of a given bit to the 15-bit - * 'base_dest_nodeid' field of the header corresponds to the + * 'base_dest_nasid' field of the header corresponds to the * destination nodeID associated with that specified bit. 
*/ struct bau_target_uvhubmask { @@ -176,7 +178,7 @@ struct bau_msg_payload { struct bau_msg_header { unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */ /* bits 5:0 */ - unsigned int base_dest_nodeid:15; /* nasid of the */ + unsigned int base_dest_nasid:15; /* nasid of the */ /* bits 20:6 */ /* first bit in uvhub map */ unsigned int command:8; /* message type */ /* bits 28:21 */ @@ -378,6 +380,10 @@ struct ptc_stats { unsigned long d_rcanceled; /* number of messages canceled by resets */ }; +struct hub_and_pnode { + short uvhub; + short pnode; +}; /* * one per-cpu; to locate the software tables */ @@ -399,10 +405,12 @@ struct bau_control { int baudisabled; int set_bau_off; short cpu; + short osnode; short uvhub_cpu; short uvhub; short cpus_in_socket; short cpus_in_uvhub; + short partition_base_pnode; unsigned short message_number; unsigned short uvhub_quiesce; short socket_acknowledge_count[DEST_Q_SIZE]; @@ -422,15 +430,16 @@ struct bau_control { int congested_period; cycles_t period_time; long period_requests; + struct hub_and_pnode *target_hub_and_pnode; }; static inline int bau_uvhub_isset(int uvhub, struct bau_target_uvhubmask *dstp) { return constant_test_bit(uvhub, &dstp->bits[0]); } -static inline void bau_uvhub_set(int uvhub, struct bau_target_uvhubmask *dstp) +static inline void bau_uvhub_set(int pnode, struct bau_target_uvhubmask *dstp) { - __set_bit(uvhub, &dstp->bits[0]); + __set_bit(pnode, &dstp->bits[0]); } static inline void bau_uvhubs_clear(struct bau_target_uvhubmask *dstp, int nbits) diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h index a501741c233..4298002d0c8 100644 --- a/arch/x86/include/asm/uv/uv_hub.h +++ b/arch/x86/include/asm/uv/uv_hub.h @@ -398,6 +398,8 @@ struct uv_blade_info { unsigned short nr_online_cpus; unsigned short pnode; short memory_nid; + spinlock_t nmi_lock; + unsigned long nmi_count; }; extern struct uv_blade_info *uv_blade_info; extern short *uv_node_to_blade; diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h index 20cafeac745..f5bb64a823d 100644 --- a/arch/x86/include/asm/uv/uv_mmrs.h +++ b/arch/x86/include/asm/uv/uv_mmrs.h @@ -5,7 +5,7 @@ * * SGI UV MMR definitions * - * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved. + * Copyright (C) 2007-2011 Silicon Graphics, Inc. All rights reserved. */ #ifndef _ASM_X86_UV_UV_MMRS_H @@ -1099,5 +1099,19 @@ union uvh_rtc1_int_config_u { } s; }; +/* ========================================================================= */ +/* UVH_SCRATCH5 */ +/* ========================================================================= */ +#define UVH_SCRATCH5 0x2d0200UL +#define UVH_SCRATCH5_32 0x00778 + +#define UVH_SCRATCH5_SCRATCH5_SHFT 0 +#define UVH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL +union uvh_scratch5_u { + unsigned long v; + struct uvh_scratch5_s { + unsigned long scratch5 : 64; /* RW, W1CS */ + } s; +}; #endif /* __ASM_UV_MMRS_X86_H__ */ diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index 643ebf2e2ad..d3d859035af 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h @@ -68,6 +68,17 @@ struct x86_init_oem { }; /** + * struct x86_init_mapping - platform specific initial kernel pagetable setup + * @pagetable_reserve: reserve a range of addresses for kernel pagetable usage + * + * For more details on the purpose of this hook, look in + * init_memory_mapping and the commit that added it. 
+ */ +struct x86_init_mapping { + void (*pagetable_reserve)(u64 start, u64 end); +}; + +/** * struct x86_init_paging - platform specific paging functions * @pagetable_setup_start: platform specific pre paging_init() call * @pagetable_setup_done: platform specific post paging_init() call @@ -123,6 +134,7 @@ struct x86_init_ops { struct x86_init_mpparse mpparse; struct x86_init_irqs irqs; struct x86_init_oem oem; + struct x86_init_mapping mapping; struct x86_init_paging paging; struct x86_init_timers timers; struct x86_init_iommu iommu; diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 33b10a0fc09..7acd2d2ac96 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -37,6 +37,13 @@ #include <asm/smp.h> #include <asm/x86_init.h> #include <asm/emergency-restart.h> +#include <asm/nmi.h> + +/* BMC sets a bit in this MMR to non-zero before sending an NMI */ +#define UVH_NMI_MMR UVH_SCRATCH5 +#define UVH_NMI_MMR_CLEAR (UVH_NMI_MMR + 8) +#define UV_NMI_PENDING_MASK (1UL << 63) +DEFINE_PER_CPU(unsigned long, cpu_last_nmi_count); DEFINE_PER_CPU(int, x2apic_extra_bits); @@ -642,18 +649,46 @@ void __cpuinit uv_cpu_init(void) */ int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data) { + unsigned long real_uv_nmi; + int bid; + if (reason != DIE_NMIUNKNOWN) return NOTIFY_OK; if (in_crash_kexec) /* do nothing if entering the crash kernel */ return NOTIFY_OK; + /* - * Use a lock so only one cpu prints at a time - * to prevent intermixed output. + * Each blade has an MMR that indicates when an NMI has been sent + * to cpus on the blade. If an NMI is detected, atomically + * clear the MMR and update a per-blade NMI count used to + * cause each cpu on the blade to notice a new NMI. + */ + bid = uv_numa_blade_id(); + real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK); + + if (unlikely(real_uv_nmi)) { + spin_lock(&uv_blade_info[bid].nmi_lock); + real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK); + if (real_uv_nmi) { + uv_blade_info[bid].nmi_count++; + uv_write_local_mmr(UVH_NMI_MMR_CLEAR, UV_NMI_PENDING_MASK); + } + spin_unlock(&uv_blade_info[bid].nmi_lock); + } + + if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count)) + return NOTIFY_DONE; + + __get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count; + + /* + * Use a lock so only one cpu prints at a time. + * This prevents intermixed output.
*/ spin_lock(&uv_nmi_lock); - pr_info("NMI stack dump cpu %u:\n", smp_processor_id()); + pr_info("UV NMI stack dump cpu %u:\n", smp_processor_id()); dump_stack(); spin_unlock(&uv_nmi_lock); @@ -661,7 +696,8 @@ int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data) } static struct notifier_block uv_dump_stack_nmi_nb = { - .notifier_call = uv_handle_nmi + .notifier_call = uv_handle_nmi, + .priority = NMI_LOCAL_LOW_PRIOR - 1, }; void uv_register_nmi_notifier(void) @@ -720,8 +756,9 @@ void __init uv_system_init(void) printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades()); bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades(); - uv_blade_info = kmalloc(bytes, GFP_KERNEL); + uv_blade_info = kzalloc(bytes, GFP_KERNEL); BUG_ON(!uv_blade_info); + for (blade = 0; blade < uv_num_possible_blades(); blade++) uv_blade_info[blade].memory_nid = -1; @@ -747,6 +784,7 @@ void __init uv_system_init(void) uv_blade_info[blade].pnode = pnode; uv_blade_info[blade].nr_possible_cpus = 0; uv_blade_info[blade].nr_online_cpus = 0; + spin_lock_init(&uv_blade_info[blade].nmi_lock); max_pnode = max(pnode, max_pnode); blade++; } diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index bb9eb29a52d..6f9d1f6063e 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -613,7 +613,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) #endif /* As a rule processors have APIC timer running in deep C states */ - if (c->x86 >= 0xf && !cpu_has_amd_erratum(amd_erratum_400)) + if (c->x86 > 0xf && !cpu_has_amd_erratum(amd_erratum_400)) set_cpu_cap(c, X86_FEATURE_ARAT); /* @@ -698,7 +698,7 @@ cpu_dev_register(amd_cpu_dev); */ const int amd_erratum_400[] = - AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0x0f, 0x4, 0x2, 0xff, 0xf), + AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf), AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); EXPORT_SYMBOL_GPL(amd_erratum_400); diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 167f97b5596..bb0adad3514 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -509,6 +509,7 @@ recurse: out_free: if (b) { kobject_put(&b->kobj); + list_del(&b->miscj); kfree(b); } return err; diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index 6f8c5e9da97..0f034460260 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c @@ -446,18 +446,20 @@ void intel_init_thermal(struct cpuinfo_x86 *c) */ rdmsr(MSR_IA32_MISC_ENABLE, l, h); + h = lvtthmr_init; /* * The initial value of thermal LVT entries on all APs always reads * 0x10000 because APs are woken up by BSP issuing INIT-SIPI-SIPI * sequence to them and LVT registers are reset to 0s except for * the mask bits which are set to 1s when APs receive INIT IPI. - * Always restore the value that BIOS has programmed on AP based on - * BSP's info we saved since BIOS is always setting the same value - * for all threads/cores + * If BIOS takes over the thermal interrupt and sets its interrupt + * delivery mode to SMI (not fixed), it restores the value that the + * BIOS has programmed on AP based on BSP's info we saved since BIOS + * is always setting the same value for all threads/cores. 
*/ - apic_write(APIC_LVTTHMR, lvtthmr_init); + if ((h & APIC_DM_FIXED_MASK) != APIC_DM_FIXED) + apic_write(APIC_LVTTHMR, lvtthmr_init); - h = lvtthmr_init; if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) { printk(KERN_DEBUG diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index e61539b07d2..447a28de6f0 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -184,26 +184,23 @@ static __initconst const u64 snb_hw_cache_event_ids }, }, [ C(LL ) ] = { - /* - * TBD: Need Off-core Response Performance Monitoring support - */ [ C(OP_READ) ] = { - /* OFFCORE_RESPONSE_0.ANY_DATA.LOCAL_CACHE */ + /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */ [ C(RESULT_ACCESS) ] = 0x01b7, - /* OFFCORE_RESPONSE_1.ANY_DATA.ANY_LLC_MISS */ - [ C(RESULT_MISS) ] = 0x01bb, + /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */ + [ C(RESULT_MISS) ] = 0x01b7, }, [ C(OP_WRITE) ] = { - /* OFFCORE_RESPONSE_0.ANY_RFO.LOCAL_CACHE */ + /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */ [ C(RESULT_ACCESS) ] = 0x01b7, - /* OFFCORE_RESPONSE_1.ANY_RFO.ANY_LLC_MISS */ - [ C(RESULT_MISS) ] = 0x01bb, + /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */ + [ C(RESULT_MISS) ] = 0x01b7, }, [ C(OP_PREFETCH) ] = { - /* OFFCORE_RESPONSE_0.PREFETCH.LOCAL_CACHE */ + /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */ [ C(RESULT_ACCESS) ] = 0x01b7, - /* OFFCORE_RESPONSE_1.PREFETCH.ANY_LLC_MISS */ - [ C(RESULT_MISS) ] = 0x01bb, + /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */ + [ C(RESULT_MISS) ] = 0x01b7, }, }, [ C(DTLB) ] = { @@ -285,26 +282,26 @@ static __initconst const u64 westmere_hw_cache_event_ids }, [ C(LL ) ] = { [ C(OP_READ) ] = { - /* OFFCORE_RESPONSE_0.ANY_DATA.LOCAL_CACHE */ + /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */ [ C(RESULT_ACCESS) ] = 0x01b7, - /* OFFCORE_RESPONSE_1.ANY_DATA.ANY_LLC_MISS */ - [ C(RESULT_MISS) ] = 0x01bb, + /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */ + [ C(RESULT_MISS) ] = 0x01b7, }, /* * Use RFO, not WRITEBACK, because a write miss would typically occur * on RFO. 
*/ [ C(OP_WRITE) ] = { - /* OFFCORE_RESPONSE_1.ANY_RFO.LOCAL_CACHE */ - [ C(RESULT_ACCESS) ] = 0x01bb, - /* OFFCORE_RESPONSE_0.ANY_RFO.ANY_LLC_MISS */ + /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */ + [ C(RESULT_ACCESS) ] = 0x01b7, + /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */ [ C(RESULT_MISS) ] = 0x01b7, }, [ C(OP_PREFETCH) ] = { - /* OFFCORE_RESPONSE_0.PREFETCH.LOCAL_CACHE */ + /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */ [ C(RESULT_ACCESS) ] = 0x01b7, - /* OFFCORE_RESPONSE_1.PREFETCH.ANY_LLC_MISS */ - [ C(RESULT_MISS) ] = 0x01bb, + /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */ + [ C(RESULT_MISS) ] = 0x01b7, }, }, [ C(DTLB) ] = { @@ -352,16 +349,36 @@ static __initconst const u64 westmere_hw_cache_event_ids }; /* - * OFFCORE_RESPONSE MSR bits (subset), See IA32 SDM Vol 3 30.6.1.3 + * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits; + * See IA32 SDM Vol 3B 30.6.1.3 */ -#define DMND_DATA_RD (1 << 0) -#define DMND_RFO (1 << 1) -#define DMND_WB (1 << 3) -#define PF_DATA_RD (1 << 4) -#define PF_DATA_RFO (1 << 5) -#define RESP_UNCORE_HIT (1 << 8) -#define RESP_MISS (0xf600) /* non uncore hit */ +#define NHM_DMND_DATA_RD (1 << 0) +#define NHM_DMND_RFO (1 << 1) +#define NHM_DMND_IFETCH (1 << 2) +#define NHM_DMND_WB (1 << 3) +#define NHM_PF_DATA_RD (1 << 4) +#define NHM_PF_DATA_RFO (1 << 5) +#define NHM_PF_IFETCH (1 << 6) +#define NHM_OFFCORE_OTHER (1 << 7) +#define NHM_UNCORE_HIT (1 << 8) +#define NHM_OTHER_CORE_HIT_SNP (1 << 9) +#define NHM_OTHER_CORE_HITM (1 << 10) + /* reserved */ +#define NHM_REMOTE_CACHE_FWD (1 << 12) +#define NHM_REMOTE_DRAM (1 << 13) +#define NHM_LOCAL_DRAM (1 << 14) +#define NHM_NON_DRAM (1 << 15) + +#define NHM_ALL_DRAM (NHM_REMOTE_DRAM|NHM_LOCAL_DRAM) + +#define NHM_DMND_READ (NHM_DMND_DATA_RD) +#define NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB) +#define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO) + +#define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM) +#define NHM_L3_MISS (NHM_NON_DRAM|NHM_ALL_DRAM|NHM_REMOTE_CACHE_FWD) +#define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS) static __initconst const u64 nehalem_hw_cache_extra_regs [PERF_COUNT_HW_CACHE_MAX] @@ -370,16 +387,16 @@ static __initconst const u64 nehalem_hw_cache_extra_regs { [ C(LL ) ] = { [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = DMND_DATA_RD|RESP_UNCORE_HIT, - [ C(RESULT_MISS) ] = DMND_DATA_RD|RESP_MISS, + [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS, + [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS, }, [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = DMND_RFO|DMND_WB|RESP_UNCORE_HIT, - [ C(RESULT_MISS) ] = DMND_RFO|DMND_WB|RESP_MISS, + [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS, + [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS, }, [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = PF_DATA_RD|PF_DATA_RFO|RESP_UNCORE_HIT, - [ C(RESULT_MISS) ] = PF_DATA_RD|PF_DATA_RFO|RESP_MISS, + [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS, + [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS, }, } }; diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index c969fd9d156..f1a6244d7d9 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c @@ -1183,12 +1183,13 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs) { struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + unsigned long flags; /* This is possible if op is under delayed unoptimizing */ if (kprobe_disabled(&op->kp)) return; - preempt_disable(); + local_irq_save(flags); if (kprobe_running()) { kprobes_inc_nmissed_count(&op->kp); } else { @@ -1207,7 +1208,7 @@ 
static void __kprobes optimized_callback(struct optimized_kprobe *op, opt_pre_handler(&op->kp, regs); __this_cpu_write(current_kprobe, NULL); } - preempt_enable_no_resched(); + local_irq_restore(flags); } static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src) diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 45892dc4b72..f65e5b521db 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -608,6 +608,9 @@ static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data) unsigned len, type; struct perf_event *bp; + if (ptrace_get_breakpoints(tsk) < 0) + return -ESRCH; + data &= ~DR_CONTROL_RESERVED; old_dr7 = ptrace_get_dr7(thread->ptrace_bps); restore: @@ -655,6 +658,9 @@ restore: } goto restore; } + + ptrace_put_breakpoints(tsk); + return ((orig_ret < 0) ? orig_ret : rc); } @@ -668,10 +674,17 @@ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n) if (n < HBP_NUM) { struct perf_event *bp; + + if (ptrace_get_breakpoints(tsk) < 0) + return -ESRCH; + bp = thread->ptrace_bps[n]; if (!bp) - return 0; - val = bp->hw.info.address; + val = 0; + else + val = bp->hw.info.address; + + ptrace_put_breakpoints(tsk); } else if (n == 6) { val = thread->debugreg6; } else if (n == 7) { @@ -686,6 +699,10 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, struct perf_event *bp; struct thread_struct *t = &tsk->thread; struct perf_event_attr attr; + int err = 0; + + if (ptrace_get_breakpoints(tsk) < 0) + return -ESRCH; if (!t->ptrace_bps[nr]) { ptrace_breakpoint_init(&attr); @@ -709,24 +726,23 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, * writing for the user. And anyway this is the previous * behaviour. */ - if (IS_ERR(bp)) - return PTR_ERR(bp); + if (IS_ERR(bp)) { + err = PTR_ERR(bp); + goto put; + } t->ptrace_bps[nr] = bp; } else { - int err; - bp = t->ptrace_bps[nr]; attr = bp->attr; attr.bp_addr = addr; err = modify_user_hw_breakpoint(bp, &attr); - if (err) - return err; } - - return 0; +put: + ptrace_put_breakpoints(tsk); + return err; } /* diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index c11514e9128..75ef4b18e9b 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c @@ -61,6 +61,10 @@ struct x86_init_ops x86_init __initdata = { .banner = default_banner, }, + .mapping = { + .pagetable_reserve = native_pagetable_reserve, + }, + .paging = { .pagetable_setup_start = native_pagetable_setup_start, .pagetable_setup_done = native_pagetable_setup_done, diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 286d289b039..37b8b0fe832 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -81,6 +81,11 @@ static void __init find_early_table_space(unsigned long end, int use_pse, end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT); } +void __init native_pagetable_reserve(u64 start, u64 end) +{ + memblock_x86_reserve_range(start, end, "PGTABLE"); +} + struct map_range { unsigned long start; unsigned long end; @@ -272,9 +277,24 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, __flush_tlb_all(); + /* + * Reserve the kernel pagetable pages we used (pgt_buf_start - + * pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top) + * so that they can be reused for other purposes. + * + * On native it just means calling memblock_x86_reserve_range, on Xen it + * also means marking RW the pagetable pages that we allocated before + * but that haven't been used. 
+ * + * In fact on xen we mark RO the whole range pgt_buf_start - + * pgt_buf_top, because we have to make sure that when + * init_memory_mapping reaches the pagetable pages area, it maps + * RO all the pagetable pages, including the ones that are beyond + * pgt_buf_end at that time. + */ if (!after_bootmem && pgt_buf_end > pgt_buf_start) - memblock_x86_reserve_range(pgt_buf_start << PAGE_SHIFT, - pgt_buf_end << PAGE_SHIFT, "PGTABLE"); + x86_init.mapping.pagetable_reserve(PFN_PHYS(pgt_buf_start), + PFN_PHYS(pgt_buf_end)); if (!after_bootmem) early_memtest(start, end); diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index 7cb6424317f..c58e0ea39ef 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c @@ -699,16 +699,17 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, unsigned long va, unsigned int cpu) { - int tcpu; - int uvhub; int locals = 0; int remotes = 0; int hubs = 0; + int tcpu; + int tpnode; struct bau_desc *bau_desc; struct cpumask *flush_mask; struct ptc_stats *stat; struct bau_control *bcp; struct bau_control *tbcp; + struct hub_and_pnode *hpp; /* kernel was booted 'nobau' */ if (nobau) @@ -750,11 +751,18 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu; bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE); - /* cpu statistics */ for_each_cpu(tcpu, flush_mask) { - uvhub = uv_cpu_to_blade_id(tcpu); - bau_uvhub_set(uvhub, &bau_desc->distribution); - if (uvhub == bcp->uvhub) + /* + * The distribution vector is a bit map of pnodes, relative + * to the partition base pnode (and the partition base nasid + * in the header). + * Translate cpu to pnode and hub using an array stored + * in local memory. + */ + hpp = &bcp->socket_master->target_hub_and_pnode[tcpu]; + tpnode = hpp->pnode - bcp->partition_base_pnode; + bau_uvhub_set(tpnode, &bau_desc->distribution); + if (hpp->uvhub == bcp->uvhub) locals++; else remotes++; @@ -855,7 +863,7 @@ void uv_bau_message_interrupt(struct pt_regs *regs) * an interrupt, but causes an error message to be returned to * the sender. */ -static void uv_enable_timeouts(void) +static void __init uv_enable_timeouts(void) { int uvhub; int nuvhubs; @@ -1326,10 +1334,10 @@ static int __init uv_ptc_init(void) } /* - * initialize the sending side's sending buffers + * Initialize the sending side's sending buffers. */ static void -uv_activation_descriptor_init(int node, int pnode) +uv_activation_descriptor_init(int node, int pnode, int base_pnode) { int i; int cpu; @@ -1352,11 +1360,11 @@ uv_activation_descriptor_init(int node, int pnode) n = pa >> uv_nshift; m = pa & uv_mmask; + /* the 14-bit pnode */ uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE, (n << UV_DESC_BASE_PNODE_SHIFT | m)); - /* - * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each + * Initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each * cpu even though we only use the first one; one descriptor can * describe a broadcast to 256 uv hubs. */ @@ -1365,12 +1373,13 @@ uv_activation_descriptor_init(int node, int pnode) memset(bd2, 0, sizeof(struct bau_desc)); bd2->header.sw_ack_flag = 1; /* - * base_dest_nodeid is the nasid of the first uvhub - * in the partition. The bit map will indicate uvhub numbers, - * which are 0-N in a partition. Pnodes are unique system-wide. + * The base_dest_nasid set in the message header is the nasid + * of the first uvhub in the partition. 
The bit map will + * indicate destination pnode numbers relative to that base. + * They may not be consecutive if nasid striding is being used. */ - bd2->header.base_dest_nodeid = UV_PNODE_TO_NASID(uv_partition_base_pnode); - bd2->header.dest_subnodeid = 0x10; /* the LB */ + bd2->header.base_dest_nasid = UV_PNODE_TO_NASID(base_pnode); + bd2->header.dest_subnodeid = UV_LB_SUBNODEID; bd2->header.command = UV_NET_ENDPOINT_INTD; bd2->header.int_both = 1; /* @@ -1442,7 +1451,7 @@ uv_payload_queue_init(int node, int pnode) /* * Initialization of each UV hub's structures */ -static void __init uv_init_uvhub(int uvhub, int vector) +static void __init uv_init_uvhub(int uvhub, int vector, int base_pnode) { int node; int pnode; @@ -1450,11 +1459,11 @@ static void __init uv_init_uvhub(int uvhub, int vector) node = uvhub_to_first_node(uvhub); pnode = uv_blade_to_pnode(uvhub); - uv_activation_descriptor_init(node, pnode); + uv_activation_descriptor_init(node, pnode, base_pnode); uv_payload_queue_init(node, pnode); /* - * the below initialization can't be in firmware because the - * messaging IRQ will be determined by the OS + * The below initialization can't be in firmware because the + * messaging IRQ will be determined by the OS. */ apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits; uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, @@ -1491,10 +1500,11 @@ calculate_destination_timeout(void) /* * initialize the bau_control structure for each cpu */ -static int __init uv_init_per_cpu(int nuvhubs) +static int __init uv_init_per_cpu(int nuvhubs, int base_part_pnode) { int i; int cpu; + int tcpu; int pnode; int uvhub; int have_hmaster; @@ -1528,6 +1538,15 @@ static int __init uv_init_per_cpu(int nuvhubs) bcp = &per_cpu(bau_control, cpu); memset(bcp, 0, sizeof(struct bau_control)); pnode = uv_cpu_hub_info(cpu)->pnode; + if ((pnode - base_part_pnode) >= UV_DISTRIBUTION_SIZE) { + printk(KERN_EMERG + "cpu %d pnode %d-%d beyond %d; BAU disabled\n", + cpu, pnode, base_part_pnode, + UV_DISTRIBUTION_SIZE); + return 1; + } + bcp->osnode = cpu_to_node(cpu); + bcp->partition_base_pnode = uv_partition_base_pnode; uvhub = uv_cpu_hub_info(cpu)->numa_blade_id; *(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8)); bdp = &uvhub_descs[uvhub]; @@ -1536,7 +1555,7 @@ static int __init uv_init_per_cpu(int nuvhubs) bdp->pnode = pnode; /* kludge: 'assuming' one node per socket, and assuming that disabling a socket just leaves a gap in node numbers */ - socket = (cpu_to_node(cpu) & 1); + socket = bcp->osnode & 1; bdp->socket_mask |= (1 << socket); sdp = &bdp->socket[socket]; sdp->cpu_number[sdp->num_cpus] = cpu; @@ -1585,6 +1604,20 @@ static int __init uv_init_per_cpu(int nuvhubs) nextsocket: socket++; socket_mask = (socket_mask >> 1); + /* each socket gets a local array of pnodes/hubs */ + bcp = smaster; + bcp->target_hub_and_pnode = kmalloc_node( + sizeof(struct hub_and_pnode) * + num_possible_cpus(), GFP_KERNEL, bcp->osnode); + memset(bcp->target_hub_and_pnode, 0, + sizeof(struct hub_and_pnode) * + num_possible_cpus()); + for_each_present_cpu(tcpu) { + bcp->target_hub_and_pnode[tcpu].pnode = + uv_cpu_hub_info(tcpu)->pnode; + bcp->target_hub_and_pnode[tcpu].uvhub = + uv_cpu_hub_info(tcpu)->numa_blade_id; + } } } kfree(uvhub_descs); @@ -1637,21 +1670,22 @@ static int __init uv_bau_init(void) spin_lock_init(&disable_lock); congested_cycles = microsec_2_cycles(congested_response_us); - if (uv_init_per_cpu(nuvhubs)) { - nobau = 1; - return 0; - } - uv_partition_base_pnode = 0x7fffffff; - for (uvhub = 0; uvhub < nuvhubs; uvhub++) + 
for (uvhub = 0; uvhub < nuvhubs; uvhub++) { if (uv_blade_nr_possible_cpus(uvhub) && (uv_blade_to_pnode(uvhub) < uv_partition_base_pnode)) uv_partition_base_pnode = uv_blade_to_pnode(uvhub); + } + + if (uv_init_per_cpu(nuvhubs, uv_partition_base_pnode)) { + nobau = 1; + return 0; + } vector = UV_BAU_MESSAGE; for_each_possible_blade(uvhub) if (uv_blade_nr_possible_cpus(uvhub)) - uv_init_uvhub(uvhub, vector); + uv_init_uvhub(uvhub, vector, uv_partition_base_pnode); uv_enable_timeouts(); alloc_intr_gate(vector, uv_bau_message_intr1); diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 55c965b38c2..0684f3c74d5 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -1275,6 +1275,20 @@ static __init void xen_pagetable_setup_start(pgd_t *base) { } +static __init void xen_mapping_pagetable_reserve(u64 start, u64 end) +{ + /* reserve the range used */ + native_pagetable_reserve(start, end); + + /* set as RW the rest */ + printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n", end, + PFN_PHYS(pgt_buf_top)); + while (end < PFN_PHYS(pgt_buf_top)) { + make_lowmem_page_readwrite(__va(end)); + end += PAGE_SIZE; + } +} + static void xen_post_allocator_init(void); static __init void xen_pagetable_setup_done(pgd_t *base) @@ -1463,119 +1477,6 @@ static int xen_pgd_alloc(struct mm_struct *mm) return ret; } -#ifdef CONFIG_X86_64 -static __initdata u64 __last_pgt_set_rw = 0; -static __initdata u64 __pgt_buf_start = 0; -static __initdata u64 __pgt_buf_end = 0; -static __initdata u64 __pgt_buf_top = 0; -/* - * As a consequence of the commit: - * - * commit 4b239f458c229de044d6905c2b0f9fe16ed9e01e - * Author: Yinghai Lu <yinghai@kernel.org> - * Date: Fri Dec 17 16:58:28 2010 -0800 - * - * x86-64, mm: Put early page table high - * - * at some point init_memory_mapping is going to reach the pagetable pages - * area and map those pages too (mapping them as normal memory that falls - * in the range of addresses passed to init_memory_mapping as argument). - * Some of those pages are already pagetable pages (they are in the range - * pgt_buf_start-pgt_buf_end) therefore they are going to be mapped RO and - * everything is fine. - * Some of these pages are not pagetable pages yet (they fall in the range - * pgt_buf_end-pgt_buf_top; for example the page at pgt_buf_end) so they - * are going to be mapped RW. When these pages become pagetable pages and - * are hooked into the pagetable, xen will find that the guest has already - * a RW mapping of them somewhere and fail the operation. - * The reason Xen requires pagetables to be RO is that the hypervisor needs - * to verify that the pagetables are valid before using them. The validation - * operations are called "pinning". - * - * In order to fix the issue we mark all the pages in the entire range - * pgt_buf_start-pgt_buf_top as RO, however when the pagetable allocation - * is completed only the range pgt_buf_start-pgt_buf_end is reserved by - * init_memory_mapping. Hence the kernel is going to crash as soon as one - * of the pages in the range pgt_buf_end-pgt_buf_top is reused (b/c those - * ranges are RO). - * - * For this reason, 'mark_rw_past_pgt' is introduced which is called _after_ - * the init_memory_mapping has completed (in a perfect world we would - * call this function from init_memory_mapping, but lets ignore that). - * - * Because we are called _after_ init_memory_mapping the pgt_buf_[start, - * end,top] have all changed to new values (b/c init_memory_mapping - * is called and setting up another new page-table). 
Hence, the first time - * we enter this function, we save away the pgt_buf_start value and update - * the pgt_buf_[end,top]. - * - * When we detect that the "old" pgt_buf_start through pgt_buf_end - * PFNs have been reserved (so memblock_x86_reserve_range has been called), - * we immediately set out to RW the "old" pgt_buf_end through pgt_buf_top. - * - * And then we update those "old" pgt_buf_[end|top] with the new ones - * so that we can redo this on the next pagetable. - */ -static __init void mark_rw_past_pgt(void) { - - if (pgt_buf_end > pgt_buf_start) { - u64 addr, size; - - /* Save it away. */ - if (!__pgt_buf_start) { - __pgt_buf_start = pgt_buf_start; - __pgt_buf_end = pgt_buf_end; - __pgt_buf_top = pgt_buf_top; - return; - } - /* If we get the range that starts at __pgt_buf_end that means - * the range is reserved, and that in 'init_memory_mapping' - * the 'memblock_x86_reserve_range' has been called with the - * outdated __pgt_buf_start, __pgt_buf_end (the "new" - * pgt_buf_[start|end|top] refer now to a new pagetable. - * Note: we are called _after_ the pgt_buf_[..] have been - * updated.*/ - - addr = memblock_x86_find_in_range_size(PFN_PHYS(__pgt_buf_start), - &size, PAGE_SIZE); - - /* Still not reserved, meaning 'memblock_x86_reserve_range' - * hasn't been called yet. Update the _end and _top.*/ - if (addr == PFN_PHYS(__pgt_buf_start)) { - __pgt_buf_end = pgt_buf_end; - __pgt_buf_top = pgt_buf_top; - return; - } - - /* OK, the area is reserved, meaning it is time for us to - * set RW for the old end->top PFNs. */ - - /* ..unless we had already done this. */ - if (__pgt_buf_end == __last_pgt_set_rw) - return; - - addr = PFN_PHYS(__pgt_buf_end); - - /* set as RW the rest */ - printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n", - PFN_PHYS(__pgt_buf_end), PFN_PHYS(__pgt_buf_top)); - - while (addr < PFN_PHYS(__pgt_buf_top)) { - make_lowmem_page_readwrite(__va(addr)); - addr += PAGE_SIZE; - } - /* And update everything so that we are ready for the next - * pagetable (the one created for regions past 4GB) */ - __last_pgt_set_rw = __pgt_buf_end; - __pgt_buf_start = pgt_buf_start; - __pgt_buf_end = pgt_buf_end; - __pgt_buf_top = pgt_buf_top; - } - return; -} -#else -static __init void mark_rw_past_pgt(void) { } -#endif static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) { #ifdef CONFIG_X86_64 @@ -1602,14 +1503,6 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) unsigned long pfn = pte_pfn(pte); /* - * A bit of optimization. We do not need to call the workaround - * when xen_set_pte_init is called with a PTE with 0 as PFN. 
- * That is b/c the pagetable at that point are just being populated - * with empty values and we can save some cycles by not calling - * the 'memblock' code.*/ - if (pfn) - mark_rw_past_pgt(); - /* * If the new pfn is within the range of the newly allocated * kernel pagetable, and it isn't being mapped into an * early_ioremap fixmap slot as a freshly allocated page, make sure @@ -2118,8 +2011,6 @@ __init void xen_ident_map_ISA(void) static __init void xen_post_allocator_init(void) { - mark_rw_past_pgt(); - #ifdef CONFIG_XEN_DEBUG pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug); #endif @@ -2228,6 +2119,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = { void __init xen_init_mmu_ops(void) { + x86_init.mapping.pagetable_reserve = xen_mapping_pagetable_reserve; x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start; x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done; pv_mmu_ops = xen_mmu_ops; diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index f0605ab2a76..471fdcc5df8 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -114,6 +114,13 @@ struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup) } EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup); +struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk) +{ + return container_of(task_subsys_state(tsk, blkio_subsys_id), + struct blkio_cgroup, css); +} +EXPORT_SYMBOL_GPL(task_blkio_cgroup); + static inline void blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight) { diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h index 10919fae2d3..c774930cc20 100644 --- a/block/blk-cgroup.h +++ b/block/blk-cgroup.h @@ -291,6 +291,7 @@ static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg) {} #if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE) extern struct blkio_cgroup blkio_root_cgroup; extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup); +extern struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk); extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, struct blkio_group *blkg, void *key, dev_t dev, enum blkio_policy_id plid); @@ -314,6 +315,8 @@ void blkiocg_update_io_remove_stats(struct blkio_group *blkg, struct cgroup; static inline struct blkio_cgroup * cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; } +static inline struct blkio_cgroup * +task_blkio_cgroup(struct task_struct *tsk) { return NULL; } static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, struct blkio_group *blkg, void *key, dev_t dev, diff --git a/block/blk-core.c b/block/blk-core.c index a2e58eeb354..3fe00a14822 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -316,8 +316,10 @@ EXPORT_SYMBOL(__blk_run_queue); */ void blk_run_queue_async(struct request_queue *q) { - if (likely(!blk_queue_stopped(q))) + if (likely(!blk_queue_stopped(q))) { + __cancel_delayed_work(&q->delay_work); queue_delayed_work(kblockd_workqueue, &q->delay_work, 0); + } } EXPORT_SYMBOL(blk_run_queue_async); diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 0475a22a420..252a81a306f 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -160,9 +160,8 @@ static void throtl_put_tg(struct throtl_grp *tg) } static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td, - struct cgroup *cgroup) + struct blkio_cgroup *blkcg) { - struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup); struct throtl_grp *tg = NULL; void *key = td; struct backing_dev_info *bdi = &td->queue->backing_dev_info; @@ 
-229,12 +228,12 @@ done: static struct throtl_grp * throtl_get_tg(struct throtl_data *td) { - struct cgroup *cgroup; struct throtl_grp *tg = NULL; + struct blkio_cgroup *blkcg; rcu_read_lock(); - cgroup = task_cgroup(current, blkio_subsys_id); - tg = throtl_find_alloc_tg(td, cgroup); + blkcg = task_blkio_cgroup(current); + tg = throtl_find_alloc_tg(td, blkcg); if (!tg) tg = &td->root_tg; rcu_read_unlock(); diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 5b52011e3a4..ab7a9e6a9b1 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -1014,10 +1014,9 @@ void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg, cfqg->needs_update = true; } -static struct cfq_group * -cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create) +static struct cfq_group * cfq_find_alloc_cfqg(struct cfq_data *cfqd, + struct blkio_cgroup *blkcg, int create) { - struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup); struct cfq_group *cfqg = NULL; void *key = cfqd; int i, j; @@ -1079,12 +1078,12 @@ done: */ static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create) { - struct cgroup *cgroup; + struct blkio_cgroup *blkcg; struct cfq_group *cfqg = NULL; rcu_read_lock(); - cgroup = task_cgroup(current, blkio_subsys_id); - cfqg = cfq_find_alloc_cfqg(cfqd, cgroup, create); + blkcg = task_blkio_cgroup(current); + cfqg = cfq_find_alloc_cfqg(cfqd, blkcg, create); if (!cfqg && create) cfqg = &cfqd->root_group; rcu_read_unlock(); diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index ff9d832a163..d38c40fe4dd 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -561,27 +561,6 @@ void ahci_start_engine(struct ata_port *ap) { void __iomem *port_mmio = ahci_port_base(ap); u32 tmp; - u8 status; - - status = readl(port_mmio + PORT_TFDATA) & 0xFF; - - /* - * At end of section 10.1 of AHCI spec (rev 1.3), it states - * Software shall not set PxCMD.ST to 1 until it is determined - * that a functoinal device is present on the port as determined by - * PxTFD.STS.BSY=0, PxTFD.STS.DRQ=0 and PxSSTS.DET=3h - * - * Even though most AHCI host controllers work without this check, - * specific controller will fail under this condition - */ - if (status & (ATA_BUSY | ATA_DRQ)) - return; - else { - ahci_scr_read(&ap->link, SCR_STATUS, &tmp); - - if ((tmp & 0xf) != 0x3) - return; - } /* start DMA */ tmp = readl(port_mmio + PORT_CMD); diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index f26f2fe3480..dad9fd660f3 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -3316,7 +3316,7 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, struct ata_eh_context *ehc = &link->eh_context; struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; enum ata_lpm_policy old_policy = link->lpm_policy; - bool no_dipm = ap->flags & ATA_FLAG_NO_DIPM; + bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM; unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM; unsigned int err_mask; int rc; diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c index bdd2719f3f6..bc9e702186d 100644 --- a/drivers/atm/fore200e.c +++ b/drivers/atm/fore200e.c @@ -2643,16 +2643,19 @@ fore200e_init(struct fore200e* fore200e, struct device *parent) } #ifdef CONFIG_SBUS +static const struct of_device_id fore200e_sba_match[]; static int __devinit fore200e_sba_probe(struct platform_device *op) { + const struct of_device_id *match; const struct fore200e_bus *bus; struct fore200e *fore200e; static int index = 0; int err; - if (!op->dev.of_match) 
+ match = of_match_device(fore200e_sba_match, &op->dev); + if (!match) return -EINVAL; - bus = op->dev.of_match->data; + bus = match->data; fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL); if (!fore200e) diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c index 8066d086578..e086fbbbe85 100644 --- a/drivers/block/DAC960.c +++ b/drivers/block/DAC960.c @@ -2547,7 +2547,6 @@ static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller) disk->major = MajorNumber; disk->first_minor = n << DAC960_MaxPartitionsBits; disk->fops = &DAC960_BlockDeviceOperations; - disk->events = DISK_EVENT_MEDIA_CHANGE; } /* Indicate the Block Device Registration completed successfully, diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index 456c0cc90dc..8eba86bba59 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c @@ -1736,7 +1736,6 @@ static int __init fd_probe_drives(void) disk->major = FLOPPY_MAJOR; disk->first_minor = drive; disk->fops = &floppy_fops; - disk->events = DISK_EVENT_MEDIA_CHANGE; sprintf(disk->disk_name, "fd%d", drive); disk->private_data = &unit[drive]; set_capacity(disk, 880*2); diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index c871eae1412..ede16c64ff0 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c @@ -1964,7 +1964,6 @@ static int __init atari_floppy_init (void) unit[i].disk->first_minor = i; sprintf(unit[i].disk->disk_name, "fd%d", i); unit[i].disk->fops = &floppy_fops; - unit[i].disk->events = DISK_EVENT_MEDIA_CHANGE; unit[i].disk->private_data = &unit[i]; unit[i].disk->queue = blk_init_queue(do_fd_request, &ataflop_lock); diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 301d7a9a41a..db8f88586c8 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -4205,7 +4205,6 @@ static int __init floppy_init(void) disks[dr]->major = FLOPPY_MAJOR; disks[dr]->first_minor = TOMINOR(dr); disks[dr]->fops = &floppy_fops; - disks[dr]->events = DISK_EVENT_MEDIA_CHANGE; sprintf(disks[dr]->disk_name, "fd%d", dr); init_timer(&motor_off_timer[dr]); diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index 2f2ccf68625..8690e31d993 100644 --- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c @@ -320,7 +320,6 @@ static void pcd_init_units(void) disk->first_minor = unit; strcpy(disk->disk_name, cd->name); /* umm... 
*/ disk->fops = &pcd_bdops; - disk->events = DISK_EVENT_MEDIA_CHANGE; } } diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c index 21dfdb77686..869e7676d46 100644 --- a/drivers/block/paride/pd.c +++ b/drivers/block/paride/pd.c @@ -837,7 +837,6 @@ static void pd_probe_drive(struct pd_unit *disk) p->fops = &pd_fops; p->major = major; p->first_minor = (disk - pd) << PD_BITS; - p->events = DISK_EVENT_MEDIA_CHANGE; disk->gd = p; p->private_data = disk; p->queue = pd_queue; diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c index 7adeb1edbf4..f21b520ef41 100644 --- a/drivers/block/paride/pf.c +++ b/drivers/block/paride/pf.c @@ -294,7 +294,6 @@ static void __init pf_init_units(void) disk->first_minor = unit; strcpy(disk->disk_name, pf->name); disk->fops = &pf_fops; - disk->events = DISK_EVENT_MEDIA_CHANGE; if (!(*drives[unit])[D_PRT]) pf_drive_count++; } diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 3e904717c1c..9712fad82bc 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -92,6 +92,8 @@ struct rbd_client { struct list_head node; }; +struct rbd_req_coll; + /* * a single io request */ @@ -100,6 +102,24 @@ struct rbd_request { struct bio *bio; /* cloned bio */ struct page **pages; /* list of used pages */ u64 len; + int coll_index; + struct rbd_req_coll *coll; +}; + +struct rbd_req_status { + int done; + int rc; + u64 bytes; +}; + +/* + * a collection of requests + */ +struct rbd_req_coll { + int total; + int num_done; + struct kref kref; + struct rbd_req_status status[0]; }; struct rbd_snap { @@ -416,6 +436,17 @@ static void rbd_put_client(struct rbd_device *rbd_dev) rbd_dev->client = NULL; } +/* + * Destroy requests collection + */ +static void rbd_coll_release(struct kref *kref) +{ + struct rbd_req_coll *coll = + container_of(kref, struct rbd_req_coll, kref); + + dout("rbd_coll_release %p\n", coll); + kfree(coll); +} /* * Create a new header structure, translate header format from the on-disk @@ -590,6 +621,14 @@ static u64 rbd_get_segment(struct rbd_image_header *header, return len; } +static int rbd_get_num_segments(struct rbd_image_header *header, + u64 ofs, u64 len) +{ + u64 start_seg = ofs >> header->obj_order; + u64 end_seg = (ofs + len - 1) >> header->obj_order; + return end_seg - start_seg + 1; +} + /* * bio helpers */ @@ -735,6 +774,50 @@ static void rbd_destroy_ops(struct ceph_osd_req_op *ops) kfree(ops); } +static void rbd_coll_end_req_index(struct request *rq, + struct rbd_req_coll *coll, + int index, + int ret, u64 len) +{ + struct request_queue *q; + int min, max, i; + + dout("rbd_coll_end_req_index %p index %d ret %d len %lld\n", + coll, index, ret, len); + + if (!rq) + return; + + if (!coll) { + blk_end_request(rq, ret, len); + return; + } + + q = rq->q; + + spin_lock_irq(q->queue_lock); + coll->status[index].done = 1; + coll->status[index].rc = ret; + coll->status[index].bytes = len; + max = min = coll->num_done; + while (max < coll->total && coll->status[max].done) + max++; + + for (i = min; i<max; i++) { + __blk_end_request(rq, coll->status[i].rc, + coll->status[i].bytes); + coll->num_done++; + kref_put(&coll->kref, rbd_coll_release); + } + spin_unlock_irq(q->queue_lock); +} + +static void rbd_coll_end_req(struct rbd_request *req, + int ret, u64 len) +{ + rbd_coll_end_req_index(req->rq, req->coll, req->coll_index, ret, len); +} + /* * Send ceph osd request */ @@ -749,6 +832,8 @@ static int rbd_do_request(struct request *rq, int flags, struct ceph_osd_req_op *ops, int num_reply, + struct rbd_req_coll *coll, + int 
coll_index, void (*rbd_cb)(struct ceph_osd_request *req, struct ceph_msg *msg), struct ceph_osd_request **linger_req, @@ -763,12 +848,20 @@ static int rbd_do_request(struct request *rq, struct ceph_osd_request_head *reqhead; struct rbd_image_header *header = &dev->header; - ret = -ENOMEM; req_data = kzalloc(sizeof(*req_data), GFP_NOIO); - if (!req_data) - goto done; + if (!req_data) { + if (coll) + rbd_coll_end_req_index(rq, coll, coll_index, + -ENOMEM, len); + return -ENOMEM; + } - dout("rbd_do_request len=%lld ofs=%lld\n", len, ofs); + if (coll) { + req_data->coll = coll; + req_data->coll_index = coll_index; + } + + dout("rbd_do_request obj=%s ofs=%lld len=%lld\n", obj, len, ofs); down_read(&header->snap_rwsem); @@ -828,7 +921,8 @@ static int rbd_do_request(struct request *rq, ret = ceph_osdc_wait_request(&dev->client->osdc, req); if (ver) *ver = le64_to_cpu(req->r_reassert_version.version); - dout("reassert_ver=%lld\n", le64_to_cpu(req->r_reassert_version.version)); + dout("reassert_ver=%lld\n", + le64_to_cpu(req->r_reassert_version.version)); ceph_osdc_put_request(req); } return ret; @@ -837,10 +931,8 @@ done_err: bio_chain_put(req_data->bio); ceph_osdc_put_request(req); done_pages: + rbd_coll_end_req(req_data, ret, len); kfree(req_data); -done: - if (rq) - blk_end_request(rq, ret, len); return ret; } @@ -874,7 +966,7 @@ static void rbd_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg) bytes = req_data->len; } - blk_end_request(req_data->rq, rc, bytes); + rbd_coll_end_req(req_data, rc, bytes); if (req_data->bio) bio_chain_put(req_data->bio); @@ -934,6 +1026,7 @@ static int rbd_req_sync_op(struct rbd_device *dev, flags, ops, 2, + NULL, 0, NULL, linger_req, ver); if (ret < 0) @@ -959,7 +1052,9 @@ static int rbd_do_op(struct request *rq, u64 snapid, int opcode, int flags, int num_reply, u64 ofs, u64 len, - struct bio *bio) + struct bio *bio, + struct rbd_req_coll *coll, + int coll_index) { char *seg_name; u64 seg_ofs; @@ -995,7 +1090,10 @@ static int rbd_do_op(struct request *rq, flags, ops, num_reply, + coll, coll_index, rbd_req_cb, 0, NULL); + + rbd_destroy_ops(ops); done: kfree(seg_name); return ret; @@ -1008,13 +1106,15 @@ static int rbd_req_write(struct request *rq, struct rbd_device *rbd_dev, struct ceph_snap_context *snapc, u64 ofs, u64 len, - struct bio *bio) + struct bio *bio, + struct rbd_req_coll *coll, + int coll_index) { return rbd_do_op(rq, rbd_dev, snapc, CEPH_NOSNAP, CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, 2, - ofs, len, bio); + ofs, len, bio, coll, coll_index); } /* @@ -1024,14 +1124,16 @@ static int rbd_req_read(struct request *rq, struct rbd_device *rbd_dev, u64 snapid, u64 ofs, u64 len, - struct bio *bio) + struct bio *bio, + struct rbd_req_coll *coll, + int coll_index) { return rbd_do_op(rq, rbd_dev, NULL, (snapid ? 
snapid : CEPH_NOSNAP), CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ, 2, - ofs, len, bio); + ofs, len, bio, coll, coll_index); } /* @@ -1063,7 +1165,9 @@ static int rbd_req_sync_notify_ack(struct rbd_device *dev, { struct ceph_osd_req_op *ops; struct page **pages = NULL; - int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_NOTIFY_ACK, 0); + int ret; + + ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_NOTIFY_ACK, 0); if (ret < 0) return ret; @@ -1077,6 +1181,7 @@ static int rbd_req_sync_notify_ack(struct rbd_device *dev, CEPH_OSD_FLAG_READ, ops, 1, + NULL, 0, rbd_simple_req_cb, 0, NULL); rbd_destroy_ops(ops); @@ -1274,6 +1379,20 @@ static int rbd_req_sync_exec(struct rbd_device *dev, return ret; } +static struct rbd_req_coll *rbd_alloc_coll(int num_reqs) +{ + struct rbd_req_coll *coll = + kzalloc(sizeof(struct rbd_req_coll) + + sizeof(struct rbd_req_status) * num_reqs, + GFP_ATOMIC); + + if (!coll) + return NULL; + coll->total = num_reqs; + kref_init(&coll->kref); + return coll; +} + /* * block device queue callback */ @@ -1291,6 +1410,8 @@ static void rbd_rq_fn(struct request_queue *q) bool do_write; int size, op_size = 0; u64 ofs; + int num_segs, cur_seg = 0; + struct rbd_req_coll *coll; /* peek at request from block layer */ if (!rq) @@ -1321,6 +1442,14 @@ static void rbd_rq_fn(struct request_queue *q) do_write ? "write" : "read", size, blk_rq_pos(rq) * 512ULL); + num_segs = rbd_get_num_segments(&rbd_dev->header, ofs, size); + coll = rbd_alloc_coll(num_segs); + if (!coll) { + spin_lock_irq(q->queue_lock); + __blk_end_request_all(rq, -ENOMEM); + goto next; + } + do { /* a bio clone to be passed down to OSD req */ dout("rq->bio->bi_vcnt=%d\n", rq->bio->bi_vcnt); @@ -1328,35 +1457,41 @@ static void rbd_rq_fn(struct request_queue *q) rbd_dev->header.block_name, ofs, size, NULL, NULL); + kref_get(&coll->kref); bio = bio_chain_clone(&rq_bio, &next_bio, &bp, op_size, GFP_ATOMIC); if (!bio) { - spin_lock_irq(q->queue_lock); - __blk_end_request_all(rq, -ENOMEM); - goto next; + rbd_coll_end_req_index(rq, coll, cur_seg, + -ENOMEM, op_size); + goto next_seg; } + /* init OSD command: write or read */ if (do_write) rbd_req_write(rq, rbd_dev, rbd_dev->header.snapc, ofs, - op_size, bio); + op_size, bio, + coll, cur_seg); else rbd_req_read(rq, rbd_dev, cur_snap_id(rbd_dev), ofs, - op_size, bio); + op_size, bio, + coll, cur_seg); +next_seg: size -= op_size; ofs += op_size; + cur_seg++; rq_bio = next_bio; } while (size > 0); + kref_put(&coll->kref, rbd_coll_release); if (bp) bio_pair_release(bp); - spin_lock_irq(q->queue_lock); next: rq = blk_fetch_request(q); diff --git a/drivers/block/swim.c b/drivers/block/swim.c index 24a482f2fbd..fd5adcd5594 100644 --- a/drivers/block/swim.c +++ b/drivers/block/swim.c @@ -858,7 +858,6 @@ static int __devinit swim_floppy_init(struct swim_priv *swd) swd->unit[drive].disk->first_minor = drive; sprintf(swd->unit[drive].disk->disk_name, "fd%d", drive); swd->unit[drive].disk->fops = &floppy_fops; - swd->unit[drive].disk->events = DISK_EVENT_MEDIA_CHANGE; swd->unit[drive].disk->private_data = &swd->unit[drive]; swd->unit[drive].disk->queue = swd->queue; set_capacity(swd->unit[drive].disk, 2880); diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index 4c10f56facb..773bfa79277 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c @@ -1163,7 +1163,6 @@ static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device disk->major = FLOPPY_MAJOR; disk->first_minor = i; disk->fops = &floppy_fops; - disk->events = DISK_EVENT_MEDIA_CHANGE; disk->private_data = 
&floppy_states[i]; disk->queue = swim3_queue; disk->flags |= GENHD_FL_REMOVABLE; diff --git a/drivers/block/ub.c b/drivers/block/ub.c index 68b9430c7cf..0e376d46bdd 100644 --- a/drivers/block/ub.c +++ b/drivers/block/ub.c @@ -2334,7 +2334,6 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum) disk->major = UB_MAJOR; disk->first_minor = lun->id * UB_PARTS_PER_LUN; disk->fops = &ub_bd_fops; - disk->events = DISK_EVENT_MEDIA_CHANGE; disk->private_data = lun; disk->driverfs_dev = &sc->intf->dev; diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c index 645ff765cd1..6c7fd7db6df 100644 --- a/drivers/block/xsysace.c +++ b/drivers/block/xsysace.c @@ -1005,7 +1005,6 @@ static int __devinit ace_setup(struct ace_device *ace) ace->gd->major = ace_major; ace->gd->first_minor = ace->id * ACE_NUM_MINORS; ace->gd->fops = &ace_fops; - ace->gd->events = DISK_EVENT_MEDIA_CHANGE; ace->gd->queue = ace->queue; ace->gd->private_data = ace; snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a'); diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 514dd8efaf7..75fb965b8f7 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -986,6 +986,9 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t cdinfo(CD_OPEN, "entering cdrom_open\n"); + /* open is event synchronization point, check events first */ + check_disk_change(bdev); + /* if this was a O_NONBLOCK open and we should honor the flags, * do a quick open without drive/disc integrity checks. */ cdi->use_count++; @@ -1012,9 +1015,6 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t cdinfo(CD_OPEN, "Use count for \"/dev/%s\" now %d\n", cdi->name, cdi->use_count); - /* Do this on open. Don't wait for mount, because they might - not be mounting, but opening with O_NONBLOCK */ - check_disk_change(bdev); return 0; err_release: if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) { diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index b2b034fea34..3ceaf006e7f 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c @@ -803,7 +803,6 @@ static int __devinit probe_gdrom(struct platform_device *devptr) goto probe_fail_cdrom_register; } gd.disk->fops = &gdrom_bdops; - gd.disk->events = DISK_EVENT_MEDIA_CHANGE; /* latch on to the interrupt */ err = gdrom_set_interrupt_handlers(); if (err) diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c index 4e874c5fa60..e427fbe4599 100644 --- a/drivers/cdrom/viocd.c +++ b/drivers/cdrom/viocd.c @@ -626,7 +626,6 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id) gendisk->queue = q; gendisk->fops = &viocd_fops; gendisk->flags = GENHD_FL_CD|GENHD_FL_REMOVABLE; - gendisk->events = DISK_EVENT_MEDIA_CHANGE; set_capacity(gendisk, 0); gendisk->private_data = d; d->viocd_disk = gendisk; diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c index 43ac61978d8..ac6739e085e 100644 --- a/drivers/char/hw_random/n2-drv.c +++ b/drivers/char/hw_random/n2-drv.c @@ -619,15 +619,18 @@ static void __devinit n2rng_driver_version(void) pr_info("%s", version); } +static const struct of_device_id n2rng_match[]; static int __devinit n2rng_probe(struct platform_device *op) { + const struct of_device_id *match; int victoria_falls; int err = -ENOMEM; struct n2rng *np; - if (!op->dev.of_match) + match = of_match_device(n2rng_match, &op->dev); + if (!match) return -EINVAL; - victoria_falls = (op->dev.of_match->data != NULL); + victoria_falls = (match->data != NULL); n2rng_driver_version(); np = 
kzalloc(sizeof(*np), GFP_KERNEL); diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index cc6c9b2546a..64c6b853061 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -2554,9 +2554,11 @@ static struct pci_driver ipmi_pci_driver = { }; #endif /* CONFIG_PCI */ +static struct of_device_id ipmi_match[]; static int __devinit ipmi_probe(struct platform_device *dev) { #ifdef CONFIG_OF + const struct of_device_id *match; struct smi_info *info; struct resource resource; const __be32 *regsize, *regspacing, *regshift; @@ -2566,7 +2568,8 @@ static int __devinit ipmi_probe(struct platform_device *dev) dev_info(&dev->dev, "probing via device tree\n"); - if (!dev->dev.of_match) + match = of_match_device(ipmi_match, &dev->dev); + if (!match) return -EINVAL; ret = of_address_to_resource(np, 0, &resource); @@ -2601,7 +2604,7 @@ static int __devinit ipmi_probe(struct platform_device *dev) return -ENOMEM; } - info->si_type = (enum si_type) dev->dev.of_match->data; + info->si_type = (enum si_type) match->data; info->addr_source = SI_DEVICETREE; info->irq_setup = std_irq_setup; diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c index d6412c16385..39ccdeada79 100644 --- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c +++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c @@ -715,13 +715,13 @@ static int __devexit hwicap_remove(struct device *dev) } #ifdef CONFIG_OF -static int __devinit hwicap_of_probe(struct platform_device *op) +static int __devinit hwicap_of_probe(struct platform_device *op, + const struct hwicap_driver_config *config) { struct resource res; const unsigned int *id; const char *family; int rc; - const struct hwicap_driver_config *config = op->dev.of_match->data; const struct config_registers *regs; @@ -751,20 +751,24 @@ static int __devinit hwicap_of_probe(struct platform_device *op) regs); } #else -static inline int hwicap_of_probe(struct platform_device *op) +static inline int hwicap_of_probe(struct platform_device *op, + const struct hwicap_driver_config *config) { return -EINVAL; } #endif /* CONFIG_OF */ +static const struct of_device_id __devinitconst hwicap_of_match[]; static int __devinit hwicap_drv_probe(struct platform_device *pdev) { + const struct of_device_id *match; struct resource *res; const struct config_registers *regs; const char *family; - if (pdev->dev.of_match) - return hwicap_of_probe(pdev); + match = of_match_device(hwicap_of_match, &pdev->dev); + if (match) + return hwicap_of_probe(pdev, match->data); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c index c1f0045ceb8..af8e7b1aa29 100644 --- a/drivers/edac/ppc4xx_edac.c +++ b/drivers/edac/ppc4xx_edac.c @@ -1019,7 +1019,7 @@ ppc4xx_edac_mc_init(struct mem_ctl_info *mci, struct ppc4xx_edac_pdata *pdata = NULL; const struct device_node *np = op->dev.of_node; - if (op->dev.of_match == NULL) + if (of_match_device(ppc4xx_edac_match, &op->dev) == NULL) return -EINVAL; /* Initial driver pointers and private data */ diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 11d7a72c22d..140b9525b48 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -1516,17 +1516,33 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel) } EXPORT_SYMBOL(drm_fb_helper_initial_config); -bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) +/** + * 
drm_fb_helper_hotplug_event - respond to a hotplug notification by + * probing all the outputs attached to the fb. + * @fb_helper: the drm_fb_helper + * + * LOCKING: + * Called at runtime, must take mode config lock. + * + * Scan the connectors attached to the fb_helper and try to put together a + * setup after *notification of a change in output configuration. + * + * RETURNS: + * 0 on success and a non-zero error code otherwise. + */ +int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) { + struct drm_device *dev = fb_helper->dev; int count = 0; u32 max_width, max_height, bpp_sel; bool bound = false, crtcs_bound = false; struct drm_crtc *crtc; if (!fb_helper->fb) - return false; + return 0; - list_for_each_entry(crtc, &fb_helper->dev->mode_config.crtc_list, head) { + mutex_lock(&dev->mode_config.mutex); + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { if (crtc->fb) crtcs_bound = true; if (crtc->fb == fb_helper->fb) @@ -1535,7 +1551,8 @@ bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) if (!bound && crtcs_bound) { fb_helper->delayed_hotplug = true; - return false; + mutex_unlock(&dev->mode_config.mutex); + return 0; } DRM_DEBUG_KMS("\n"); @@ -1546,6 +1563,7 @@ bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, max_height); drm_setup_crtcs(fb_helper); + mutex_unlock(&dev->mode_config.mutex); return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); } diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 5d00b0fc0d9..959186cbf32 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -431,7 +431,7 @@ EXPORT_SYMBOL(drm_mm_search_free_in_range); void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new) { list_replace(&old->node_list, &new->node_list); - list_replace(&old->node_list, &new->hole_stack); + list_replace(&old->hole_stack, &new->hole_stack); new->hole_follows = old->hole_follows; new->mm = old->mm; new->start = old->start; @@ -699,8 +699,8 @@ int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) entry->size); total_used += entry->size; if (entry->hole_follows) { - hole_start = drm_mm_hole_node_start(&mm->head_node); - hole_end = drm_mm_hole_node_end(&mm->head_node); + hole_start = drm_mm_hole_node_start(entry); + hole_end = drm_mm_hole_node_end(entry); hole_size = hole_end - hole_start; seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n", hole_start, hole_end, hole_size); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index c34a8dd31d0..32d1b3e829c 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -49,7 +49,7 @@ module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600); unsigned int i915_powersave = 1; module_param_named(powersave, i915_powersave, int, 0600); -unsigned int i915_semaphores = 1; +unsigned int i915_semaphores = 0; module_param_named(semaphores, i915_semaphores, int, 0600); unsigned int i915_enable_rc6 = 0; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e522c702b04..2166ee071dd 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5605,9 +5605,9 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) intel_clock_t clock; if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) - fp = FP0(pipe); + fp = I915_READ(FP0(pipe)); else - fp = FP1(pipe); + fp = I915_READ(FP1(pipe)); clock.m1 = (fp & FP_M1_DIV_MASK) >> 
FP_M1_DIV_SHIFT; if (IS_PINEVIEW(dev)) { @@ -6579,8 +6579,10 @@ intel_user_framebuffer_create(struct drm_device *dev, return ERR_PTR(-ENOENT); intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); - if (!intel_fb) + if (!intel_fb) { + drm_gem_object_unreference_unlocked(&obj->base); return ERR_PTR(-ENOMEM); + } ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); if (ret) { diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index cb8578b7e44..a4d80314e7f 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1470,7 +1470,8 @@ intel_dp_link_down(struct intel_dp *intel_dp) if (!HAS_PCH_CPT(dev) && I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { - struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc); + struct drm_crtc *crtc = intel_dp->base.base.crtc; + /* Hardware workaround: leaving our transcoder select * set to transcoder B while it's off will prevent the * corresponding HDMI output on transcoder A. @@ -1485,7 +1486,19 @@ intel_dp_link_down(struct intel_dp *intel_dp) /* Changes to enable or select take place the vblank * after being written. */ - intel_wait_for_vblank(dev, intel_crtc->pipe); + if (crtc == NULL) { + /* We can arrive here never having been attached + * to a CRTC, for instance, due to inheriting + * random state from the BIOS. + * + * If the pipe is not running, play safe and + * wait for the clocks to stabilise before + * continuing. + */ + POSTING_READ(intel_dp->output_reg); + msleep(50); + } else + intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe); } I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index a562bd2648c..67cb076d271 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -539,6 +539,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, struct drm_device *dev = dev_priv->dev; struct drm_connector *connector = dev_priv->int_lvds_connector; + if (dev->switch_power_state != DRM_SWITCH_POWER_ON) + return NOTIFY_OK; + /* * check and update the status of LVDS connector after receiving * the LID nofication event. 
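[Editor's note, not part of the patch series] Several hunks in this section (n2-drv, ipmi_si_intf, xilinx_hwicap, ppc4xx_edac, i2c-mpc, sdhci-of-core, physmap_of, mpc5xxx_can, fs_enet-main, mii-fec, sunhme) apply the same conversion: direct use of op->dev.of_match is replaced by an explicit of_match_device() lookup against a forward-declared match table, and driver-specific data is then taken from the returned entry. What follows is a minimal sketch of that pattern under the same kernel APIs; the "foo"/"acme" names and the quirks field are hypothetical, chosen only for illustration, not taken from any of the drivers above.

#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Hypothetical per-variant data selected via the match table. */
struct foo_driver_data {
	int quirks;
};

static const struct foo_driver_data foo_v1_data = { .quirks = 0 };
static const struct foo_driver_data foo_v2_data = { .quirks = 1 };

/* Forward declaration so probe() can reference the table defined below,
 * mirroring what the patches add at the top of each probe function. */
static const struct of_device_id foo_of_match[];

static int __devinit foo_probe(struct platform_device *op)
{
	const struct of_device_id *match;
	const struct foo_driver_data *data;

	/* Look the device up in the driver's own table instead of relying
	 * on the op->dev.of_match shortcut that these patches stop using. */
	match = of_match_device(foo_of_match, &op->dev);
	if (!match)
		return -EINVAL;

	data = match->data;	/* was op->dev.of_match->data */

	/* ... normal probe work, keyed off data->quirks ... */
	return 0;
}

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "acme,foo-v1", .data = &foo_v1_data },
	{ .compatible = "acme,foo-v2", .data = &foo_v2_data },
	{ /* sentinel */ },
};

The forward declaration lets the match table stay in its usual place next to the platform_driver definition at the bottom of the file, which appears to be why the patches add a declaration rather than moving each table above its probe routine.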
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 5045f8b921d..c3e953b0899 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -152,8 +152,6 @@ nouveau_mem_vram_fini(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - nouveau_bo_ref(NULL, &dev_priv->vga_ram); - ttm_bo_device_release(&dev_priv->ttm.bdev); nouveau_ttm_global_release(dev_priv); diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index 4bce801bc58..c77111eca6a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -42,7 +42,8 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages, nvbe->nr_pages = 0; while (num_pages--) { - if (dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE) { + /* this code path isn't called and is incorrect anyways */ + if (0) { /*dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE)*/ nvbe->pages[nvbe->nr_pages] = dma_addrs[nvbe->nr_pages]; nvbe->ttm_alloced[nvbe->nr_pages] = true; diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index a30adec5bea..915fbce8959 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -768,6 +768,11 @@ static void nouveau_card_takedown(struct drm_device *dev) engine->mc.takedown(dev); engine->display.late_takedown(dev); + if (dev_priv->vga_ram) { + nouveau_bo_unpin(dev_priv->vga_ram); + nouveau_bo_ref(NULL, &dev_priv->vga_ram); + } + mutex_lock(&dev->struct_mutex); ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT); diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index c20eac3379e..9073e3bfb08 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1780,7 +1780,10 @@ static void evergreen_gpu_init(struct radeon_device *rdev) mc_shared_chmap = RREG32(MC_SHARED_CHMAP); - mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); + if (rdev->flags & RADEON_IS_IGP) + mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG); + else + mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); switch (rdev->config.evergreen.max_tile_pipes) { case 1: diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 94533849927..fc40e0cc345 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -200,6 +200,7 @@ #define BURSTLENGTH_SHIFT 9 #define BURSTLENGTH_MASK 0x00000200 #define CHANSIZE_OVERRIDE (1 << 11) +#define FUS_MC_ARB_RAMCFG 0x2768 #define MC_VM_AGP_TOP 0x2028 #define MC_VM_AGP_BOT 0x202C #define MC_VM_AGP_BASE 0x2030 diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 7aade20f63a..3d8a7634bbe 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -674,7 +674,7 @@ static void cayman_gpu_init(struct radeon_device *rdev) cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE); cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG); - cgts_tcc_disable = RREG32(CGTS_TCC_DISABLE); + cgts_tcc_disable = 0xff000000; gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE); gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG); cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE); @@ -871,7 +871,7 @@ static void cayman_gpu_init(struct radeon_device *rdev) smx_dc_ctl0 = RREG32(SMX_DC_CTL0); smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff); - smx_dc_ctl0 |= 
NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets); + smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets); WREG32(SMX_DC_CTL0, smx_dc_ctl0); WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE); @@ -887,20 +887,20 @@ static void cayman_gpu_init(struct radeon_device *rdev) WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO); - WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) | - POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) | - SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1))); + WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) | + POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) | + SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1))); - WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) | - SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) | - SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size))); + WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) | + SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) | + SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size))); WREG32(VGT_NUM_INSTANCES, 1); WREG32(CP_PERFMON_CNTL, 0); - WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) | + WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) | FETCH_FIFO_HIWATER(0x4) | DONE_FIFO_HIWATER(0xe0) | ALU_UPDATE_FIFO_HIWATER(0x8))); diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index f116516bfef..90dfb2b8cf0 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -431,7 +431,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, } } - /* Acer laptop (Acer TravelMate 5730G) has an HDMI port + /* Acer laptop (Acer TravelMate 5730/5730G) has an HDMI port * on the laptop and a DVI port on the docking station and * both share the same encoder, hpd pin, and ddc line. * So while the bios table is technically correct, @@ -440,7 +440,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, * with different crtcs which isn't possible on the hardware * side and leaves no crtcs for LVDS or VGA. 
*/ - if ((dev->pdev->device == 0x95c4) && + if (((dev->pdev->device == 0x95c4) || (dev->pdev->device == 0x9591)) && (dev->pdev->subsystem_vendor == 0x1025) && (dev->pdev->subsystem_device == 0x013c)) { if ((*connector_type == DRM_MODE_CONNECTOR_DVII) && @@ -1574,9 +1574,17 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record; ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record; bool bad_record = false; - u8 *record = (u8 *)(mode_info->atom_context->bios + - data_offset + - le16_to_cpu(lvds_info->info.usModePatchTableOffset)); + u8 *record; + + if ((frev == 1) && (crev < 2)) + /* absolute */ + record = (u8 *)(mode_info->atom_context->bios + + le16_to_cpu(lvds_info->info.usModePatchTableOffset)); + else + /* relative */ + record = (u8 *)(mode_info->atom_context->bios + + data_offset + + le16_to_cpu(lvds_info->info.usModePatchTableOffset)); while (*record != ATOM_RECORD_END_TYPE) { switch (*record) { case LCD_MODE_PATCH_RECORD_MODE_TYPE: diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index ed5dfe58f29..9d95792bea3 100644 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c @@ -15,6 +15,9 @@ #define ATPX_VERSION 0 #define ATPX_GPU_PWR 2 #define ATPX_MUX_SELECT 3 +#define ATPX_I2C_MUX_SELECT 4 +#define ATPX_SWITCH_START 5 +#define ATPX_SWITCH_END 6 #define ATPX_INTEGRATED 0 #define ATPX_DISCRETE 1 @@ -149,13 +152,35 @@ static int radeon_atpx_switch_mux(acpi_handle handle, int mux_id) return radeon_atpx_execute(handle, ATPX_MUX_SELECT, mux_id); } +static int radeon_atpx_switch_i2c_mux(acpi_handle handle, int mux_id) +{ + return radeon_atpx_execute(handle, ATPX_I2C_MUX_SELECT, mux_id); +} + +static int radeon_atpx_switch_start(acpi_handle handle, int gpu_id) +{ + return radeon_atpx_execute(handle, ATPX_SWITCH_START, gpu_id); +} + +static int radeon_atpx_switch_end(acpi_handle handle, int gpu_id) +{ + return radeon_atpx_execute(handle, ATPX_SWITCH_END, gpu_id); +} static int radeon_atpx_switchto(enum vga_switcheroo_client_id id) { + int gpu_id; + if (id == VGA_SWITCHEROO_IGD) - radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, 0); + gpu_id = ATPX_INTEGRATED; else - radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, 1); + gpu_id = ATPX_DISCRETE; + + radeon_atpx_switch_start(radeon_atpx_priv.atpx_handle, gpu_id); + radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, gpu_id); + radeon_atpx_switch_i2c_mux(radeon_atpx_priv.atpx_handle, gpu_id); + radeon_atpx_switch_end(radeon_atpx_priv.atpx_handle, gpu_id); + return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index bdf2fa1189a..3189a7efb2e 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c @@ -167,9 +167,6 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc, return -EINVAL; } - radeon_crtc->cursor_width = width; - radeon_crtc->cursor_height = height; - obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); if (!obj) { DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id); @@ -180,6 +177,9 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc, if (ret) goto fail; + radeon_crtc->cursor_width = width; + radeon_crtc->cursor_height = height; + radeon_lock_cursor(crtc, true); /* XXX only 27 bit offset for legacy cursor */ radeon_set_cursor(crtc, obj, gpu_addr); diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c 
index 8a955bbdb60..a533f52fd16 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -181,9 +181,9 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); for (i = 0; i < pages; i++, p++) { - /* On TTM path, we only use the DMA API if TTM_PAGE_FLAG_DMA32 - * is requested. */ - if (dma_addr[i] != DMA_ERROR_CODE) { + /* we reverted the patch using dma_addr in TTM for now but this + * code stops building on alpha so just comment it out for now */ + if (0) { /*dma_addr[i] != DMA_ERROR_CODE) */ rdev->gart.ttm_alloced[p] = true; rdev->gart.pages_addr[p] = dma_addr[i]; } else { diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman index 6334f8ac120..0aa8e85a945 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/cayman +++ b/drivers/gpu/drm/radeon/reg_srcs/cayman @@ -33,6 +33,7 @@ cayman 0x9400 0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS 0x00009100 SPI_CONFIG_CNTL 0x0000913C SPI_CONFIG_CNTL_1 +0x00009508 TA_CNTL_AUX 0x00009830 DB_DEBUG 0x00009834 DB_DEBUG2 0x00009838 DB_DEBUG3 diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen index 7e1637176e0..0e28cae7ea4 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/evergreen +++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen @@ -46,6 +46,7 @@ evergreen 0x9400 0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS 0x00009100 SPI_CONFIG_CNTL 0x0000913C SPI_CONFIG_CNTL_1 +0x00009508 TA_CNTL_AUX 0x00009700 VC_CNTL 0x00009714 VC_ENHANCE 0x00009830 DB_DEBUG diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index e01cacba685..498b284e5ef 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c @@ -219,9 +219,6 @@ static int vga_switchto_stage1(struct vga_switcheroo_client *new_client) int i; struct vga_switcheroo_client *active = NULL; - if (new_client->active == true) - return 0; - for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { if (vgasr_priv.clients[i].active == true) { active = &vgasr_priv.clients[i]; @@ -372,6 +369,9 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf, goto out; } + if (client->active == true) + goto out; + /* okay we want a switch - test if devices are willing to switch */ can_switch = true; for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c index 75b984c519a..107397a606b 100644 --- a/drivers/i2c/busses/i2c-mpc.c +++ b/drivers/i2c/busses/i2c-mpc.c @@ -560,15 +560,18 @@ static struct i2c_adapter mpc_ops = { .timeout = HZ, }; +static const struct of_device_id mpc_i2c_of_match[]; static int __devinit fsl_i2c_probe(struct platform_device *op) { + const struct of_device_id *match; struct mpc_i2c *i2c; const u32 *prop; u32 clock = MPC_I2C_CLOCK_LEGACY; int result = 0; int plen; - if (!op->dev.of_match) + match = of_match_device(mpc_i2c_of_match, &op->dev); + if (!match) return -EINVAL; i2c = kzalloc(sizeof(*i2c), GFP_KERNEL); @@ -605,8 +608,8 @@ static int __devinit fsl_i2c_probe(struct platform_device *op) clock = *prop; } - if (op->dev.of_match->data) { - struct mpc_i2c_data *data = op->dev.of_match->data; + if (match->data) { + struct mpc_i2c_data *data = match->data; data->setup(op->dev.of_node, i2c, clock, data->prescaler); } else { /* Backwards compatibility */ diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c index a97e3fec814..04be9f82e14 100644 --- a/drivers/i2c/busses/i2c-pnx.c +++ b/drivers/i2c/busses/i2c-pnx.c @@ 
-65,7 +65,7 @@ static inline void i2c_pnx_arm_timer(struct i2c_pnx_algo_data *alg_data) jiffies, expires); timer->expires = jiffies + expires; - timer->data = (unsigned long)&alg_data; + timer->data = (unsigned long)alg_data; add_timer(timer); } diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index c24946f5125..1de1c19dad3 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c @@ -281,17 +281,24 @@ struct ser_req { u8 command; u8 ref_off; u16 scratch; - __be16 sample; struct spi_message msg; struct spi_transfer xfer[6]; + /* + * DMA (thus cache coherency maintenance) requires the + * transfer buffers to live in their own cache lines. + */ + __be16 sample ____cacheline_aligned; }; struct ads7845_ser_req { u8 command[3]; - u8 pwrdown[3]; - u8 sample[3]; struct spi_message msg; struct spi_transfer xfer[2]; + /* + * DMA (thus cache coherency maintenance) requires the + * transfer buffers to live in their own cache lines. + */ + u8 sample[3] ____cacheline_aligned; }; static int ads7846_read12_ser(struct device *dev, unsigned command) diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c index e7089a1f6cb..b37e6186d0f 100644 --- a/drivers/leds/leds-lm3530.c +++ b/drivers/leds/leds-lm3530.c @@ -349,6 +349,7 @@ static const struct i2c_device_id lm3530_id[] = { {LM3530_NAME, 0}, {} }; +MODULE_DEVICE_TABLE(i2c, lm3530_id); static struct i2c_driver lm3530_i2c_driver = { .probe = lm3530_probe, diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c index c820e2f5352..3f442003623 100644 --- a/drivers/media/video/cx88/cx88-input.c +++ b/drivers/media/video/cx88/cx88-input.c @@ -524,7 +524,7 @@ void cx88_ir_irq(struct cx88_core *core) for (todo = 32; todo > 0; todo -= bits) { ev.pulse = samples & 0x80000000 ? false : true; bits = min(todo, 32U - fls(ev.pulse ? 
samples : ~samples)); - ev.duration = (bits * NSEC_PER_SEC) / (1000 * ir_samplerate); + ev.duration = (bits * (NSEC_PER_SEC / 1000)) / ir_samplerate; ir_raw_event_store_with_filter(ir->dev, &ev); samples <<= bits; } diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c index 3973f9a9475..ddb4c091ded 100644 --- a/drivers/media/video/soc_camera.c +++ b/drivers/media/video/soc_camera.c @@ -136,11 +136,50 @@ unsigned long soc_camera_apply_sensor_flags(struct soc_camera_link *icl, } EXPORT_SYMBOL(soc_camera_apply_sensor_flags); +#define pixfmtstr(x) (x) & 0xff, ((x) >> 8) & 0xff, ((x) >> 16) & 0xff, \ + ((x) >> 24) & 0xff + +static int soc_camera_try_fmt(struct soc_camera_device *icd, + struct v4l2_format *f) +{ + struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); + struct v4l2_pix_format *pix = &f->fmt.pix; + int ret; + + dev_dbg(&icd->dev, "TRY_FMT(%c%c%c%c, %ux%u)\n", + pixfmtstr(pix->pixelformat), pix->width, pix->height); + + pix->bytesperline = 0; + pix->sizeimage = 0; + + ret = ici->ops->try_fmt(icd, f); + if (ret < 0) + return ret; + + if (!pix->sizeimage) { + if (!pix->bytesperline) { + const struct soc_camera_format_xlate *xlate; + + xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat); + if (!xlate) + return -EINVAL; + + ret = soc_mbus_bytes_per_line(pix->width, + xlate->host_fmt); + if (ret > 0) + pix->bytesperline = ret; + } + if (pix->bytesperline) + pix->sizeimage = pix->bytesperline * pix->height; + } + + return 0; +} + static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct soc_camera_device *icd = file->private_data; - struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); WARN_ON(priv != file->private_data); @@ -149,7 +188,7 @@ static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv, return -EINVAL; /* limit format to hardware capabilities */ - return ici->ops->try_fmt(icd, f); + return soc_camera_try_fmt(icd, f); } static int soc_camera_enum_input(struct file *file, void *priv, @@ -362,9 +401,6 @@ static void soc_camera_free_user_formats(struct soc_camera_device *icd) icd->user_formats = NULL; } -#define pixfmtstr(x) (x) & 0xff, ((x) >> 8) & 0xff, ((x) >> 16) & 0xff, \ - ((x) >> 24) & 0xff - /* Called with .vb_lock held, or from the first open(2), see comment there */ static int soc_camera_set_fmt(struct soc_camera_device *icd, struct v4l2_format *f) @@ -377,7 +413,7 @@ static int soc_camera_set_fmt(struct soc_camera_device *icd, pixfmtstr(pix->pixelformat), pix->width, pix->height); /* We always call try_fmt() before set_fmt() or set_crop() */ - ret = ici->ops->try_fmt(icd, f); + ret = soc_camera_try_fmt(icd, f); if (ret < 0) return ret; diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c index 5aeaf876ba9..4aae501f02d 100644 --- a/drivers/media/video/v4l2-device.c +++ b/drivers/media/video/v4l2-device.c @@ -155,8 +155,10 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev, sd->v4l2_dev = v4l2_dev; if (sd->internal_ops && sd->internal_ops->registered) { err = sd->internal_ops->registered(sd); - if (err) + if (err) { + module_put(sd->owner); return err; + } } /* This just returns 0 if either of the two args is NULL */ @@ -164,6 +166,7 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev, if (err) { if (sd->internal_ops && sd->internal_ops->unregistered) sd->internal_ops->unregistered(sd); + module_put(sd->owner); return err; } diff --git a/drivers/media/video/v4l2-subdev.c 
b/drivers/media/video/v4l2-subdev.c index 0b806449067..812729ebf09 100644 --- a/drivers/media/video/v4l2-subdev.c +++ b/drivers/media/video/v4l2-subdev.c @@ -155,25 +155,25 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg) switch (cmd) { case VIDIOC_QUERYCTRL: - return v4l2_subdev_queryctrl(sd, arg); + return v4l2_queryctrl(sd->ctrl_handler, arg); case VIDIOC_QUERYMENU: - return v4l2_subdev_querymenu(sd, arg); + return v4l2_querymenu(sd->ctrl_handler, arg); case VIDIOC_G_CTRL: - return v4l2_subdev_g_ctrl(sd, arg); + return v4l2_g_ctrl(sd->ctrl_handler, arg); case VIDIOC_S_CTRL: - return v4l2_subdev_s_ctrl(sd, arg); + return v4l2_s_ctrl(sd->ctrl_handler, arg); case VIDIOC_G_EXT_CTRLS: - return v4l2_subdev_g_ext_ctrls(sd, arg); + return v4l2_g_ext_ctrls(sd->ctrl_handler, arg); case VIDIOC_S_EXT_CTRLS: - return v4l2_subdev_s_ext_ctrls(sd, arg); + return v4l2_s_ext_ctrls(sd->ctrl_handler, arg); case VIDIOC_TRY_EXT_CTRLS: - return v4l2_subdev_try_ext_ctrls(sd, arg); + return v4l2_try_ext_ctrls(sd->ctrl_handler, arg); case VIDIOC_DQEVENT: if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)) diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c index 643ad52e3ca..4796bbf0ae4 100644 --- a/drivers/message/i2o/i2o_block.c +++ b/drivers/message/i2o/i2o_block.c @@ -1000,7 +1000,6 @@ static struct i2o_block_device *i2o_block_device_alloc(void) gd->major = I2O_MAJOR; gd->queue = queue; gd->fops = &i2o_block_fops; - gd->events = DISK_EVENT_MEDIA_CHANGE; gd->private_data = dev; dev->gd = gd; diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c index d4a851c6b5b..0b4d5b23bec 100644 --- a/drivers/mfd/asic3.c +++ b/drivers/mfd/asic3.c @@ -144,7 +144,7 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc) int iter, i; unsigned long flags; - data->chip->irq_ack(irq_data); + data->chip->irq_ack(data); for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) { u32 status; diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c index 2e165117457..3ab9ffa00aa 100644 --- a/drivers/mfd/omap-usb-host.c +++ b/drivers/mfd/omap-usb-host.c @@ -717,14 +717,14 @@ static int usbhs_enable(struct device *dev) gpio_request(pdata->ehci_data->reset_gpio_port[0], "USB1 PHY reset"); gpio_direction_output - (pdata->ehci_data->reset_gpio_port[0], 1); + (pdata->ehci_data->reset_gpio_port[0], 0); } if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) { gpio_request(pdata->ehci_data->reset_gpio_port[1], "USB2 PHY reset"); gpio_direction_output - (pdata->ehci_data->reset_gpio_port[1], 1); + (pdata->ehci_data->reset_gpio_port[1], 0); } /* Hold the PHY in RESET for enough time till DIR is high */ @@ -904,11 +904,11 @@ static int usbhs_enable(struct device *dev) if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) gpio_set_value - (pdata->ehci_data->reset_gpio_port[0], 0); + (pdata->ehci_data->reset_gpio_port[0], 1); if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) gpio_set_value - (pdata->ehci_data->reset_gpio_port[1], 0); + (pdata->ehci_data->reset_gpio_port[1], 1); } end_count: diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c index 16422de0823..2c0d4d16491 100644 --- a/drivers/mfd/twl4030-power.c +++ b/drivers/mfd/twl4030-power.c @@ -447,12 +447,13 @@ static int __init load_twl4030_script(struct twl4030_script *tscript, if (err) goto out; } - if (tscript->flags & TWL4030_SLEEP_SCRIPT) + if (tscript->flags & TWL4030_SLEEP_SCRIPT) { if (order) pr_warning("TWL4030: Bad order of scripts (sleep "\ "script before wakeup) Leads to 
boot"\ "failure on some boards\n"); err = twl4030_config_sleep_sequence(address); + } out: return err; } diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 2b200c1cfbb..461e6a17fb9 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -94,7 +94,7 @@ static void mmc_host_clk_gate_delayed(struct mmc_host *host) spin_unlock_irqrestore(&host->clk_lock, flags); return; } - mmc_claim_host(host); + mutex_lock(&host->clk_gate_mutex); spin_lock_irqsave(&host->clk_lock, flags); if (!host->clk_requests) { spin_unlock_irqrestore(&host->clk_lock, flags); @@ -104,7 +104,7 @@ static void mmc_host_clk_gate_delayed(struct mmc_host *host) pr_debug("%s: gated MCI clock\n", mmc_hostname(host)); } spin_unlock_irqrestore(&host->clk_lock, flags); - mmc_release_host(host); + mutex_unlock(&host->clk_gate_mutex); } /* @@ -130,7 +130,7 @@ void mmc_host_clk_ungate(struct mmc_host *host) { unsigned long flags; - mmc_claim_host(host); + mutex_lock(&host->clk_gate_mutex); spin_lock_irqsave(&host->clk_lock, flags); if (host->clk_gated) { spin_unlock_irqrestore(&host->clk_lock, flags); @@ -140,7 +140,7 @@ void mmc_host_clk_ungate(struct mmc_host *host) } host->clk_requests++; spin_unlock_irqrestore(&host->clk_lock, flags); - mmc_release_host(host); + mutex_unlock(&host->clk_gate_mutex); } /** @@ -215,6 +215,7 @@ static inline void mmc_host_clk_init(struct mmc_host *host) host->clk_gated = false; INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work); spin_lock_init(&host->clk_lock); + mutex_init(&host->clk_gate_mutex); } /** diff --git a/drivers/mmc/host/sdhci-of-core.c b/drivers/mmc/host/sdhci-of-core.c index f9b611fc773..60e4186a434 100644 --- a/drivers/mmc/host/sdhci-of-core.c +++ b/drivers/mmc/host/sdhci-of-core.c @@ -124,8 +124,10 @@ static bool __devinit sdhci_of_wp_inverted(struct device_node *np) #endif } +static const struct of_device_id sdhci_of_match[]; static int __devinit sdhci_of_probe(struct platform_device *ofdev) { + const struct of_device_id *match; struct device_node *np = ofdev->dev.of_node; struct sdhci_of_data *sdhci_of_data; struct sdhci_host *host; @@ -134,9 +136,10 @@ static int __devinit sdhci_of_probe(struct platform_device *ofdev) int size; int ret; - if (!ofdev->dev.of_match) + match = of_match_device(sdhci_of_match, &ofdev->dev); + if (!match) return -EINVAL; - sdhci_of_data = ofdev->dev.of_match->data; + sdhci_of_data = match->data; if (!of_device_is_available(np)) return -ENODEV; diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c index bd483f0c57e..c1d33464aee 100644 --- a/drivers/mtd/maps/physmap_of.c +++ b/drivers/mtd/maps/physmap_of.c @@ -214,11 +214,13 @@ static void __devinit of_free_probes(const char **probes) } #endif +static struct of_device_id of_flash_match[]; static int __devinit of_flash_probe(struct platform_device *dev) { #ifdef CONFIG_MTD_PARTITIONS const char **part_probe_types; #endif + const struct of_device_id *match; struct device_node *dp = dev->dev.of_node; struct resource res; struct of_flash *info; @@ -232,9 +234,10 @@ static int __devinit of_flash_probe(struct platform_device *dev) struct mtd_info **mtd_list = NULL; resource_size_t res_size; - if (!dev->dev.of_match) + match = of_match_device(of_flash_match, &dev->dev); + if (!match) return -EINVAL; - probe_type = dev->dev.of_match->data; + probe_type = match->data; reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32); diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index dc280bc8eba..6c884ef1b06 100644 --- a/drivers/net/Kconfig 
+++ b/drivers/net/Kconfig @@ -2536,7 +2536,7 @@ config S6GMAC source "drivers/net/stmmac/Kconfig" config PCH_GBE - tristate "PCH Gigabit Ethernet" + tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GbE" depends on PCI select MII ---help--- @@ -2548,6 +2548,12 @@ config PCH_GBE to Gigabit Ethernet. This driver enables Gigabit Ethernet function. + This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ + Output Hub), ML7223. + ML7223 IOH is for MP(Media Phone) use. + ML7223 is companion chip for Intel Atom E6xx series. + ML7223 is completely compatible for Intel EG20T PCH. + endif # NETDEV_1000 # diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 01b604ad155..e5a7375685a 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -144,7 +144,7 @@ obj-$(CONFIG_NE3210) += ne3210.o 8390.o obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o obj-$(CONFIG_B44) += b44.o obj-$(CONFIG_FORCEDETH) += forcedeth.o -obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o +obj-$(CONFIG_NE_H8300) += ne-h8300.o obj-$(CONFIG_AX88796) += ax88796.o obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o obj-$(CONFIG_FTMAC100) += ftmac100.o @@ -219,7 +219,7 @@ obj-$(CONFIG_SC92031) += sc92031.o obj-$(CONFIG_LP486E) += lp486e.o obj-$(CONFIG_ETH16I) += eth16i.o -obj-$(CONFIG_ZORRO8390) += zorro8390.o 8390.o +obj-$(CONFIG_ZORRO8390) += zorro8390.o obj-$(CONFIG_HPLANCE) += hplance.o 7990.o obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o obj-$(CONFIG_EQUALIZER) += eql.o @@ -231,7 +231,7 @@ obj-$(CONFIG_SGI_IOC3_ETH) += ioc3-eth.o obj-$(CONFIG_DECLANCE) += declance.o obj-$(CONFIG_ATARILANCE) += atarilance.o obj-$(CONFIG_A2065) += a2065.o -obj-$(CONFIG_HYDRA) += hydra.o 8390.o +obj-$(CONFIG_HYDRA) += hydra.o obj-$(CONFIG_ARIADNE) += ariadne.o obj-$(CONFIG_CS89x0) += cs89x0.o obj-$(CONFIG_MACSONIC) += macsonic.o diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c index 4af235d41fd..fbfb5b47c50 100644 --- a/drivers/net/arm/etherh.c +++ b/drivers/net/arm/etherh.c @@ -527,7 +527,7 @@ static void __init etherh_banner(void) * Read the ethernet address string from the on board rom. * This is an ascii string... 
*/ -static int __init etherh_addr(char *addr, struct expansion_card *ec) +static int __devinit etherh_addr(char *addr, struct expansion_card *ec) { struct in_chunk_dir cd; char *s; @@ -655,7 +655,7 @@ static const struct net_device_ops etherh_netdev_ops = { static u32 etherh_regoffsets[16]; static u32 etherm_regoffsets[16]; -static int __init +static int __devinit etherh_probe(struct expansion_card *ec, const struct ecard_id *id) { const struct etherh_data *data = id->data; diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h index 66823eded7a..2353eca3259 100644 --- a/drivers/net/benet/be.h +++ b/drivers/net/benet/be.h @@ -213,7 +213,7 @@ struct be_rx_stats { struct be_rx_compl_info { u32 rss_hash; - u16 vid; + u16 vlan_tag; u16 pkt_size; u16 rxq_idx; u16 mac_id; diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index 1e2d825bb94..9dc9394fd4c 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c @@ -132,7 +132,7 @@ static void be_async_grp5_pvid_state_process(struct be_adapter *adapter, struct be_async_event_grp5_pvid_state *evt) { if (evt->enabled) - adapter->pvid = evt->tag; + adapter->pvid = le16_to_cpu(evt->tag); else adapter->pvid = 0; } diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index 02a0443d182..9187fb4e08f 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c @@ -1018,7 +1018,8 @@ static void be_rx_compl_process(struct be_adapter *adapter, kfree_skb(skb); return; } - vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, rxcp->vid); + vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, + rxcp->vlan_tag); } else { netif_receive_skb(skb); } @@ -1076,7 +1077,8 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter, if (likely(!rxcp->vlanf)) napi_gro_frags(&eq_obj->napi); else - vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, rxcp->vid); + vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, + rxcp->vlan_tag); } static void be_parse_rx_compl_v1(struct be_adapter *adapter, @@ -1102,7 +1104,8 @@ static void be_parse_rx_compl_v1(struct be_adapter *adapter, rxcp->pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl); rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm, compl); - rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, compl); + rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, + compl); } static void be_parse_rx_compl_v0(struct be_adapter *adapter, @@ -1128,7 +1131,8 @@ static void be_parse_rx_compl_v0(struct be_adapter *adapter, rxcp->pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl); rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm, compl); - rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, compl); + rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, + compl); } static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo) @@ -1155,9 +1159,11 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo) rxcp->vlanf = 0; if (!lancer_chip(adapter)) - rxcp->vid = swab16(rxcp->vid); + rxcp->vlan_tag = swab16(rxcp->vlan_tag); - if ((adapter->pvid == rxcp->vid) && !adapter->vlan_tag[rxcp->vid]) + if (((adapter->pvid & VLAN_VID_MASK) == + (rxcp->vlan_tag & VLAN_VID_MASK)) && + !adapter->vlan_tag[rxcp->vlan_tag]) rxcp->vlanf = 0; /* As the compl has been parsed, reset it; we wont touch it again */ diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h index b28baff7086..01b8a6af275 100644 --- 
a/drivers/net/bonding/bond_3ad.h +++ b/drivers/net/bonding/bond_3ad.h @@ -39,7 +39,7 @@ typedef struct mac_addr { u8 mac_addr_value[ETH_ALEN]; -} mac_addr_t; +} __packed mac_addr_t; enum { BOND_AD_STABLE = 0, @@ -134,12 +134,12 @@ typedef struct lacpdu { u8 tlv_type_terminator; // = terminator u8 terminator_length; // = 0 u8 reserved_50[50]; // = 0 -} lacpdu_t; +} __packed lacpdu_t; typedef struct lacpdu_header { struct ethhdr hdr; struct lacpdu lacpdu; -} lacpdu_header_t; +} __packed lacpdu_header_t; // Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard) typedef struct bond_marker { @@ -155,12 +155,12 @@ typedef struct bond_marker { u8 tlv_type_terminator; // = 0x00 u8 terminator_length; // = 0x00 u8 reserved_90[90]; // = 0 -} bond_marker_t; +} __packed bond_marker_t; typedef struct bond_marker_header { struct ethhdr hdr; struct bond_marker marker; -} bond_marker_header_t; +} __packed bond_marker_header_t; #pragma pack() diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c index bd1d811c204..5fedc337556 100644 --- a/drivers/net/can/mscan/mpc5xxx_can.c +++ b/drivers/net/can/mscan/mpc5xxx_can.c @@ -247,8 +247,10 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev, } #endif /* CONFIG_PPC_MPC512x */ +static struct of_device_id mpc5xxx_can_table[]; static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev) { + const struct of_device_id *match; struct mpc5xxx_can_data *data; struct device_node *np = ofdev->dev.of_node; struct net_device *dev; @@ -258,9 +260,10 @@ static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev) int irq, mscan_clksrc = 0; int err = -ENOMEM; - if (!ofdev->dev.of_match) + match = of_match_device(mpc5xxx_can_table, &ofdev->dev); + if (!match) return -EINVAL; - data = (struct mpc5xxx_can_data *)ofdev->dev.of_match->data; + data = match->data; base = of_iomap(np, 0); if (!base) { diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index a358ea9445a..f501bba1fc6 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -346,10 +346,10 @@ static void sja1000_rx(struct net_device *dev) | (priv->read_reg(priv, REG_ID2) >> 5); } + cf->can_dlc = get_can_dlc(fi & 0x0F); if (fi & FI_RTR) { id |= CAN_RTR_FLAG; } else { - cf->can_dlc = get_can_dlc(fi & 0x0F); for (i = 0; i < cf->can_dlc; i++) cf->data[i] = priv->read_reg(priv, dreg++); } diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index b423965a78d..1b49df6b247 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -583,7 +583,9 @@ static int slcan_open(struct tty_struct *tty) /* Done. We have linked the TTY line to a channel. 
*/ rtnl_unlock(); tty->receive_room = 65536; /* We don't flow control */ - return sl->dev->base_addr; + + /* TTY layer expects 0 on success */ + return 0; err_free_chan: sl->tty = NULL; diff --git a/drivers/net/ehea/ehea_ethtool.c b/drivers/net/ehea/ehea_ethtool.c index 3e2e734fecb..f3bbdcef338 100644 --- a/drivers/net/ehea/ehea_ethtool.c +++ b/drivers/net/ehea/ehea_ethtool.c @@ -55,15 +55,20 @@ static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) cmd->duplex = -1; } - cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full - | SUPPORTED_100baseT_Full | SUPPORTED_100baseT_Half - | SUPPORTED_10baseT_Full | SUPPORTED_10baseT_Half - | SUPPORTED_Autoneg | SUPPORTED_FIBRE); - - cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_Autoneg - | ADVERTISED_FIBRE); + if (cmd->speed == SPEED_10000) { + cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); + cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); + cmd->port = PORT_FIBRE; + } else { + cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full + | SUPPORTED_100baseT_Half | SUPPORTED_10baseT_Full + | SUPPORTED_10baseT_Half | SUPPORTED_Autoneg + | SUPPORTED_TP); + cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg + | ADVERTISED_TP); + cmd->port = PORT_TP; + } - cmd->port = PORT_FIBRE; cmd->autoneg = port->autoneg == 1 ? AUTONEG_ENABLE : AUTONEG_DISABLE; return 0; diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 53c0f04b1b2..cf79cf759e1 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c @@ -2688,9 +2688,6 @@ static int ehea_open(struct net_device *dev) netif_start_queue(dev); } - init_waitqueue_head(&port->swqe_avail_wq); - init_waitqueue_head(&port->restart_wq); - mutex_unlock(&port->port_lock); return ret; @@ -3276,6 +3273,9 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, INIT_WORK(&port->reset_task, ehea_reset_port); + init_waitqueue_head(&port->swqe_avail_wq); + init_waitqueue_head(&port->restart_wq); + ret = register_netdev(dev); if (ret) { pr_err("register_netdev failed. 
ret=%d\n", ret); diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c index 24cb953900d..5131e61c358 100644 --- a/drivers/net/fs_enet/fs_enet-main.c +++ b/drivers/net/fs_enet/fs_enet-main.c @@ -998,8 +998,10 @@ static const struct net_device_ops fs_enet_netdev_ops = { #endif }; +static struct of_device_id fs_enet_match[]; static int __devinit fs_enet_probe(struct platform_device *ofdev) { + const struct of_device_id *match; struct net_device *ndev; struct fs_enet_private *fep; struct fs_platform_info *fpi; @@ -1007,14 +1009,15 @@ static int __devinit fs_enet_probe(struct platform_device *ofdev) const u8 *mac_addr; int privsize, len, ret = -ENODEV; - if (!ofdev->dev.of_match) + match = of_match_device(fs_enet_match, &ofdev->dev); + if (!match) return -EINVAL; fpi = kzalloc(sizeof(*fpi), GFP_KERNEL); if (!fpi) return -ENOMEM; - if (!IS_FEC(ofdev->dev.of_match)) { + if (!IS_FEC(match)) { data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len); if (!data || len != 4) goto out_free_fpi; @@ -1049,7 +1052,7 @@ static int __devinit fs_enet_probe(struct platform_device *ofdev) fep->dev = &ofdev->dev; fep->ndev = ndev; fep->fpi = fpi; - fep->ops = ofdev->dev.of_match->data; + fep->ops = match->data; ret = fep->ops->setup_data(ndev); if (ret) diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c index 7e840d373ab..6a2e150e75b 100644 --- a/drivers/net/fs_enet/mii-fec.c +++ b/drivers/net/fs_enet/mii-fec.c @@ -101,17 +101,20 @@ static int fs_enet_fec_mii_reset(struct mii_bus *bus) return 0; } +static struct of_device_id fs_enet_mdio_fec_match[]; static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) { + const struct of_device_id *match; struct resource res; struct mii_bus *new_bus; struct fec_info *fec; int (*get_bus_freq)(struct device_node *); int ret = -ENOMEM, clock, speed; - if (!ofdev->dev.of_match) + match = of_match_device(fs_enet_mdio_fec_match, &ofdev->dev); + if (!match) return -EINVAL; - get_bus_freq = ofdev->dev.of_match->data; + get_bus_freq = match->data; new_bus = mdiobus_alloc(); if (!new_bus) diff --git a/drivers/net/hydra.c b/drivers/net/hydra.c index c5ef62ceb84..1cd481c0420 100644 --- a/drivers/net/hydra.c +++ b/drivers/net/hydra.c @@ -98,15 +98,15 @@ static const struct net_device_ops hydra_netdev_ops = { .ndo_open = hydra_open, .ndo_stop = hydra_close, - .ndo_start_xmit = ei_start_xmit, - .ndo_tx_timeout = ei_tx_timeout, - .ndo_get_stats = ei_get_stats, - .ndo_set_multicast_list = ei_set_multicast_list, + .ndo_start_xmit = __ei_start_xmit, + .ndo_tx_timeout = __ei_tx_timeout, + .ndo_get_stats = __ei_get_stats, + .ndo_set_multicast_list = __ei_set_multicast_list, .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, + .ndo_set_mac_address = eth_mac_addr, .ndo_change_mtu = eth_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ei_poll, + .ndo_poll_controller = __ei_poll, #endif }; @@ -125,7 +125,7 @@ static int __devinit hydra_init(struct zorro_dev *z) 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, }; - dev = alloc_ei_netdev(); + dev = ____alloc_ei_netdev(0); if (!dev) return -ENOMEM; diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c index 30be8c634eb..7298a34bc79 100644 --- a/drivers/net/ne-h8300.c +++ b/drivers/net/ne-h8300.c @@ -167,7 +167,7 @@ static void cleanup_card(struct net_device *dev) #ifndef MODULE struct net_device * __init ne_probe(int unit) { - struct net_device *dev = alloc_ei_netdev(); + struct net_device *dev = 
____alloc_ei_netdev(0); int err; if (!dev) @@ -197,15 +197,15 @@ static const struct net_device_ops ne_netdev_ops = { .ndo_open = ne_open, .ndo_stop = ne_close, - .ndo_start_xmit = ei_start_xmit, - .ndo_tx_timeout = ei_tx_timeout, - .ndo_get_stats = ei_get_stats, - .ndo_set_multicast_list = ei_set_multicast_list, + .ndo_start_xmit = __ei_start_xmit, + .ndo_tx_timeout = __ei_tx_timeout, + .ndo_get_stats = __ei_get_stats, + .ndo_set_multicast_list = __ei_set_multicast_list, .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, + .ndo_set_mac_address = eth_mac_addr, .ndo_change_mtu = eth_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ei_poll, + .ndo_poll_controller = __ei_poll, #endif }; @@ -637,7 +637,7 @@ int init_module(void) int err; for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { - struct net_device *dev = alloc_ei_netdev(); + struct net_device *dev = ____alloc_ei_netdev(0); if (!dev) break; if (io[this_dev]) { diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c index 2ef2f9cdefa..56d049a472d 100644 --- a/drivers/net/pch_gbe/pch_gbe_main.c +++ b/drivers/net/pch_gbe/pch_gbe_main.c @@ -34,6 +34,10 @@ const char pch_driver_version[] = DRV_VERSION; #define PCH_GBE_COPYBREAK_DEFAULT 256 #define PCH_GBE_PCI_BAR 1 +/* Macros for ML7223 */ +#define PCI_VENDOR_ID_ROHM 0x10db +#define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013 + #define PCH_GBE_TX_WEIGHT 64 #define PCH_GBE_RX_WEIGHT 64 #define PCH_GBE_RX_BUFFER_WRITE 16 @@ -43,8 +47,7 @@ const char pch_driver_version[] = DRV_VERSION; #define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \ PCH_GBE_CHIP_TYPE_INTERNAL | \ - PCH_GBE_RGMII_MODE_RGMII | \ - PCH_GBE_CRS_SEL \ + PCH_GBE_RGMII_MODE_RGMII \ ) /* Ethertype field values */ @@ -1494,12 +1497,11 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, /* Write meta date of skb */ skb_put(skb, length); skb->protocol = eth_type_trans(skb, netdev); - if ((tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK) == - PCH_GBE_RXD_ACC_STAT_TCPIPOK) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - } else { + if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK) skb->ip_summed = CHECKSUM_NONE; - } + else + skb->ip_summed = CHECKSUM_UNNECESSARY; + napi_gro_receive(&adapter->napi, skb); (*work_done)++; pr_debug("Receive skb->ip_summed: %d length: %d\n", @@ -2420,6 +2422,13 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = { .class = (PCI_CLASS_NETWORK_ETHERNET << 8), .class_mask = (0xFFFF00) }, + {.vendor = PCI_VENDOR_ID_ROHM, + .device = PCI_DEVICE_ID_ROHM_ML7223_GBE, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = (PCI_CLASS_NETWORK_ETHERNET << 8), + .class_mask = (0xFFFF00) + }, /* required last entry */ {0} }; diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c index d98479030ef..3dd45ed61f0 100644 --- a/drivers/net/sfc/mcdi.c +++ b/drivers/net/sfc/mcdi.c @@ -50,6 +50,20 @@ static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) return &nic_data->mcdi; } +static inline void +efx_mcdi_readd(struct efx_nic *efx, efx_dword_t *value, unsigned reg) +{ + struct siena_nic_data *nic_data = efx->nic_data; + value->u32[0] = (__force __le32)__raw_readl(nic_data->mcdi_smem + reg); +} + +static inline void +efx_mcdi_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned reg) +{ + struct siena_nic_data *nic_data = efx->nic_data; + __raw_writel((__force u32)value->u32[0], nic_data->mcdi_smem + reg); +} + void efx_mcdi_init(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi; @@ -70,8 +84,8 @@ static void 
efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, const u8 *inbuf, size_t inlen) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); - unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); - unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx); + unsigned pdu = MCDI_PDU(efx); + unsigned doorbell = MCDI_DOORBELL(efx); unsigned int i; efx_dword_t hdr; u32 xflags, seqno; @@ -92,30 +106,28 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, MCDI_HEADER_SEQ, seqno, MCDI_HEADER_XFLAGS, xflags); - efx_writed(efx, &hdr, pdu); + efx_mcdi_writed(efx, &hdr, pdu); - for (i = 0; i < inlen; i += 4) { - _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i); - /* use wmb() within loop to inhibit write combining */ - wmb(); - } + for (i = 0; i < inlen; i += 4) + efx_mcdi_writed(efx, (const efx_dword_t *)(inbuf + i), + pdu + 4 + i); /* ring the doorbell with a distinctive value */ - _efx_writed(efx, (__force __le32) 0x45789abc, doorbell); - wmb(); + EFX_POPULATE_DWORD_1(hdr, EFX_DWORD_0, 0x45789abc); + efx_mcdi_writed(efx, &hdr, doorbell); } static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); - unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); + unsigned int pdu = MCDI_PDU(efx); int i; BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); BUG_ON(outlen & 3 || outlen >= 0x100); for (i = 0; i < outlen; i += 4) - *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i); + efx_mcdi_readd(efx, (efx_dword_t *)(outbuf + i), pdu + 4 + i); } static int efx_mcdi_poll(struct efx_nic *efx) @@ -123,7 +135,7 @@ static int efx_mcdi_poll(struct efx_nic *efx) struct efx_mcdi_iface *mcdi = efx_mcdi(efx); unsigned int time, finish; unsigned int respseq, respcmd, error; - unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); + unsigned int pdu = MCDI_PDU(efx); unsigned int rc, spins; efx_dword_t reg; @@ -149,8 +161,7 @@ static int efx_mcdi_poll(struct efx_nic *efx) time = get_seconds(); - rmb(); - efx_readd(efx, ®, pdu); + efx_mcdi_readd(efx, ®, pdu); /* All 1's indicates that shared memory is in reset (and is * not a valid header). 
Wait for it to come out reset before @@ -177,7 +188,7 @@ static int efx_mcdi_poll(struct efx_nic *efx) respseq, mcdi->seqno); rc = EIO; } else if (error) { - efx_readd(efx, ®, pdu + 4); + efx_mcdi_readd(efx, ®, pdu + 4); switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) { #define TRANSLATE_ERROR(name) \ case MC_CMD_ERR_ ## name: \ @@ -211,21 +222,21 @@ out: /* Test and clear MC-rebooted flag for this port/function */ int efx_mcdi_poll_reboot(struct efx_nic *efx) { - unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx); + unsigned int addr = MCDI_REBOOT_FLAG(efx); efx_dword_t reg; uint32_t value; if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) return false; - efx_readd(efx, ®, addr); + efx_mcdi_readd(efx, ®, addr); value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); if (value == 0) return 0; EFX_ZERO_DWORD(reg); - efx_writed(efx, ®, addr); + efx_mcdi_writed(efx, ®, addr); if (value == MC_STATUS_DWORD_ASSERT) return -EINTR; diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c index 10f1cb79c14..9b29a8d7c44 100644 --- a/drivers/net/sfc/nic.c +++ b/drivers/net/sfc/nic.c @@ -1937,6 +1937,13 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf) size = min_t(size_t, table->step, 16); + if (table->offset >= efx->type->mem_map_size) { + /* No longer mapped; return dummy data */ + memcpy(buf, "\xde\xc0\xad\xde", 4); + buf += table->rows * size; + continue; + } + for (i = 0; i < table->rows; i++) { switch (table->step) { case 4: /* 32-bit register or SRAM */ diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h index a42db6e35be..d91701abd33 100644 --- a/drivers/net/sfc/nic.h +++ b/drivers/net/sfc/nic.h @@ -143,10 +143,12 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx) /** * struct siena_nic_data - Siena NIC state * @mcdi: Management-Controller-to-Driver Interface + * @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable. * @wol_filter_id: Wake-on-LAN packet filter id */ struct siena_nic_data { struct efx_mcdi_iface mcdi; + void __iomem *mcdi_smem; int wol_filter_id; }; diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c index e4dd8986b1f..837869b71db 100644 --- a/drivers/net/sfc/siena.c +++ b/drivers/net/sfc/siena.c @@ -220,12 +220,26 @@ static int siena_probe_nic(struct efx_nic *efx) efx_reado(efx, ®, FR_AZ_CS_DEBUG); efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; + /* Initialise MCDI */ + nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys + + FR_CZ_MC_TREG_SMEM, + FR_CZ_MC_TREG_SMEM_STEP * + FR_CZ_MC_TREG_SMEM_ROWS); + if (!nic_data->mcdi_smem) { + netif_err(efx, probe, efx->net_dev, + "could not map MCDI at %llx+%x\n", + (unsigned long long)efx->membase_phys + + FR_CZ_MC_TREG_SMEM, + FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS); + rc = -ENOMEM; + goto fail1; + } efx_mcdi_init(efx); /* Recover from a failed assertion before probing */ rc = efx_mcdi_handle_assertion(efx); if (rc) - goto fail1; + goto fail2; /* Let the BMC know that the driver is now in charge of link and * filter settings. 
We must do this before we reset the NIC */ @@ -280,6 +294,7 @@ fail4: fail3: efx_mcdi_drv_attach(efx, false, NULL); fail2: + iounmap(nic_data->mcdi_smem); fail1: kfree(efx->nic_data); return rc; @@ -359,6 +374,8 @@ static int siena_init_nic(struct efx_nic *efx) static void siena_remove_nic(struct efx_nic *efx) { + struct siena_nic_data *nic_data = efx->nic_data; + efx_nic_free_buffer(efx, &efx->irq_status); siena_reset_hw(efx, RESET_TYPE_ALL); @@ -368,7 +385,8 @@ static void siena_remove_nic(struct efx_nic *efx) efx_mcdi_drv_attach(efx, false, NULL); /* Tear down the private nic state */ - kfree(efx->nic_data); + iounmap(nic_data->mcdi_smem); + kfree(nic_data); efx->nic_data = NULL; } @@ -606,8 +624,7 @@ struct efx_nic_type siena_a0_nic_type = { .default_mac_ops = &efx_mcdi_mac_operations, .revision = EFX_REV_SIENA_A0, - .mem_map_size = (FR_CZ_MC_TREG_SMEM + - FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS), + .mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */ .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, .buf_tbl_base = FR_BZ_BUF_FULL_TBL, diff --git a/drivers/net/slip.c b/drivers/net/slip.c index 86cbb9ea2f2..8ec1a9a0bb9 100644 --- a/drivers/net/slip.c +++ b/drivers/net/slip.c @@ -853,7 +853,9 @@ static int slip_open(struct tty_struct *tty) /* Done. We have linked the TTY line to a channel. */ rtnl_unlock(); tty->receive_room = 65536; /* We don't flow control */ - return sl->dev->base_addr; + + /* TTY layer expects 0 on success */ + return 0; err_free_bufs: sl_free_bufs(sl); diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c index eb4f59fb01e..bff2f7999ff 100644 --- a/drivers/net/sunhme.c +++ b/drivers/net/sunhme.c @@ -3237,15 +3237,18 @@ static void happy_meal_pci_exit(void) #endif #ifdef CONFIG_SBUS +static const struct of_device_id hme_sbus_match[]; static int __devinit hme_sbus_probe(struct platform_device *op) { + const struct of_device_id *match; struct device_node *dp = op->dev.of_node; const char *model = of_get_property(dp, "model", NULL); int is_qfe; - if (!op->dev.of_match) + match = of_match_device(hme_sbus_match, &op->dev); + if (!match) return -EINVAL; - is_qfe = (op->dev.of_match->data != NULL); + is_qfe = (match->data != NULL); if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe")) is_qfe = 1; diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index a301479ecc6..c924ea2bce0 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -567,7 +567,7 @@ static const struct usb_device_id products [] = { { USB_DEVICE_AND_INTERFACE_INFO(0x1004, 0x61aa, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), - .driver_info = 0, + .driver_info = (unsigned long)&wwan_info, }, /* diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index 7d42f9a2c06..81126ff85e0 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c @@ -65,6 +65,7 @@ #define IPHETH_USBINTF_PROTO 1 #define IPHETH_BUF_SIZE 1516 +#define IPHETH_IP_ALIGN 2 /* padding at front of URB */ #define IPHETH_TX_TIMEOUT (5 * HZ) #define IPHETH_INTFNUM 2 @@ -202,18 +203,21 @@ static void ipheth_rcvbulk_callback(struct urb *urb) return; } - len = urb->actual_length; - buf = urb->transfer_buffer; + if (urb->actual_length <= IPHETH_IP_ALIGN) { + dev->net->stats.rx_length_errors++; + return; + } + len = urb->actual_length - IPHETH_IP_ALIGN; + buf = urb->transfer_buffer + IPHETH_IP_ALIGN; - skb = dev_alloc_skb(NET_IP_ALIGN + len); + skb = dev_alloc_skb(len); if (!skb) { err("%s: 
dev_alloc_skb: -ENOMEM", __func__); dev->net->stats.rx_dropped++; return; } - skb_reserve(skb, NET_IP_ALIGN); - memcpy(skb_put(skb, len), buf + NET_IP_ALIGN, len - NET_IP_ALIGN); + memcpy(skb_put(skb, len), buf, len); skb->dev = dev->net; skb->protocol = eth_type_trans(skb, dev->net); diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 009bba3d753..9ab439d144e 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -645,6 +645,7 @@ int usbnet_stop (struct net_device *net) struct driver_info *info = dev->driver_info; int retval; + clear_bit(EVENT_DEV_OPEN, &dev->flags); netif_stop_queue (net); netif_info(dev, ifdown, dev->net, @@ -1524,9 +1525,12 @@ int usbnet_resume (struct usb_interface *intf) smp_mb(); clear_bit(EVENT_DEV_ASLEEP, &dev->flags); spin_unlock_irq(&dev->txq.lock); - if (!(dev->txq.qlen >= TX_QLEN(dev))) - netif_start_queue(dev->net); - tasklet_schedule (&dev->bh); + + if (test_bit(EVENT_DEV_OPEN, &dev->flags)) { + if (!(dev->txq.qlen >= TX_QLEN(dev))) + netif_start_queue(dev->net); + tasklet_schedule (&dev->bh); + } } return 0; } diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 0d47c3a0530..c16ed961153 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -178,6 +178,7 @@ static void vmxnet3_process_events(struct vmxnet3_adapter *adapter) { int i; + unsigned long flags; u32 events = le32_to_cpu(adapter->shared->ecr); if (!events) return; @@ -190,10 +191,10 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter) /* Check if there is an error on xmit/recv queues */ if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) { - spin_lock(&adapter->cmd_lock); + spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_QUEUE_STATUS); - spin_unlock(&adapter->cmd_lock); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); for (i = 0; i < adapter->num_tx_queues; i++) if (adapter->tqd_start[i].status.stopped) @@ -2733,13 +2734,14 @@ static void vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) { u32 cfg; + unsigned long flags; /* intr settings */ - spin_lock(&adapter->cmd_lock); + spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_CONF_INTR); cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); - spin_unlock(&adapter->cmd_lock); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); adapter->intr.type = cfg & 0x3; adapter->intr.mask_mode = (cfg >> 2) & 0x3; diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index 51f2ef142a5..976467253d2 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -311,6 +311,9 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data) /* toggle the LRO feature*/ netdev->features ^= NETIF_F_LRO; + /* Update private LRO flag */ + adapter->lro = lro_requested; + /* update harware LRO capability accordingly */ if (lro_requested) adapter->shared->devRead.misc.uptFeatures |= diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 17d04ff8d67..1482fa65083 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -2141,6 +2141,8 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class) static void ath9k_flush(struct ieee80211_hw *hw, bool drop) { struct ath_softc *sc = hw->priv; + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = 
ath9k_hw_common(ah); int timeout = 200; /* ms */ int i, j; @@ -2149,6 +2151,12 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop) cancel_delayed_work_sync(&sc->tx_complete_work); + if (sc->sc_flags & SC_OP_INVALID) { + ath_dbg(common, ATH_DBG_ANY, "Device not present\n"); + mutex_unlock(&sc->mutex); + return; + } + if (drop) timeout = 1; diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c index c1511b14b23..42db0fc8b92 100644 --- a/drivers/net/wireless/iwlegacy/iwl-core.c +++ b/drivers/net/wireless/iwlegacy/iwl-core.c @@ -2155,6 +2155,13 @@ int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed) goto set_ch_out; } + if (priv->iw_mode == NL80211_IFTYPE_ADHOC && + !iwl_legacy_is_channel_ibss(ch_info)) { + IWL_DEBUG_MAC80211(priv, "leave - not IBSS channel\n"); + ret = -EINVAL; + goto set_ch_out; + } + spin_lock_irqsave(&priv->lock, flags); for_each_context(priv, ctx) { diff --git a/drivers/net/wireless/iwlegacy/iwl-dev.h b/drivers/net/wireless/iwlegacy/iwl-dev.h index 9ee849d669f..f43ac1eb901 100644 --- a/drivers/net/wireless/iwlegacy/iwl-dev.h +++ b/drivers/net/wireless/iwlegacy/iwl-dev.h @@ -1411,6 +1411,12 @@ iwl_legacy_is_channel_passive(const struct iwl_channel_info *ch) return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0; } +static inline int +iwl_legacy_is_channel_ibss(const struct iwl_channel_info *ch) +{ + return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0; +} + static inline void __iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page) { diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c index 7e8a658b767..f3ac62431a3 100644 --- a/drivers/net/wireless/libertas/cmd.c +++ b/drivers/net/wireless/libertas/cmd.c @@ -1339,8 +1339,8 @@ int lbs_execute_next_command(struct lbs_private *priv) cpu_to_le16(PS_MODE_ACTION_EXIT_PS)) { lbs_deb_host( "EXEC_NEXT_CMD: ignore ENTER_PS cmd\n"); - list_del(&cmdnode->list); spin_lock_irqsave(&priv->driver_lock, flags); + list_del(&cmdnode->list); lbs_complete_command(priv, cmdnode, 0); spin_unlock_irqrestore(&priv->driver_lock, flags); @@ -1352,8 +1352,8 @@ int lbs_execute_next_command(struct lbs_private *priv) (priv->psstate == PS_STATE_PRE_SLEEP)) { lbs_deb_host( "EXEC_NEXT_CMD: ignore EXIT_PS cmd in sleep\n"); - list_del(&cmdnode->list); spin_lock_irqsave(&priv->driver_lock, flags); + list_del(&cmdnode->list); lbs_complete_command(priv, cmdnode, 0); spin_unlock_irqrestore(&priv->driver_lock, flags); priv->needtowakeup = 1; @@ -1366,7 +1366,9 @@ int lbs_execute_next_command(struct lbs_private *priv) "EXEC_NEXT_CMD: sending EXIT_PS\n"); } } + spin_lock_irqsave(&priv->driver_lock, flags); list_del(&cmdnode->list); + spin_unlock_irqrestore(&priv->driver_lock, flags); lbs_deb_host("EXEC_NEXT_CMD: sending command 0x%04x\n", le16_to_cpu(cmd->command)); lbs_submit_command(priv, cmdnode); diff --git a/drivers/net/zorro8390.c b/drivers/net/zorro8390.c index b78a38d9172..8c7c522a056 100644 --- a/drivers/net/zorro8390.c +++ b/drivers/net/zorro8390.c @@ -126,7 +126,7 @@ static int __devinit zorro8390_init_one(struct zorro_dev *z, board = z->resource.start; ioaddr = board+cards[i].offset; - dev = alloc_ei_netdev(); + dev = ____alloc_ei_netdev(0); if (!dev) return -ENOMEM; if (!request_mem_region(ioaddr, NE_IO_EXTENT*2, DRV_NAME)) { @@ -146,15 +146,15 @@ static int __devinit zorro8390_init_one(struct zorro_dev *z, static const struct net_device_ops zorro8390_netdev_ops = { .ndo_open = zorro8390_open, .ndo_stop = zorro8390_close, - .ndo_start_xmit = 
ei_start_xmit, - .ndo_tx_timeout = ei_tx_timeout, - .ndo_get_stats = ei_get_stats, - .ndo_set_multicast_list = ei_set_multicast_list, + .ndo_start_xmit = __ei_start_xmit, + .ndo_tx_timeout = __ei_tx_timeout, + .ndo_get_stats = __ei_get_stats, + .ndo_set_multicast_list = __ei_set_multicast_list, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, .ndo_change_mtu = eth_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ei_poll, + .ndo_poll_controller = __ei_poll, #endif }; diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index ebf51ad1b71..a806cb321d2 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -579,7 +579,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, } size0 = calculate_iosize(size, min_size, size1, resource_size(b_res), 4096); - size1 = !add_size? size0: + size1 = (!add_head || (add_head && !add_size)) ? size0 : calculate_iosize(size, min_size+add_size, size1, resource_size(b_res), 4096); if (!size0 && !size1) { @@ -677,7 +677,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, align += aligns[order]; } size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align); - size1 = !add_size ? size : + size1 = (!add_head || (add_head && !add_size)) ? size0 : calculate_memsize(size, min_size+add_size, 0, resource_size(b_res), min_align); if (!size0 && !size1) { diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c index 5f2dd386152..2c1abf63957 100644 --- a/drivers/platform/x86/eeepc-laptop.c +++ b/drivers/platform/x86/eeepc-laptop.c @@ -585,8 +585,9 @@ static bool eeepc_wlan_rfkill_blocked(struct eeepc_laptop *eeepc) return true; } -static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc) +static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle) { + struct pci_dev *port; struct pci_dev *dev; struct pci_bus *bus; bool blocked = eeepc_wlan_rfkill_blocked(eeepc); @@ -599,9 +600,16 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc) mutex_lock(&eeepc->hotplug_lock); if (eeepc->hotplug_slot) { - bus = pci_find_bus(0, 1); + port = acpi_get_pci_dev(handle); + if (!port) { + pr_warning("Unable to find port\n"); + goto out_unlock; + } + + bus = port->subordinate; + if (!bus) { - pr_warning("Unable to find PCI bus 1?\n"); + pr_warning("Unable to find PCI bus?\n"); goto out_unlock; } @@ -609,6 +617,7 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc) pr_err("Unable to read PCI config space?\n"); goto out_unlock; } + absent = (l == 0xffffffff); if (blocked != absent) { @@ -647,6 +656,17 @@ out_unlock: mutex_unlock(&eeepc->hotplug_lock); } +static void eeepc_rfkill_hotplug_update(struct eeepc_laptop *eeepc, char *node) +{ + acpi_status status = AE_OK; + acpi_handle handle; + + status = acpi_get_handle(NULL, node, &handle); + + if (ACPI_SUCCESS(status)) + eeepc_rfkill_hotplug(eeepc, handle); +} + static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data) { struct eeepc_laptop *eeepc = data; @@ -654,7 +674,7 @@ static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data) if (event != ACPI_NOTIFY_BUS_CHECK) return; - eeepc_rfkill_hotplug(eeepc); + eeepc_rfkill_hotplug(eeepc, handle); } static int eeepc_register_rfkill_notifier(struct eeepc_laptop *eeepc, @@ -672,6 +692,11 @@ static int eeepc_register_rfkill_notifier(struct eeepc_laptop *eeepc, eeepc); if (ACPI_FAILURE(status)) pr_warning("Failed to register notify on %s\n", node); + /* + * Refresh 
pci hotplug in case the rfkill state was + * changed during setup. + */ + eeepc_rfkill_hotplug(eeepc, handle); } else return -ENODEV; @@ -693,6 +718,12 @@ static void eeepc_unregister_rfkill_notifier(struct eeepc_laptop *eeepc, if (ACPI_FAILURE(status)) pr_err("Error removing rfkill notify handler %s\n", node); + /* + * Refresh pci hotplug in case the rfkill + * state was changed after + * eeepc_unregister_rfkill_notifier() + */ + eeepc_rfkill_hotplug(eeepc, handle); } } @@ -816,11 +847,7 @@ static void eeepc_rfkill_exit(struct eeepc_laptop *eeepc) rfkill_destroy(eeepc->wlan_rfkill); eeepc->wlan_rfkill = NULL; } - /* - * Refresh pci hotplug in case the rfkill state was changed after - * eeepc_unregister_rfkill_notifier() - */ - eeepc_rfkill_hotplug(eeepc); + if (eeepc->hotplug_slot) pci_hp_deregister(eeepc->hotplug_slot); @@ -889,11 +916,6 @@ static int eeepc_rfkill_init(struct eeepc_laptop *eeepc) eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P5"); eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P6"); eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P7"); - /* - * Refresh pci hotplug in case the rfkill state was changed during - * setup. - */ - eeepc_rfkill_hotplug(eeepc); exit: if (result && result != -ENODEV) @@ -928,8 +950,11 @@ static int eeepc_hotk_restore(struct device *device) struct eeepc_laptop *eeepc = dev_get_drvdata(device); /* Refresh both wlan rfkill state and pci hotplug */ - if (eeepc->wlan_rfkill) - eeepc_rfkill_hotplug(eeepc); + if (eeepc->wlan_rfkill) { + eeepc_rfkill_hotplug_update(eeepc, "\\_SB.PCI0.P0P5"); + eeepc_rfkill_hotplug_update(eeepc, "\\_SB.PCI0.P0P6"); + eeepc_rfkill_hotplug_update(eeepc, "\\_SB.PCI0.P0P7"); + } if (eeepc->bluetooth_rfkill) rfkill_set_sw_state(eeepc->bluetooth_rfkill, diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index 8f709aec4da..6fe8cd6e23b 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c @@ -934,6 +934,14 @@ static ssize_t sony_nc_sysfs_store(struct device *dev, /* * Backlight device */ +struct sony_backlight_props { + struct backlight_device *dev; + int handle; + u8 offset; + u8 maxlvl; +}; +struct sony_backlight_props sony_bl_props; + static int sony_backlight_update_status(struct backlight_device *bd) { return acpi_callsetfunc(sony_nc_acpi_handle, "SBRT", @@ -954,21 +962,26 @@ static int sony_nc_get_brightness_ng(struct backlight_device *bd) { int result; int *handle = (int *)bl_get_data(bd); + struct sony_backlight_props *sdev = + (struct sony_backlight_props *)bl_get_data(bd); - sony_call_snc_handle(*handle, 0x0200, &result); + sony_call_snc_handle(sdev->handle, 0x0200, &result); - return result & 0xff; + return (result & 0xff) - sdev->offset; } static int sony_nc_update_status_ng(struct backlight_device *bd) { int value, result; int *handle = (int *)bl_get_data(bd); + struct sony_backlight_props *sdev = + (struct sony_backlight_props *)bl_get_data(bd); - value = bd->props.brightness; - sony_call_snc_handle(*handle, 0x0100 | (value << 16), &result); + value = bd->props.brightness + sdev->offset; + if (sony_call_snc_handle(sdev->handle, 0x0100 | (value << 16), &result)) + return -EIO; - return sony_nc_get_brightness_ng(bd); + return value; } static const struct backlight_ops sony_backlight_ops = { @@ -981,8 +994,6 @@ static const struct backlight_ops sony_backlight_ng_ops = { .update_status = sony_nc_update_status_ng, .get_brightness = sony_nc_get_brightness_ng, }; -static int backlight_ng_handle; -static struct backlight_device 
*sony_backlight_device; /* * New SNC-only Vaios event mapping to driver known keys @@ -1549,6 +1560,75 @@ static void sony_nc_kbd_backlight_resume(void) &ignore); } +static void sony_nc_backlight_ng_read_limits(int handle, + struct sony_backlight_props *props) +{ + int offset; + acpi_status status; + u8 brlvl, i; + u8 min = 0xff, max = 0x00; + struct acpi_object_list params; + union acpi_object in_obj; + union acpi_object *lvl_enum; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + + props->handle = handle; + props->offset = 0; + props->maxlvl = 0xff; + + offset = sony_find_snc_handle(handle); + if (offset < 0) + return; + + /* try to read the boundaries from ACPI tables, if we fail the above + * defaults should be reasonable + */ + params.count = 1; + params.pointer = &in_obj; + in_obj.type = ACPI_TYPE_INTEGER; + in_obj.integer.value = offset; + status = acpi_evaluate_object(sony_nc_acpi_handle, "SN06", ¶ms, + &buffer); + if (ACPI_FAILURE(status)) + return; + + lvl_enum = (union acpi_object *) buffer.pointer; + if (!lvl_enum) { + pr_err("No SN06 return object."); + return; + } + if (lvl_enum->type != ACPI_TYPE_BUFFER) { + pr_err("Invalid SN06 return object 0x%.2x\n", + lvl_enum->type); + goto out_invalid; + } + + /* the buffer lists brightness levels available, brightness levels are + * from 0 to 8 in the array, other values are used by ALS control. + */ + for (i = 0; i < 9 && i < lvl_enum->buffer.length; i++) { + + brlvl = *(lvl_enum->buffer.pointer + i); + dprintk("Brightness level: %d\n", brlvl); + + if (!brlvl) + break; + + if (brlvl > max) + max = brlvl; + if (brlvl < min) + min = brlvl; + } + props->offset = min; + props->maxlvl = max; + dprintk("Brightness levels: min=%d max=%d\n", props->offset, + props->maxlvl); + +out_invalid: + kfree(buffer.pointer); + return; +} + static void sony_nc_backlight_setup(void) { acpi_handle unused; @@ -1557,14 +1637,14 @@ static void sony_nc_backlight_setup(void) struct backlight_properties props; if (sony_find_snc_handle(0x12f) != -1) { - backlight_ng_handle = 0x12f; ops = &sony_backlight_ng_ops; - max_brightness = 0xff; + sony_nc_backlight_ng_read_limits(0x12f, &sony_bl_props); + max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset; } else if (sony_find_snc_handle(0x137) != -1) { - backlight_ng_handle = 0x137; ops = &sony_backlight_ng_ops; - max_brightness = 0xff; + sony_nc_backlight_ng_read_limits(0x137, &sony_bl_props); + max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset; } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT", &unused))) { @@ -1577,22 +1657,22 @@ static void sony_nc_backlight_setup(void) memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_PLATFORM; props.max_brightness = max_brightness; - sony_backlight_device = backlight_device_register("sony", NULL, - &backlight_ng_handle, - ops, &props); + sony_bl_props.dev = backlight_device_register("sony", NULL, + &sony_bl_props, + ops, &props); - if (IS_ERR(sony_backlight_device)) { - pr_warning(DRV_PFX "unable to register backlight device\n"); - sony_backlight_device = NULL; + if (IS_ERR(sony_bl_props.dev)) { + pr_warn(DRV_PFX "unable to register backlight device\n"); + sony_bl_props.dev = NULL; } else - sony_backlight_device->props.brightness = - ops->get_brightness(sony_backlight_device); + sony_bl_props.dev->props.brightness = + ops->get_brightness(sony_bl_props.dev); } static void sony_nc_backlight_cleanup(void) { - if (sony_backlight_device) - backlight_device_unregister(sony_backlight_device); + if 
(sony_bl_props.dev) + backlight_device_unregister(sony_bl_props.dev); } static int sony_nc_add(struct acpi_device *device) @@ -2590,7 +2670,7 @@ static long sonypi_misc_ioctl(struct file *fp, unsigned int cmd, mutex_lock(&spic_dev.lock); switch (cmd) { case SONYPI_IOCGBRT: - if (sony_backlight_device == NULL) { + if (sony_bl_props.dev == NULL) { ret = -EIO; break; } @@ -2603,7 +2683,7 @@ static long sonypi_misc_ioctl(struct file *fp, unsigned int cmd, ret = -EFAULT; break; case SONYPI_IOCSBRT: - if (sony_backlight_device == NULL) { + if (sony_bl_props.dev == NULL) { ret = -EIO; break; } @@ -2617,8 +2697,8 @@ static long sonypi_misc_ioctl(struct file *fp, unsigned int cmd, break; } /* sync the backlight device status */ - sony_backlight_device->props.brightness = - sony_backlight_get_brightness(sony_backlight_device); + sony_bl_props.dev->props.brightness = + sony_backlight_get_brightness(sony_bl_props.dev); break; case SONYPI_IOCGBAT1CAP: if (ec_read16(SONYPI_BAT1_FULL, &val16)) { diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index efb3b6b9bcd..562fcf0dd2b 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -128,7 +128,8 @@ enum { }; /* ACPI HIDs */ -#define TPACPI_ACPI_HKEY_HID "IBM0068" +#define TPACPI_ACPI_IBM_HKEY_HID "IBM0068" +#define TPACPI_ACPI_LENOVO_HKEY_HID "LEN0068" #define TPACPI_ACPI_EC_HID "PNP0C09" /* Input IDs */ @@ -3879,7 +3880,8 @@ errexit: } static const struct acpi_device_id ibm_htk_device_ids[] = { - {TPACPI_ACPI_HKEY_HID, 0}, + {TPACPI_ACPI_IBM_HKEY_HID, 0}, + {TPACPI_ACPI_LENOVO_HKEY_HID, 0}, {"", 0}, }; diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c index ac2701b22e7..043ee3136e4 100644 --- a/drivers/rapidio/switches/idt_gen2.c +++ b/drivers/rapidio/switches/idt_gen2.c @@ -95,6 +95,9 @@ idtg2_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, else table++; + if (route_port == RIO_INVALID_ROUTE) + route_port = IDT_DEFAULT_ROUTE; + rio_mport_write_config_32(mport, destid, hopcount, LOCAL_RTE_CONF_DESTID_SEL, table); @@ -411,6 +414,12 @@ static int idtg2_switch_init(struct rio_dev *rdev, int do_enum) rdev->rswitch->em_handle = idtg2_em_handler; rdev->rswitch->sw_sysfs = idtg2_sysfs; + if (do_enum) { + /* Ensure that default routing is disabled on startup */ + rio_write_config_32(rdev, + RIO_STD_RTE_DEFAULT_PORT, IDT_NO_ROUTE); + } + return 0; } diff --git a/drivers/rapidio/switches/idtcps.c b/drivers/rapidio/switches/idtcps.c index 3a971077e7b..d06ee2d44b4 100644 --- a/drivers/rapidio/switches/idtcps.c +++ b/drivers/rapidio/switches/idtcps.c @@ -26,6 +26,9 @@ idtcps_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, { u32 result; + if (route_port == RIO_INVALID_ROUTE) + route_port = CPS_DEFAULT_ROUTE; + if (table == RIO_GLOBAL_TABLE) { rio_mport_write_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid); @@ -130,6 +133,9 @@ static int idtcps_switch_init(struct rio_dev *rdev, int do_enum) /* set TVAL = ~50us */ rio_write_config_32(rdev, rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8); + /* Ensure that default routing is disabled on startup */ + rio_write_config_32(rdev, + RIO_STD_RTE_DEFAULT_PORT, CPS_NO_ROUTE); } return 0; diff --git a/drivers/rapidio/switches/tsi57x.c b/drivers/rapidio/switches/tsi57x.c index 1a62934bfeb..db8b8028988 100644 --- a/drivers/rapidio/switches/tsi57x.c +++ b/drivers/rapidio/switches/tsi57x.c @@ -303,6 +303,12 @@ static int tsi57x_switch_init(struct 
rio_dev *rdev, int do_enum) rdev->rswitch->em_init = tsi57x_em_init; rdev->rswitch->em_handle = tsi57x_em_handler; + if (do_enum) { + /* Ensure that default routing is disabled on startup */ + rio_write_config_32(rdev, RIO_STD_RTE_DEFAULT_PORT, + RIO_INVALID_ROUTE); + } + return 0; } diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c index 8d46838dff8..755e1fe914a 100644 --- a/drivers/rtc/rtc-davinci.c +++ b/drivers/rtc/rtc-davinci.c @@ -524,6 +524,8 @@ static int __init davinci_rtc_probe(struct platform_device *pdev) goto fail2; } + platform_set_drvdata(pdev, davinci_rtc); + davinci_rtc->rtc = rtc_device_register(pdev->name, &pdev->dev, &davinci_rtc_ops, THIS_MODULE); if (IS_ERR(davinci_rtc->rtc)) { @@ -553,8 +555,6 @@ static int __init davinci_rtc_probe(struct platform_device *pdev) rtcss_write(davinci_rtc, PRTCSS_RTC_CCTRL_CAEN, PRTCSS_RTC_CCTRL); - platform_set_drvdata(pdev, davinci_rtc); - device_init_wakeup(&pdev->dev, 0); return 0; @@ -562,6 +562,7 @@ static int __init davinci_rtc_probe(struct platform_device *pdev) fail4: rtc_device_unregister(davinci_rtc->rtc); fail3: + platform_set_drvdata(pdev, NULL); iounmap(davinci_rtc->base); fail2: release_mem_region(davinci_rtc->pbase, davinci_rtc->base_size); diff --git a/drivers/rtc/rtc-ds1286.c b/drivers/rtc/rtc-ds1286.c index 60ce6960082..47e681df31e 100644 --- a/drivers/rtc/rtc-ds1286.c +++ b/drivers/rtc/rtc-ds1286.c @@ -355,6 +355,7 @@ static int __devinit ds1286_probe(struct platform_device *pdev) goto out; } spin_lock_init(&priv->lock); + platform_set_drvdata(pdev, priv); rtc = rtc_device_register("ds1286", &pdev->dev, &ds1286_ops, THIS_MODULE); if (IS_ERR(rtc)) { @@ -362,7 +363,6 @@ static int __devinit ds1286_probe(struct platform_device *pdev) goto out; } priv->rtc = rtc; - platform_set_drvdata(pdev, priv); return 0; out: diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c index 11ae64dcbf3..335551d333b 100644 --- a/drivers/rtc/rtc-ep93xx.c +++ b/drivers/rtc/rtc-ep93xx.c @@ -151,6 +151,7 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev) return -ENXIO; pdev->dev.platform_data = ep93xx_rtc; + platform_set_drvdata(pdev, rtc); rtc = rtc_device_register(pdev->name, &pdev->dev, &ep93xx_rtc_ops, THIS_MODULE); @@ -159,8 +160,6 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev) goto exit; } - platform_set_drvdata(pdev, rtc); - err = sysfs_create_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files); if (err) goto fail; @@ -168,9 +167,9 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev) return 0; fail: - platform_set_drvdata(pdev, NULL); rtc_device_unregister(rtc); exit: + platform_set_drvdata(pdev, NULL); pdev->dev.platform_data = NULL; return err; } diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index 69fe664a222..eda128fc1d3 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c @@ -783,6 +783,9 @@ static int m41t80_probe(struct i2c_client *client, goto exit; } + clientdata->features = id->driver_data; + i2c_set_clientdata(client, clientdata); + rtc = rtc_device_register(client->name, &client->dev, &m41t80_rtc_ops, THIS_MODULE); if (IS_ERR(rtc)) { @@ -792,8 +795,6 @@ static int m41t80_probe(struct i2c_client *client, } clientdata->rtc = rtc; - clientdata->features = id->driver_data; - i2c_set_clientdata(client, clientdata); /* Make sure HT (Halt Update) bit is cleared */ rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_HOUR); diff --git a/drivers/rtc/rtc-max8925.c b/drivers/rtc/rtc-max8925.c index 20494b5edc3..3bc046f427e 
100644 --- a/drivers/rtc/rtc-max8925.c +++ b/drivers/rtc/rtc-max8925.c @@ -258,6 +258,8 @@ static int __devinit max8925_rtc_probe(struct platform_device *pdev) } dev_set_drvdata(&pdev->dev, info); + /* XXX - isn't this redundant? */ + platform_set_drvdata(pdev, info); info->rtc_dev = rtc_device_register("max8925-rtc", &pdev->dev, &max8925_rtc_ops, THIS_MODULE); @@ -267,10 +269,9 @@ static int __devinit max8925_rtc_probe(struct platform_device *pdev) goto out_rtc; } - platform_set_drvdata(pdev, info); - return 0; out_rtc: + platform_set_drvdata(pdev, NULL); free_irq(chip->irq_base + MAX8925_IRQ_RTC_ALARM0, info); out_irq: kfree(info); diff --git a/drivers/rtc/rtc-max8998.c b/drivers/rtc/rtc-max8998.c index 3f7bc6b9fef..2e48aa60427 100644 --- a/drivers/rtc/rtc-max8998.c +++ b/drivers/rtc/rtc-max8998.c @@ -265,6 +265,8 @@ static int __devinit max8998_rtc_probe(struct platform_device *pdev) info->rtc = max8998->rtc; info->irq = max8998->irq_base + MAX8998_IRQ_ALARM0; + platform_set_drvdata(pdev, info); + info->rtc_dev = rtc_device_register("max8998-rtc", &pdev->dev, &max8998_rtc_ops, THIS_MODULE); @@ -274,8 +276,6 @@ static int __devinit max8998_rtc_probe(struct platform_device *pdev) goto out_rtc; } - platform_set_drvdata(pdev, info); - ret = request_threaded_irq(info->irq, NULL, max8998_rtc_alarm_irq, 0, "rtc-alarm0", info); @@ -293,6 +293,7 @@ static int __devinit max8998_rtc_probe(struct platform_device *pdev) return 0; out_rtc: + platform_set_drvdata(pdev, NULL); kfree(info); return ret; } diff --git a/drivers/rtc/rtc-mc13xxx.c b/drivers/rtc/rtc-mc13xxx.c index c5ac03793e7..a1a278bc340 100644 --- a/drivers/rtc/rtc-mc13xxx.c +++ b/drivers/rtc/rtc-mc13xxx.c @@ -349,11 +349,15 @@ static int __devinit mc13xxx_rtc_probe(struct platform_device *pdev) if (ret) goto err_alarm_irq_request; + mc13xxx_unlock(mc13xxx); + priv->rtc = rtc_device_register(pdev->name, &pdev->dev, &mc13xxx_rtc_ops, THIS_MODULE); if (IS_ERR(priv->rtc)) { ret = PTR_ERR(priv->rtc); + mc13xxx_lock(mc13xxx); + mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_TODA, priv); err_alarm_irq_request: @@ -365,12 +369,12 @@ err_reset_irq_status: mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_RTCRST, priv); err_reset_irq_request: + mc13xxx_unlock(mc13xxx); + platform_set_drvdata(pdev, NULL); kfree(priv); } - mc13xxx_unlock(mc13xxx); - return ret; } diff --git a/drivers/rtc/rtc-msm6242.c b/drivers/rtc/rtc-msm6242.c index 67820626e18..fcb113c1112 100644 --- a/drivers/rtc/rtc-msm6242.c +++ b/drivers/rtc/rtc-msm6242.c @@ -214,6 +214,7 @@ static int __init msm6242_rtc_probe(struct platform_device *dev) error = -ENOMEM; goto out_free_priv; } + platform_set_drvdata(dev, priv); rtc = rtc_device_register("rtc-msm6242", &dev->dev, &msm6242_rtc_ops, THIS_MODULE); @@ -223,10 +224,10 @@ static int __init msm6242_rtc_probe(struct platform_device *dev) } priv->rtc = rtc; - platform_set_drvdata(dev, priv); return 0; out_unmap: + platform_set_drvdata(dev, NULL); iounmap(priv->regs); out_free_priv: kfree(priv); diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c index 826ab64a8fa..d814417bee8 100644 --- a/drivers/rtc/rtc-mxc.c +++ b/drivers/rtc/rtc-mxc.c @@ -418,14 +418,6 @@ static int __init mxc_rtc_probe(struct platform_device *pdev) goto exit_put_clk; } - rtc = rtc_device_register(pdev->name, &pdev->dev, &mxc_rtc_ops, - THIS_MODULE); - if (IS_ERR(rtc)) { - ret = PTR_ERR(rtc); - goto exit_put_clk; - } - - pdata->rtc = rtc; platform_set_drvdata(pdev, pdata); /* Configure and enable the RTC */ @@ -438,8 +430,19 @@ static int __init mxc_rtc_probe(struct 
platform_device *pdev) pdata->irq = -1; } + rtc = rtc_device_register(pdev->name, &pdev->dev, &mxc_rtc_ops, + THIS_MODULE); + if (IS_ERR(rtc)) { + ret = PTR_ERR(rtc); + goto exit_clr_drvdata; + } + + pdata->rtc = rtc; + return 0; +exit_clr_drvdata: + platform_set_drvdata(pdev, NULL); exit_put_clk: clk_disable(pdata->clk); clk_put(pdata->clk); diff --git a/drivers/rtc/rtc-pcap.c b/drivers/rtc/rtc-pcap.c index a633abc4289..cd4f198cc2e 100644 --- a/drivers/rtc/rtc-pcap.c +++ b/drivers/rtc/rtc-pcap.c @@ -151,6 +151,8 @@ static int __devinit pcap_rtc_probe(struct platform_device *pdev) pcap_rtc->pcap = dev_get_drvdata(pdev->dev.parent); + platform_set_drvdata(pdev, pcap_rtc); + pcap_rtc->rtc = rtc_device_register("pcap", &pdev->dev, &pcap_rtc_ops, THIS_MODULE); if (IS_ERR(pcap_rtc->rtc)) { @@ -158,7 +160,6 @@ static int __devinit pcap_rtc_probe(struct platform_device *pdev) goto fail_rtc; } - platform_set_drvdata(pdev, pcap_rtc); timer_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_1HZ); alarm_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_TODA); @@ -177,6 +178,7 @@ fail_alarm: fail_timer: rtc_device_unregister(pcap_rtc->rtc); fail_rtc: + platform_set_drvdata(pdev, NULL); kfree(pcap_rtc); return err; } diff --git a/drivers/rtc/rtc-rp5c01.c b/drivers/rtc/rtc-rp5c01.c index 694da39b6dd..359da6d020b 100644 --- a/drivers/rtc/rtc-rp5c01.c +++ b/drivers/rtc/rtc-rp5c01.c @@ -249,15 +249,15 @@ static int __init rp5c01_rtc_probe(struct platform_device *dev) spin_lock_init(&priv->lock); + platform_set_drvdata(dev, priv); + rtc = rtc_device_register("rtc-rp5c01", &dev->dev, &rp5c01_rtc_ops, THIS_MODULE); if (IS_ERR(rtc)) { error = PTR_ERR(rtc); goto out_unmap; } - priv->rtc = rtc; - platform_set_drvdata(dev, priv); error = sysfs_create_bin_file(&dev->dev.kobj, &priv->nvram_attr); if (error) @@ -268,6 +268,7 @@ static int __init rp5c01_rtc_probe(struct platform_device *dev) out_unregister: rtc_device_unregister(rtc); out_unmap: + platform_set_drvdata(dev, NULL); iounmap(priv->regs); out_free_priv: kfree(priv); diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index b3466c491cd..16512ecae31 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c @@ -46,6 +46,7 @@ static struct clk *rtc_clk; static void __iomem *s3c_rtc_base; static int s3c_rtc_alarmno = NO_IRQ; static int s3c_rtc_tickno = NO_IRQ; +static bool wake_en; static enum s3c_cpu_type s3c_rtc_cpu_type; static DEFINE_SPINLOCK(s3c_rtc_pie_lock); @@ -562,8 +563,12 @@ static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state) } s3c_rtc_enable(pdev, 0); - if (device_may_wakeup(&pdev->dev)) - enable_irq_wake(s3c_rtc_alarmno); + if (device_may_wakeup(&pdev->dev) && !wake_en) { + if (enable_irq_wake(s3c_rtc_alarmno) == 0) + wake_en = true; + else + dev_err(&pdev->dev, "enable_irq_wake failed\n"); + } return 0; } @@ -579,8 +584,10 @@ static int s3c_rtc_resume(struct platform_device *pdev) writew(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON); } - if (device_may_wakeup(&pdev->dev)) + if (device_may_wakeup(&pdev->dev) && wake_en) { disable_irq_wake(s3c_rtc_alarmno); + wake_en = false; + } return 0; } diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 475e603fc58..86b6f1cc1b1 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -1742,11 +1742,20 @@ int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) static inline int _dasd_term_running_cqr(struct dasd_device *device) { struct dasd_ccw_req *cqr; + int rc; if (list_empty(&device->ccw_queue)) return 0; cqr = 
list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); - return device->discipline->term_IO(cqr); + rc = device->discipline->term_IO(cqr); + if (!rc) + /* + * CQR terminated because a more important request is pending. + * Undo decreasing of retry counter because this is + * not an error case. + */ + cqr->retries++; + return rc; } int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index 4b60ede07f0..be55fb2b1b1 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c @@ -518,6 +518,8 @@ static void __init insert_increment(u16 rn, int standby, int assigned) return; new_incr->rn = rn; new_incr->standby = standby; + if (!standby) + new_incr->usecount = 1; last_rn = 0; prev = &sclp_mem_list; list_for_each_entry(incr, &sclp_mem_list, list) { diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c index 83cea9a55e2..1b3924c2fff 100644 --- a/drivers/s390/char/tape_block.c +++ b/drivers/s390/char/tape_block.c @@ -236,7 +236,6 @@ tapeblock_setup_device(struct tape_device * device) disk->major = tapeblock_major; disk->first_minor = device->first_minor; disk->fops = &tapeblock_fops; - disk->events = DISK_EVENT_MEDIA_CHANGE; disk->private_data = tape_get_device(device); disk->queue = blkdat->request_queue; set_capacity(disk, 0); diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index e2d45c91b8e..9689d41c788 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c @@ -1292,8 +1292,10 @@ static struct scsi_host_template qpti_template = { .use_clustering = ENABLE_CLUSTERING, }; +static const struct of_device_id qpti_match[]; static int __devinit qpti_sbus_probe(struct platform_device *op) { + const struct of_device_id *match; struct scsi_host_template *tpnt; struct device_node *dp = op->dev.of_node; struct Scsi_Host *host; @@ -1301,9 +1303,10 @@ static int __devinit qpti_sbus_probe(struct platform_device *op) static int nqptis; const char *fcode; - if (!op->dev.of_match) + match = of_match_device(qpti_match, &op->dev); + if (!match) return -EINVAL; - tpnt = op->dev.of_match->data; + tpnt = match->data; /* Sometimes Antares cards come up not completely * setup, and we get a report of a zero IRQ. 
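The qpti_sbus_probe() hunk just above, like the hme_sbus_probe() one earlier and the of_serial, fsl_qe_udc and mpc8xxx_wdt hunks further down, replaces the removed op->dev.of_match shortcut with an explicit of_match_device() lookup. A minimal, self-contained sketch of the pattern (hypothetical vendor string and demo_* names; the real drivers forward-declare the table because it is defined below the probe function):

#include <linux/errno.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct demo_type {
	unsigned int quirks;		/* hypothetical per-compatible data */
};

static const struct demo_type demo_v1_type = { .quirks = 1 };

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "acme,demo-v1", .data = &demo_v1_type },
	{ /* sentinel */ },
};

static int demo_of_probe(struct platform_device *op)
{
	const struct of_device_id *match;
	const struct demo_type *type;

	/* look the device up in the driver's own match table */
	match = of_match_device(demo_of_match, &op->dev);
	if (!match)
		return -EINVAL;

	type = match->data;		/* the .data field set in the table */
	dev_info(&op->dev, "quirks=%u\n", type->quirks);
	return 0;
}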
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index e9901b8f844..ec1803a4872 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -74,8 +74,6 @@ struct kmem_cache *scsi_sdb_cache; */ #define SCSI_QUEUE_DELAY 3 -static void scsi_run_queue(struct request_queue *q); - /* * Function: scsi_unprep_request() * @@ -161,7 +159,7 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy) blk_requeue_request(q, cmd->request); spin_unlock_irqrestore(q->queue_lock, flags); - scsi_run_queue(q); + kblockd_schedule_work(q, &device->requeue_work); return 0; } @@ -400,10 +398,15 @@ static inline int scsi_host_is_busy(struct Scsi_Host *shost) static void scsi_run_queue(struct request_queue *q) { struct scsi_device *sdev = q->queuedata; - struct Scsi_Host *shost = sdev->host; + struct Scsi_Host *shost; LIST_HEAD(starved_list); unsigned long flags; + /* if the device is dead, sdev will be NULL, so no queue to run */ + if (!sdev) + return; + + shost = sdev->host; if (scsi_target(sdev)->single_lun) scsi_single_lun_run(sdev); @@ -433,7 +436,11 @@ static void scsi_run_queue(struct request_queue *q) continue; } - blk_run_queue_async(sdev->request_queue); + spin_unlock(shost->host_lock); + spin_lock(sdev->request_queue->queue_lock); + __blk_run_queue(sdev->request_queue); + spin_unlock(sdev->request_queue->queue_lock); + spin_lock(shost->host_lock); } /* put any unprocessed entries back */ list_splice(&starved_list, &shost->starved_list); @@ -442,6 +449,16 @@ static void scsi_run_queue(struct request_queue *q) blk_run_queue(q); } +void scsi_requeue_run_queue(struct work_struct *work) +{ + struct scsi_device *sdev; + struct request_queue *q; + + sdev = container_of(work, struct scsi_device, requeue_work); + q = sdev->request_queue; + scsi_run_queue(q); +} + /* * Function: scsi_requeue_command() * diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 087821fac8f..58584dc0724 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -242,6 +242,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, int display_failure_msg = 1, ret; struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); extern void scsi_evt_thread(struct work_struct *work); + extern void scsi_requeue_run_queue(struct work_struct *work); sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size, GFP_ATOMIC); @@ -264,6 +265,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, INIT_LIST_HEAD(&sdev->event_list); spin_lock_init(&sdev->list_lock); INIT_WORK(&sdev->event_work, scsi_evt_thread); + INIT_WORK(&sdev->requeue_work, scsi_requeue_run_queue); sdev->sdev_gendev.parent = get_device(&starget->dev); sdev->sdev_target = starget; diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c index 0e8eec516df..c911b2419ab 100644 --- a/drivers/tty/serial/of_serial.c +++ b/drivers/tty/serial/of_serial.c @@ -80,14 +80,17 @@ static int __devinit of_platform_serial_setup(struct platform_device *ofdev, /* * Try to register a serial port */ +static struct of_device_id of_platform_serial_table[]; static int __devinit of_platform_serial_probe(struct platform_device *ofdev) { + const struct of_device_id *match; struct of_serial_info *info; struct uart_port port; int port_type; int ret; - if (!ofdev->dev.of_match) + match = of_match_device(of_platform_serial_table, &ofdev->dev); + if (!match) return -EINVAL; if (of_find_property(ofdev->dev.of_node, "used-by-rtas", NULL)) @@ -97,7 +100,7 @@ static int __devinit 
of_platform_serial_probe(struct platform_device *ofdev) if (info == NULL) return -ENOMEM; - port_type = (unsigned long)ofdev->dev.of_match->data; + port_type = (unsigned long)match->data; ret = of_platform_serial_setup(ofdev, port_type, &port); if (ret) goto out; diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c index 36613b37c50..3a68e09309f 100644 --- a/drivers/usb/gadget/fsl_qe_udc.c +++ b/drivers/usb/gadget/fsl_qe_udc.c @@ -2539,15 +2539,18 @@ static void qe_udc_release(struct device *dev) } /* Driver probe functions */ +static const struct of_device_id qe_udc_match[]; static int __devinit qe_udc_probe(struct platform_device *ofdev) { + const struct of_device_id *match; struct device_node *np = ofdev->dev.of_node; struct qe_ep *ep; unsigned int ret = 0; unsigned int i; const void *prop; - if (!ofdev->dev.of_match) + match = of_match_device(qe_udc_match, &ofdev->dev); + if (!match) return -EINVAL; prop = of_get_property(np, "mode", NULL); @@ -2561,7 +2564,7 @@ static int __devinit qe_udc_probe(struct platform_device *ofdev) return -ENOMEM; } - udc_controller->soc_type = (unsigned long)ofdev->dev.of_match->data; + udc_controller->soc_type = (unsigned long)match->data; udc_controller->usb_regs = of_iomap(np, 0); if (!udc_controller->usb_regs) { ret = -ENOMEM; diff --git a/drivers/video/acornfb.c b/drivers/video/acornfb.c index 82acb8dc4aa..6183a57eb69 100644 --- a/drivers/video/acornfb.c +++ b/drivers/video/acornfb.c @@ -66,7 +66,7 @@ * have. Allow 1% either way on the nominal for TVs. */ #define NR_MONTYPES 6 -static struct fb_monspecs monspecs[NR_MONTYPES] __initdata = { +static struct fb_monspecs monspecs[NR_MONTYPES] __devinitdata = { { /* TV */ .hfmin = 15469, .hfmax = 15781, @@ -873,7 +873,7 @@ static struct fb_ops acornfb_ops = { /* * Everything after here is initialisation!!! */ -static struct fb_videomode modedb[] __initdata = { +static struct fb_videomode modedb[] __devinitdata = { { /* 320x256 @ 50Hz */ NULL, 50, 320, 256, 125000, 92, 62, 35, 19, 38, 2, FB_SYNC_COMP_HIGH_ACT, @@ -925,8 +925,7 @@ static struct fb_videomode modedb[] __initdata = { } }; -static struct fb_videomode __initdata -acornfb_default_mode = { +static struct fb_videomode acornfb_default_mode __devinitdata = { .name = NULL, .refresh = 60, .xres = 640, @@ -942,7 +941,7 @@ acornfb_default_mode = { .vmode = FB_VMODE_NONINTERLACED }; -static void __init acornfb_init_fbinfo(void) +static void __devinit acornfb_init_fbinfo(void) { static int first = 1; @@ -1018,8 +1017,7 @@ static void __init acornfb_init_fbinfo(void) * size can optionally be followed by 'M' or 'K' for * MB or KB respectively. 
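The acornfb hunks here (and continuing below) move data and helpers reached from the probe path from __init/__initdata to __devinit/__devinitdata: probe can run after the init sections have been discarded, so anything it touches must live in the devinit sections instead. A tiny hypothetical illustration of the rule:

#include <linux/init.h>
#include <linux/platform_device.h>

/* referenced from the probe path, so __devinitdata rather than __initdata */
static int demo_default_refresh __devinitdata = 60;

static int __devinit demo_fb_probe(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "defaulting to %d Hz\n", demo_default_refresh);
	return 0;
}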
*/ -static void __init -acornfb_parse_mon(char *opt) +static void __devinit acornfb_parse_mon(char *opt) { char *p = opt; @@ -1066,8 +1064,7 @@ bad: current_par.montype = -1; } -static void __init -acornfb_parse_montype(char *opt) +static void __devinit acornfb_parse_montype(char *opt) { current_par.montype = -2; @@ -1108,8 +1105,7 @@ acornfb_parse_montype(char *opt) } } -static void __init -acornfb_parse_dram(char *opt) +static void __devinit acornfb_parse_dram(char *opt) { unsigned int size; @@ -1134,15 +1130,14 @@ acornfb_parse_dram(char *opt) static struct options { char *name; void (*parse)(char *opt); -} opt_table[] __initdata = { +} opt_table[] __devinitdata = { { "mon", acornfb_parse_mon }, { "montype", acornfb_parse_montype }, { "dram", acornfb_parse_dram }, { NULL, NULL } }; -int __init -acornfb_setup(char *options) +static int __devinit acornfb_setup(char *options) { struct options *optp; char *opt; @@ -1179,8 +1174,7 @@ acornfb_setup(char *options) * Detect type of monitor connected * For now, we just assume SVGA */ -static int __init -acornfb_detect_monitortype(void) +static int __devinit acornfb_detect_monitortype(void) { return 4; } diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c index e0c2284924b..5aac00eb183 100644 --- a/drivers/video/fbmem.c +++ b/drivers/video/fbmem.c @@ -42,9 +42,34 @@ #define FBPIXMAPSIZE (1024 * 8) +static DEFINE_MUTEX(registration_lock); struct fb_info *registered_fb[FB_MAX] __read_mostly; int num_registered_fb __read_mostly; +static struct fb_info *get_fb_info(unsigned int idx) +{ + struct fb_info *fb_info; + + if (idx >= FB_MAX) + return ERR_PTR(-ENODEV); + + mutex_lock(®istration_lock); + fb_info = registered_fb[idx]; + if (fb_info) + atomic_inc(&fb_info->count); + mutex_unlock(®istration_lock); + + return fb_info; +} + +static void put_fb_info(struct fb_info *fb_info) +{ + if (!atomic_dec_and_test(&fb_info->count)) + return; + if (fb_info->fbops->fb_destroy) + fb_info->fbops->fb_destroy(fb_info); +} + int lock_fb_info(struct fb_info *info) { mutex_lock(&info->lock); @@ -647,6 +672,7 @@ int fb_show_logo(struct fb_info *info, int rotate) { return 0; } static void *fb_seq_start(struct seq_file *m, loff_t *pos) { + mutex_lock(®istration_lock); return (*pos < FB_MAX) ? pos : NULL; } @@ -658,6 +684,7 @@ static void *fb_seq_next(struct seq_file *m, void *v, loff_t *pos) static void fb_seq_stop(struct seq_file *m, void *v) { + mutex_unlock(®istration_lock); } static int fb_seq_show(struct seq_file *m, void *v) @@ -690,13 +717,30 @@ static const struct file_operations fb_proc_fops = { .release = seq_release, }; -static ssize_t -fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) +/* + * We hold a reference to the fb_info in file->private_data, + * but if the current registered fb has changed, we don't + * actually want to use it. + * + * So look up the fb_info using the inode minor number, + * and just verify it against the reference we have. 
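The new get_fb_info()/put_fb_info() pair above makes framebuffer lookup take a reference under registration_lock and defers the driver's fb_destroy() hook until the last reference is dropped. A stripped-down sketch of that lookup-and-refcount shape, with hypothetical demo_* names in place of the fbmem types:

#include <linux/mutex.h>
#include <linux/atomic.h>
#include <linux/err.h>

#define DEMO_MAX 32

struct demo_obj {
	atomic_t count;
	void (*destroy)(struct demo_obj *obj);
};

static struct demo_obj *demo_table[DEMO_MAX];
static DEFINE_MUTEX(demo_lock);

static struct demo_obj *demo_get(unsigned int idx)
{
	struct demo_obj *obj;

	if (idx >= DEMO_MAX)
		return ERR_PTR(-ENODEV);

	mutex_lock(&demo_lock);
	obj = demo_table[idx];
	if (obj)
		atomic_inc(&obj->count);	/* hold a reference while in use */
	mutex_unlock(&demo_lock);

	return obj;			/* NULL if nothing registered at idx */
}

static void demo_put(struct demo_obj *obj)
{
	if (atomic_dec_and_test(&obj->count))
		obj->destroy(obj);	/* last reference gone; free it */
}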
+ */ +static struct fb_info *file_fb_info(struct file *file) { - unsigned long p = *ppos; struct inode *inode = file->f_path.dentry->d_inode; int fbidx = iminor(inode); struct fb_info *info = registered_fb[fbidx]; + + if (info != file->private_data) + info = NULL; + return info; +} + +static ssize_t +fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) +{ + unsigned long p = *ppos; + struct fb_info *info = file_fb_info(file); u8 *buffer, *dst; u8 __iomem *src; int c, cnt = 0, err = 0; @@ -761,9 +805,7 @@ static ssize_t fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { unsigned long p = *ppos; - struct inode *inode = file->f_path.dentry->d_inode; - int fbidx = iminor(inode); - struct fb_info *info = registered_fb[fbidx]; + struct fb_info *info = file_fb_info(file); u8 *buffer, *src; u8 __iomem *dst; int c, cnt = 0, err = 0; @@ -1141,10 +1183,10 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, static long fb_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - struct inode *inode = file->f_path.dentry->d_inode; - int fbidx = iminor(inode); - struct fb_info *info = registered_fb[fbidx]; + struct fb_info *info = file_fb_info(file); + if (!info) + return -ENODEV; return do_fb_ioctl(info, cmd, arg); } @@ -1265,12 +1307,13 @@ static int fb_get_fscreeninfo(struct fb_info *info, unsigned int cmd, static long fb_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - struct inode *inode = file->f_path.dentry->d_inode; - int fbidx = iminor(inode); - struct fb_info *info = registered_fb[fbidx]; - struct fb_ops *fb = info->fbops; + struct fb_info *info = file_fb_info(file); + struct fb_ops *fb; long ret = -ENOIOCTLCMD; + if (!info) + return -ENODEV; + fb = info->fbops; switch(cmd) { case FBIOGET_VSCREENINFO: case FBIOPUT_VSCREENINFO: @@ -1303,16 +1346,18 @@ static long fb_compat_ioctl(struct file *file, unsigned int cmd, static int fb_mmap(struct file *file, struct vm_area_struct * vma) { - int fbidx = iminor(file->f_path.dentry->d_inode); - struct fb_info *info = registered_fb[fbidx]; - struct fb_ops *fb = info->fbops; + struct fb_info *info = file_fb_info(file); + struct fb_ops *fb; unsigned long off; unsigned long start; u32 len; + if (!info) + return -ENODEV; if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) return -EINVAL; off = vma->vm_pgoff << PAGE_SHIFT; + fb = info->fbops; if (!fb) return -ENODEV; mutex_lock(&info->mm_lock); @@ -1361,14 +1406,16 @@ __releases(&info->lock) struct fb_info *info; int res = 0; - if (fbidx >= FB_MAX) - return -ENODEV; - info = registered_fb[fbidx]; - if (!info) + info = get_fb_info(fbidx); + if (!info) { request_module("fb%d", fbidx); - info = registered_fb[fbidx]; - if (!info) - return -ENODEV; + info = get_fb_info(fbidx); + if (!info) + return -ENODEV; + } + if (IS_ERR(info)) + return PTR_ERR(info); + mutex_lock(&info->lock); if (!try_module_get(info->fbops->owner)) { res = -ENODEV; @@ -1386,6 +1433,8 @@ __releases(&info->lock) #endif out: mutex_unlock(&info->lock); + if (res) + put_fb_info(info); return res; } @@ -1401,6 +1450,7 @@ __releases(&info->lock) info->fbops->fb_release(info,1); module_put(info->fbops->owner); mutex_unlock(&info->lock); + put_fb_info(info); return 0; } @@ -1487,8 +1537,10 @@ static bool fb_do_apertures_overlap(struct apertures_struct *gena, return false; } +static int do_unregister_framebuffer(struct fb_info *fb_info); + #define VGA_FB_PHYS 0xA0000 -void remove_conflicting_framebuffers(struct apertures_struct *a, +static void 
do_remove_conflicting_framebuffers(struct apertures_struct *a, const char *name, bool primary) { int i; @@ -1510,43 +1562,32 @@ void remove_conflicting_framebuffers(struct apertures_struct *a, printk(KERN_INFO "fb: conflicting fb hw usage " "%s vs %s - removing generic driver\n", name, registered_fb[i]->fix.id); - unregister_framebuffer(registered_fb[i]); + do_unregister_framebuffer(registered_fb[i]); } } } -EXPORT_SYMBOL(remove_conflicting_framebuffers); -/** - * register_framebuffer - registers a frame buffer device - * @fb_info: frame buffer info structure - * - * Registers a frame buffer device @fb_info. - * - * Returns negative errno on error, or zero for success. - * - */ - -int -register_framebuffer(struct fb_info *fb_info) +static int do_register_framebuffer(struct fb_info *fb_info) { int i; struct fb_event event; struct fb_videomode mode; - if (num_registered_fb == FB_MAX) - return -ENXIO; - if (fb_check_foreignness(fb_info)) return -ENOSYS; - remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id, + do_remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id, fb_is_primary_device(fb_info)); + if (num_registered_fb == FB_MAX) + return -ENXIO; + num_registered_fb++; for (i = 0 ; i < FB_MAX; i++) if (!registered_fb[i]) break; fb_info->node = i; + atomic_set(&fb_info->count, 1); mutex_init(&fb_info->lock); mutex_init(&fb_info->mm_lock); @@ -1592,36 +1633,14 @@ register_framebuffer(struct fb_info *fb_info) return 0; } - -/** - * unregister_framebuffer - releases a frame buffer device - * @fb_info: frame buffer info structure - * - * Unregisters a frame buffer device @fb_info. - * - * Returns negative errno on error, or zero for success. - * - * This function will also notify the framebuffer console - * to release the driver. - * - * This is meant to be called within a driver's module_exit() - * function. If this is called outside module_exit(), ensure - * that the driver implements fb_open() and fb_release() to - * check that no processes are using the device. 
- */ - -int -unregister_framebuffer(struct fb_info *fb_info) +static int do_unregister_framebuffer(struct fb_info *fb_info) { struct fb_event event; int i, ret = 0; i = fb_info->node; - if (!registered_fb[i]) { - ret = -EINVAL; - goto done; - } - + if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info) + return -EINVAL; if (!lock_fb_info(fb_info)) return -ENODEV; @@ -1629,16 +1648,14 @@ unregister_framebuffer(struct fb_info *fb_info) ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event); unlock_fb_info(fb_info); - if (ret) { - ret = -EINVAL; - goto done; - } + if (ret) + return -EINVAL; if (fb_info->pixmap.addr && (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT)) kfree(fb_info->pixmap.addr); fb_destroy_modelist(&fb_info->modelist); - registered_fb[i]=NULL; + registered_fb[i] = NULL; num_registered_fb--; fb_cleanup_device(fb_info); device_destroy(fb_class, MKDEV(FB_MAJOR, i)); @@ -1646,9 +1663,65 @@ unregister_framebuffer(struct fb_info *fb_info) fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event); /* this may free fb info */ - if (fb_info->fbops->fb_destroy) - fb_info->fbops->fb_destroy(fb_info); -done: + put_fb_info(fb_info); + return 0; +} + +void remove_conflicting_framebuffers(struct apertures_struct *a, + const char *name, bool primary) +{ + mutex_lock(®istration_lock); + do_remove_conflicting_framebuffers(a, name, primary); + mutex_unlock(®istration_lock); +} +EXPORT_SYMBOL(remove_conflicting_framebuffers); + +/** + * register_framebuffer - registers a frame buffer device + * @fb_info: frame buffer info structure + * + * Registers a frame buffer device @fb_info. + * + * Returns negative errno on error, or zero for success. + * + */ +int +register_framebuffer(struct fb_info *fb_info) +{ + int ret; + + mutex_lock(®istration_lock); + ret = do_register_framebuffer(fb_info); + mutex_unlock(®istration_lock); + + return ret; +} + +/** + * unregister_framebuffer - releases a frame buffer device + * @fb_info: frame buffer info structure + * + * Unregisters a frame buffer device @fb_info. + * + * Returns negative errno on error, or zero for success. + * + * This function will also notify the framebuffer console + * to release the driver. + * + * This is meant to be called within a driver's module_exit() + * function. If this is called outside module_exit(), ensure + * that the driver implements fb_open() and fb_release() to + * check that no processes are using the device. 
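register_framebuffer(), unregister_framebuffer() and remove_conflicting_framebuffers() are rebuilt above as thin public wrappers that take registration_lock around do_*() helpers, so internal callers that already hold the lock (such as the conflict removal done during registration) can call the helpers directly. A hypothetical sketch of that wrapper split:

#include <linux/mutex.h>
#include <linux/errno.h>

struct demo_obj;			/* opaque here; only pointers are used */

static DEFINE_MUTEX(demo_registration_lock);
static struct demo_obj *demo_registered;

/* the do_*() helpers assume demo_registration_lock is already held */
static int do_demo_register(struct demo_obj *obj)
{
	if (demo_registered)
		return -EBUSY;
	demo_registered = obj;
	return 0;
}

static int do_demo_unregister(struct demo_obj *obj)
{
	if (demo_registered != obj)
		return -EINVAL;
	demo_registered = NULL;
	return 0;
}

/* public entry points only take the lock and delegate */
static int demo_register(struct demo_obj *obj)
{
	int ret;

	mutex_lock(&demo_registration_lock);
	ret = do_demo_register(obj);
	mutex_unlock(&demo_registration_lock);
	return ret;
}

static int demo_unregister(struct demo_obj *obj)
{
	int ret;

	mutex_lock(&demo_registration_lock);
	ret = do_demo_unregister(obj);
	mutex_unlock(&demo_registration_lock);
	return ret;
}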
+ */ +int +unregister_framebuffer(struct fb_info *fb_info) +{ + int ret; + + mutex_lock(®istration_lock); + ret = do_unregister_framebuffer(fb_info); + mutex_unlock(®istration_lock); + return ret; } diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c index 528bceb220f..eed5436ffb5 100644 --- a/drivers/watchdog/mpc8xxx_wdt.c +++ b/drivers/watchdog/mpc8xxx_wdt.c @@ -185,17 +185,20 @@ static struct miscdevice mpc8xxx_wdt_miscdev = { .fops = &mpc8xxx_wdt_fops, }; +static const struct of_device_id mpc8xxx_wdt_match[]; static int __devinit mpc8xxx_wdt_probe(struct platform_device *ofdev) { int ret; + const struct of_device_id *match; struct device_node *np = ofdev->dev.of_node; struct mpc8xxx_wdt_type *wdt_type; u32 freq = fsl_get_sys_freq(); bool enabled; - if (!ofdev->dev.of_match) + match = of_match_device(mpc8xxx_wdt_match, &ofdev->dev); + if (!match) return -EINVAL; - wdt_type = ofdev->dev.of_match->data; + wdt_type = match->data; if (!freq || freq == -1) return -EINVAL; diff --git a/fs/block_dev.c b/fs/block_dev.c index 5147bdd3b8e..257b00e9842 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -1102,6 +1102,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) if (!bdev->bd_part) goto out_clear; + ret = 0; if (disk->fops->open) { ret = disk->fops->open(bdev, mode); if (ret == -ERESTARTSYS) { @@ -1118,9 +1119,18 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) put_disk(disk); goto restart; } - if (ret) - goto out_clear; } + /* + * If the device is invalidated, rescan partition + * if open succeeded or failed with -ENOMEDIUM. + * The latter is necessary to prevent ghost + * partitions on a removed medium. + */ + if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM)) + rescan_partitions(disk, bdev); + if (ret) + goto out_clear; + if (!bdev->bd_openers) { bd_set_size(bdev,(loff_t)get_capacity(disk)<<9); bdi = blk_get_backing_dev_info(bdev); @@ -1128,8 +1138,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) bdi = &default_backing_dev_info; bdev_inode_switch_bdi(bdev->bd_inode, bdi); } - if (bdev->bd_invalidated) - rescan_partitions(disk, bdev); } else { struct block_device *whole; whole = bdget_disk(disk, 0); @@ -1153,13 +1161,14 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) } } else { if (bdev->bd_contains == bdev) { - if (bdev->bd_disk->fops->open) { + ret = 0; + if (bdev->bd_disk->fops->open) ret = bdev->bd_disk->fops->open(bdev, mode); - if (ret) - goto out_unlock_bdev; - } - if (bdev->bd_invalidated) + /* the same as first opener case, read comment there */ + if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM)) rescan_partitions(bdev->bd_disk, bdev); + if (ret) + goto out_unlock_bdev; } /* only one opener holds refs to the module and disk */ module_put(disk->fops->owner); diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index 5d505aaa72f..44ea5b92e1b 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c @@ -178,12 +178,13 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name, if (value) { acl = posix_acl_from_xattr(value, size); + if (IS_ERR(acl)) + return PTR_ERR(acl); + if (acl) { ret = posix_acl_valid(acl); if (ret) goto out; - } else if (IS_ERR(acl)) { - return PTR_ERR(acl); } } diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index cd52f7f556e..9ee6bd55e16 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -8856,23 +8856,38 @@ out: int btrfs_init_space_info(struct btrfs_fs_info 
*fs_info) { struct btrfs_space_info *space_info; + struct btrfs_super_block *disk_super; + u64 features; + u64 flags; + int mixed = 0; int ret; - ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM, 0, 0, - &space_info); - if (ret) - return ret; + disk_super = &fs_info->super_copy; + if (!btrfs_super_root(disk_super)) + return 1; - ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA, 0, 0, - &space_info); - if (ret) - return ret; + features = btrfs_super_incompat_flags(disk_super); + if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) + mixed = 1; - ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA, 0, 0, - &space_info); + flags = BTRFS_BLOCK_GROUP_SYSTEM; + ret = update_space_info(fs_info, flags, 0, 0, &space_info); if (ret) - return ret; + goto out; + if (mixed) { + flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA; + ret = update_space_info(fs_info, flags, 0, 0, &space_info); + } else { + flags = BTRFS_BLOCK_GROUP_METADATA; + ret = update_space_info(fs_info, flags, 0, 0, &space_info); + if (ret) + goto out; + + flags = BTRFS_BLOCK_GROUP_DATA; + ret = update_space_info(fs_info, flags, 0, 0, &space_info); + } +out: return ret; } diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index ffb48d6c543..2616f7ed479 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -81,6 +81,13 @@ static unsigned int btrfs_flags_to_ioctl(unsigned int flags) iflags |= FS_NOATIME_FL; if (flags & BTRFS_INODE_DIRSYNC) iflags |= FS_DIRSYNC_FL; + if (flags & BTRFS_INODE_NODATACOW) + iflags |= FS_NOCOW_FL; + + if ((flags & BTRFS_INODE_COMPRESS) && !(flags & BTRFS_INODE_NOCOMPRESS)) + iflags |= FS_COMPR_FL; + else if (flags & BTRFS_INODE_NOCOMPRESS) + iflags |= FS_NOCOMP_FL; return iflags; } @@ -144,16 +151,13 @@ static int check_flags(unsigned int flags) if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \ FS_NOATIME_FL | FS_NODUMP_FL | \ FS_SYNC_FL | FS_DIRSYNC_FL | \ - FS_NOCOMP_FL | FS_COMPR_FL | \ - FS_NOCOW_FL | FS_COW_FL)) + FS_NOCOMP_FL | FS_COMPR_FL | + FS_NOCOW_FL)) return -EOPNOTSUPP; if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL)) return -EINVAL; - if ((flags & FS_NOCOW_FL) && (flags & FS_COW_FL)) - return -EINVAL; - return 0; } @@ -218,6 +222,10 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) ip->flags |= BTRFS_INODE_DIRSYNC; else ip->flags &= ~BTRFS_INODE_DIRSYNC; + if (flags & FS_NOCOW_FL) + ip->flags |= BTRFS_INODE_NODATACOW; + else + ip->flags &= ~BTRFS_INODE_NODATACOW; /* * The COMPRESS flag can only be changed by users, while the NOCOMPRESS @@ -230,11 +238,9 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) } else if (flags & FS_COMPR_FL) { ip->flags |= BTRFS_INODE_COMPRESS; ip->flags &= ~BTRFS_INODE_NOCOMPRESS; + } else { + ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS); } - if (flags & FS_NOCOW_FL) - ip->flags |= BTRFS_INODE_NODATACOW; - else if (flags & FS_COW_FL) - ip->flags &= ~BTRFS_INODE_NODATACOW; trans = btrfs_join_transaction(root, 1); BUG_ON(IS_ERR(trans)); diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 9fa08662a88..2a5404c1c42 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -819,7 +819,7 @@ int __ceph_caps_used(struct ceph_inode_info *ci) used |= CEPH_CAP_FILE_CACHE; if (ci->i_wr_ref) used |= CEPH_CAP_FILE_WR; - if (ci->i_wrbuffer_ref) + if (ci->i_wb_ref || ci->i_wrbuffer_ref) used |= CEPH_CAP_FILE_BUFFER; return used; } @@ -1990,11 +1990,11 @@ static void __take_cap_refs(struct ceph_inode_info *ci, int got) if (got & CEPH_CAP_FILE_WR) ci->i_wr_ref++; if (got & CEPH_CAP_FILE_BUFFER) { - 
if (ci->i_wrbuffer_ref == 0) + if (ci->i_wb_ref == 0) ihold(&ci->vfs_inode); - ci->i_wrbuffer_ref++; - dout("__take_cap_refs %p wrbuffer %d -> %d (?)\n", - &ci->vfs_inode, ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref); + ci->i_wb_ref++; + dout("__take_cap_refs %p wb %d -> %d (?)\n", + &ci->vfs_inode, ci->i_wb_ref-1, ci->i_wb_ref); } } @@ -2169,12 +2169,12 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had) if (--ci->i_rdcache_ref == 0) last++; if (had & CEPH_CAP_FILE_BUFFER) { - if (--ci->i_wrbuffer_ref == 0) { + if (--ci->i_wb_ref == 0) { last++; put++; } - dout("put_cap_refs %p wrbuffer %d -> %d (?)\n", - inode, ci->i_wrbuffer_ref+1, ci->i_wrbuffer_ref); + dout("put_cap_refs %p wb %d -> %d (?)\n", + inode, ci->i_wb_ref+1, ci->i_wb_ref); } if (had & CEPH_CAP_FILE_WR) if (--ci->i_wr_ref == 0) { diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 03d6dafda61..70b6a4839c3 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -355,6 +355,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb) ci->i_rd_ref = 0; ci->i_rdcache_ref = 0; ci->i_wr_ref = 0; + ci->i_wb_ref = 0; ci->i_wrbuffer_ref = 0; ci->i_wrbuffer_ref_head = 0; ci->i_shared_gen = 0; diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index f60b07b0feb..d0fae4ce9ba 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -3304,8 +3304,8 @@ static void con_put(struct ceph_connection *con) { struct ceph_mds_session *s = con->private; + dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref) - 1); ceph_put_mds_session(s); - dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref)); } /* diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index e86ec1155f8..24067d68a55 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c @@ -206,7 +206,7 @@ void ceph_put_snap_realm(struct ceph_mds_client *mdsc, up_write(&mdsc->snap_rwsem); } else { spin_lock(&mdsc->snap_empty_lock); - list_add(&mdsc->snap_empty, &realm->empty_item); + list_add(&realm->empty_item, &mdsc->snap_empty); spin_unlock(&mdsc->snap_empty_lock); } } diff --git a/fs/ceph/super.h b/fs/ceph/super.h index b1f1b8bb127..f5cabefa98d 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -293,7 +293,7 @@ struct ceph_inode_info { /* held references to caps */ int i_pin_ref; - int i_rd_ref, i_rdcache_ref, i_wr_ref; + int i_rd_ref, i_rdcache_ref, i_wr_ref, i_wb_ref; int i_wrbuffer_ref, i_wrbuffer_ref_head; u32 i_shared_gen; /* increment each time we get FILE_SHARED */ u32 i_rdcache_gen; /* incremented each time we get FILE_CACHE. 
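The ceph hunks above split buffered-write references out of i_wrbuffer_ref into a new i_wb_ref counter: __take_cap_refs() pins the inode with ihold() on the 0 -> 1 transition, ceph_put_cap_refs() drops that pin when the count falls back to zero, and __ceph_caps_used() reports FILE_BUFFER if either counter is nonzero. A self-contained sketch of the take/put pattern, with obj_hold()/obj_put() standing in for ihold()/iput():

/*
 * Pin the owning object on the 0 -> 1 transition and unpin it on the
 * 1 -> 0 transition, as __take_cap_refs()/ceph_put_cap_refs() do above.
 */
#include <assert.h>
#include <stdio.h>

struct obj {
        int pins;       /* how many pins obj_hold() has taken */
        int wb_ref;     /* buffered-write references (i_wb_ref analogue) */
};

static void obj_hold(struct obj *o) { o->pins++; }
static void obj_put(struct obj *o)  { assert(o->pins > 0); o->pins--; }

static void take_wb_ref(struct obj *o)
{
        if (o->wb_ref == 0)              /* first reference pins the object */
                obj_hold(o);
        o->wb_ref++;
}

static void put_wb_ref(struct obj *o)
{
        assert(o->wb_ref > 0);
        if (--o->wb_ref == 0)            /* last reference drops the pin */
                obj_put(o);
}

int main(void)
{
        struct obj o = { 0, 0 };

        take_wb_ref(&o);
        take_wb_ref(&o);
        put_wb_ref(&o);
        put_wb_ref(&o);
        printf("pins=%d wb_ref=%d\n", o.pins, o.wb_ref);   /* 0 0 */
        return 0;
}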
*/ diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c index 23d43cde430..1b2e180b018 100644 --- a/fs/cifs/cifs_unicode.c +++ b/fs/cifs/cifs_unicode.c @@ -277,6 +277,7 @@ cifsConvertToUCS(__le16 *target, const char *source, int srclen, for (i = 0, j = 0; i < srclen; j++) { src_char = source[i]; + charlen = 1; switch (src_char) { case 0: put_unaligned(0, &target[j]); @@ -316,16 +317,13 @@ cifsConvertToUCS(__le16 *target, const char *source, int srclen, dst_char = cpu_to_le16(0x003f); charlen = 1; } - /* - * character may take more than one byte in the source - * string, but will take exactly two bytes in the - * target string - */ - i += charlen; - continue; } + /* + * character may take more than one byte in the source string, + * but will take exactly two bytes in the target string + */ + i += charlen; put_unaligned(dst_char, &target[j]); - i++; /* move to next char in source string */ } ctoUCS_out: diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 4bc862a80ef..277262a8e82 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -274,7 +274,8 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB) char *data_area_of_target; char *data_area_of_buf2; int remaining; - __u16 byte_count, total_data_size, total_in_buf, total_in_buf2; + unsigned int byte_count, total_in_buf; + __u16 total_data_size, total_in_buf2; total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount); @@ -287,7 +288,7 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB) remaining = total_data_size - total_in_buf; if (remaining < 0) - return -EINVAL; + return -EPROTO; if (remaining == 0) /* nothing to do, ignore */ return 0; @@ -308,20 +309,29 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB) data_area_of_target += total_in_buf; /* copy second buffer into end of first buffer */ - memcpy(data_area_of_target, data_area_of_buf2, total_in_buf2); total_in_buf += total_in_buf2; + /* is the result too big for the field? */ + if (total_in_buf > USHRT_MAX) + return -EPROTO; put_unaligned_le16(total_in_buf, &pSMBt->t2_rsp.DataCount); + + /* fix up the BCC */ byte_count = get_bcc_le(pTargetSMB); byte_count += total_in_buf2; + /* is the result too big for the field? 
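The cifsConvertToUCS() rework earlier in this hunk resets charlen to 1 at the top of each iteration and advances the source index with i += charlen exactly once, after the switch, so single-byte special cases and multi-byte characters can no longer get out of step. A toy conversion loop showing the same shape; the two-byte '+' escape below is an invented stand-in, not a CIFS rule.

/*
 * Per-iteration state (charlen) is reset up front and the source index
 * advances in one place, mirroring the cifsConvertToUCS() change above.
 */
#include <stdint.h>
#include <stdio.h>

/* pretend converter: '+' consumes two source bytes, everything else one */
static int convert_char(const char *src, uint16_t *out)
{
        if (src[0] == '+' && src[1] != '\0') {
                *out = (uint16_t)(0x100 + (unsigned char)src[1]);
                return 2;                        /* consumed two bytes */
        }
        *out = (unsigned char)src[0];
        return 1;
}

static size_t convert(const char *src, size_t srclen, uint16_t *dst)
{
        size_t i, j;
        int charlen;

        for (i = 0, j = 0; i < srclen; j++) {
                charlen = 1;                     /* reset every iteration */
                switch (src[i]) {
                case ':':                        /* single-byte special case */
                        dst[j] = 0xF022;
                        break;
                default:
                        charlen = convert_char(&src[i], &dst[j]);
                        break;
                }
                i += charlen;                    /* advance exactly once */
        }
        return j;
}

int main(void)
{
        uint16_t out[16];
        size_t n = convert("a:+b", 4, out);
        printf("%zu code units\n", n);           /* 3 */
        return 0;
}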
*/ + if (byte_count > USHRT_MAX) + return -EPROTO; put_bcc_le(byte_count, pTargetSMB); byte_count = pTargetSMB->smb_buf_length; byte_count += total_in_buf2; - - /* BB also add check that we are not beyond maximum buffer size */ - + /* don't allow buffer to overflow */ + if (byte_count > CIFSMaxBufSize) + return -ENOBUFS; pTargetSMB->smb_buf_length = byte_count; + memcpy(data_area_of_target, data_area_of_buf2, total_in_buf2); + if (remaining == total_in_buf2) { cFYI(1, "found the last secondary response"); return 0; /* we are done */ @@ -607,59 +617,63 @@ incomplete_rcv: list_for_each_safe(tmp, tmp2, &server->pending_mid_q) { mid_entry = list_entry(tmp, struct mid_q_entry, qhead); - if ((mid_entry->mid == smb_buffer->Mid) && - (mid_entry->midState == MID_REQUEST_SUBMITTED) && - (mid_entry->command == smb_buffer->Command)) { - if (length == 0 && - check2ndT2(smb_buffer, server->maxBuf) > 0) { - /* We have a multipart transact2 resp */ - isMultiRsp = true; - if (mid_entry->resp_buf) { - /* merge response - fix up 1st*/ - if (coalesce_t2(smb_buffer, - mid_entry->resp_buf)) { - mid_entry->multiRsp = - true; - break; - } else { - /* all parts received */ - mid_entry->multiEnd = - true; - goto multi_t2_fnd; - } + if (mid_entry->mid != smb_buffer->Mid || + mid_entry->midState != MID_REQUEST_SUBMITTED || + mid_entry->command != smb_buffer->Command) { + mid_entry = NULL; + continue; + } + + if (length == 0 && + check2ndT2(smb_buffer, server->maxBuf) > 0) { + /* We have a multipart transact2 resp */ + isMultiRsp = true; + if (mid_entry->resp_buf) { + /* merge response - fix up 1st*/ + length = coalesce_t2(smb_buffer, + mid_entry->resp_buf); + if (length > 0) { + length = 0; + mid_entry->multiRsp = true; + break; } else { - if (!isLargeBuf) { - cERROR(1, "1st trans2 resp needs bigbuf"); - /* BB maybe we can fix this up, switch - to already allocated large buffer? */ - } else { - /* Have first buffer */ - mid_entry->resp_buf = - smb_buffer; - mid_entry->largeBuf = - true; - bigbuf = NULL; - } + /* all parts received or + * packet is malformed + */ + mid_entry->multiEnd = true; + goto multi_t2_fnd; + } + } else { + if (!isLargeBuf) { + /* + * FIXME: switch to already + * allocated largebuf? 
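coalesce_t2() above now validates every derived length before it touches the target buffer: the merged DataCount and byte count must still fit their 16-bit on-wire fields (-EPROTO otherwise), the total must stay under CIFSMaxBufSize (-ENOBUFS), and only then is the memcpy performed. A simplified sketch of that check-then-commit ordering; the struct layout and MAX_BUF_SIZE value are illustrative.

/*
 * All range checks happen before the memcpy that mutates the target, so
 * a rejected append leaves the message untouched.
 */
#include <errno.h>
#include <limits.h>
#include <stdint.h>
#include <string.h>

#define MAX_BUF_SIZE 4096u               /* stand-in for CIFSMaxBufSize */

struct msg {
        uint32_t buf_len;                /* total bytes used in data[] */
        uint16_t data_count;             /* 16-bit on-wire payload length */
        uint8_t  data[MAX_BUF_SIZE];
};

static int msg_append(struct msg *m, const void *src, uint16_t len)
{
        uint32_t new_count = (uint32_t)m->data_count + len;
        uint32_t new_len   = m->buf_len + len;

        if (new_count > USHRT_MAX)       /* would overflow the 16-bit field */
                return -EPROTO;
        if (new_len > MAX_BUF_SIZE)      /* would overflow the buffer itself */
                return -ENOBUFS;

        /* only now is it safe to modify the target message */
        memcpy(m->data + m->buf_len, src, len);
        m->data_count = (uint16_t)new_count;
        m->buf_len = new_len;
        return 0;
}

int main(void)
{
        static struct msg m;
        const char payload[200] = "x";

        return msg_append(&m, payload, sizeof(payload)) ? 1 : 0;
}

Rejecting the append before any state changes is what keeps a malformed second response from leaving a half-patched first buffer behind, which is the failure mode the original code risked.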
+ */ + cERROR(1, "1st trans2 resp " + "needs bigbuf"); + } else { + /* Have first buffer */ + mid_entry->resp_buf = + smb_buffer; + mid_entry->largeBuf = true; + bigbuf = NULL; } - break; } - mid_entry->resp_buf = smb_buffer; - mid_entry->largeBuf = isLargeBuf; + break; + } + mid_entry->resp_buf = smb_buffer; + mid_entry->largeBuf = isLargeBuf; multi_t2_fnd: - if (length == 0) - mid_entry->midState = - MID_RESPONSE_RECEIVED; - else - mid_entry->midState = - MID_RESPONSE_MALFORMED; + if (length == 0) + mid_entry->midState = MID_RESPONSE_RECEIVED; + else + mid_entry->midState = MID_RESPONSE_MALFORMED; #ifdef CONFIG_CIFS_STATS2 - mid_entry->when_received = jiffies; + mid_entry->when_received = jiffies; #endif - list_del_init(&mid_entry->qhead); - mid_entry->callback(mid_entry); - break; - } - mid_entry = NULL; + list_del_init(&mid_entry->qhead); + mid_entry->callback(mid_entry); + break; } spin_unlock(&GlobalMid_Lock); @@ -2659,6 +2673,11 @@ is_path_accessible(int xid, struct cifsTconInfo *tcon, 0 /* not legacy */, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); + + if (rc == -EOPNOTSUPP || rc == -EINVAL) + rc = SMBQueryInformation(xid, tcon, full_path, pfile_info, + cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & + CIFS_MOUNT_MAP_SPECIAL_CHR); kfree(pfile_info); return rc; } diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index f6728eb6f4b..645114ad0a1 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -276,7 +276,7 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses, } static void -decode_unicode_ssetup(char **pbcc_area, __u16 bleft, struct cifsSesInfo *ses, +decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses, const struct nls_table *nls_cp) { int len; @@ -284,19 +284,6 @@ decode_unicode_ssetup(char **pbcc_area, __u16 bleft, struct cifsSesInfo *ses, cFYI(1, "bleft %d", bleft); - /* - * Windows servers do not always double null terminate their final - * Unicode string. Check to see if there are an uneven number of bytes - * left. If so, then add an extra NULL pad byte to the end of the - * response. 
- * - * See section 2.7.2 in "Implementing CIFS" for details - */ - if (bleft % 2) { - data[bleft] = 0; - ++bleft; - } - kfree(ses->serverOS); ses->serverOS = cifs_strndup_from_ucs(data, bleft, true, nls_cp); cFYI(1, "serverOS=%s", ses->serverOS); @@ -929,7 +916,9 @@ ssetup_ntlmssp_authenticate: } /* BB check if Unicode and decode strings */ - if (smb_buf->Flags2 & SMBFLG2_UNICODE) { + if (bytes_remaining == 0) { + /* no string area to decode, do nothing */ + } else if (smb_buf->Flags2 & SMBFLG2_UNICODE) { /* unicode string area must be word-aligned */ if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) { ++bcc_ptr; diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index 3313dd19f54..9a37a9b6de3 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -53,11 +53,14 @@ DEFINE_SPINLOCK(configfs_dirent_lock); static void configfs_d_iput(struct dentry * dentry, struct inode * inode) { - struct configfs_dirent * sd = dentry->d_fsdata; + struct configfs_dirent *sd = dentry->d_fsdata; if (sd) { BUG_ON(sd->s_dentry != dentry); + /* Coordinate with configfs_readdir */ + spin_lock(&configfs_dirent_lock); sd->s_dentry = NULL; + spin_unlock(&configfs_dirent_lock); configfs_put(sd); } iput(inode); @@ -689,7 +692,8 @@ static int create_default_group(struct config_group *parent_group, sd = child->d_fsdata; sd->s_type |= CONFIGFS_USET_DEFAULT; } else { - d_delete(child); + BUG_ON(child->d_inode); + d_drop(child); dput(child); } } @@ -1545,7 +1549,7 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir struct configfs_dirent * parent_sd = dentry->d_fsdata; struct configfs_dirent *cursor = filp->private_data; struct list_head *p, *q = &cursor->s_sibling; - ino_t ino; + ino_t ino = 0; int i = filp->f_pos; switch (i) { @@ -1573,6 +1577,7 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir struct configfs_dirent *next; const char * name; int len; + struct inode *inode = NULL; next = list_entry(p, struct configfs_dirent, s_sibling); @@ -1581,9 +1586,28 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir name = configfs_get_name(next); len = strlen(name); - if (next->s_dentry) - ino = next->s_dentry->d_inode->i_ino; - else + + /* + * We'll have a dentry and an inode for + * PINNED items and for open attribute + * files. We lock here to prevent a race + * with configfs_d_iput() clearing + * s_dentry before calling iput(). + * + * Why do we go to the trouble? If + * someone has an attribute file open, + * the inode number should match until + * they close it. Beyond that, we don't + * care. 
+ */ + spin_lock(&configfs_dirent_lock); + dentry = next->s_dentry; + if (dentry) + inode = dentry->d_inode; + if (inode) + ino = inode->i_ino; + spin_unlock(&configfs_dirent_lock); + if (!inode) ino = iunique(configfs_sb, 2); if (filldir(dirent, name, len, filp->f_pos, ino, @@ -1683,7 +1707,8 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys) err = configfs_attach_group(sd->s_element, &group->cg_item, dentry); if (err) { - d_delete(dentry); + BUG_ON(dentry->d_inode); + d_drop(dentry); dput(dentry); } else { spin_lock(&configfs_dirent_lock); diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index c6ba49bd95b..b32eb29a4e6 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -174,7 +174,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd) if (!inode) return 0; - if (nd->flags & LOOKUP_RCU) + if (nd && (nd->flags & LOOKUP_RCU)) return -ECHILD; fc = get_fuse_conn(inode); diff --git a/fs/hpfs/Kconfig b/fs/hpfs/Kconfig index 0c39dc3ef7d..56bd15c5bf6 100644 --- a/fs/hpfs/Kconfig +++ b/fs/hpfs/Kconfig @@ -1,7 +1,6 @@ config HPFS_FS tristate "OS/2 HPFS file system support" depends on BLOCK - depends on BROKEN || !PREEMPT help OS/2 is IBM's operating system for PC's, the same as Warp, and HPFS is the file system used for organizing files on OS/2 hard disk diff --git a/fs/hpfs/alloc.c b/fs/hpfs/alloc.c index 5503e2c2891..7a5eb2c718c 100644 --- a/fs/hpfs/alloc.c +++ b/fs/hpfs/alloc.c @@ -8,8 +8,6 @@ #include "hpfs_fn.h" -static int hpfs_alloc_if_possible_nolock(struct super_block *s, secno sec); - /* * Check if a sector is allocated in bitmap * This is really slow. Turned on only if chk==2 @@ -18,9 +16,9 @@ static int hpfs_alloc_if_possible_nolock(struct super_block *s, secno sec); static int chk_if_allocated(struct super_block *s, secno sec, char *msg) { struct quad_buffer_head qbh; - unsigned *bmp; + u32 *bmp; if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "chk"))) goto fail; - if ((bmp[(sec & 0x3fff) >> 5] >> (sec & 0x1f)) & 1) { + if ((cpu_to_le32(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f)) & 1) { hpfs_error(s, "sector '%s' - %08x not allocated in bitmap", msg, sec); goto fail1; } @@ -28,7 +26,7 @@ static int chk_if_allocated(struct super_block *s, secno sec, char *msg) if (sec >= hpfs_sb(s)->sb_dirband_start && sec < hpfs_sb(s)->sb_dirband_start + hpfs_sb(s)->sb_dirband_size) { unsigned ssec = (sec - hpfs_sb(s)->sb_dirband_start) / 4; if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) goto fail; - if ((bmp[ssec >> 5] >> (ssec & 0x1f)) & 1) { + if ((le32_to_cpu(bmp[ssec >> 5]) >> (ssec & 0x1f)) & 1) { hpfs_error(s, "sector '%s' - %08x not allocated in directory bitmap", msg, sec); goto fail1; } @@ -75,7 +73,6 @@ static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigne hpfs_error(s, "Bad allocation size: %d", n); return 0; } - lock_super(s); if (bs != ~0x3fff) { if (!(bmp = hpfs_map_bitmap(s, near >> 14, &qbh, "aib"))) goto uls; } else { @@ -85,10 +82,6 @@ static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigne ret = bs + nr; goto rt; } - /*if (!tstbits(bmp, nr + n, n + forward)) { - ret = bs + nr + n; - goto rt; - }*/ q = nr + n; b = 0; while ((a = tstbits(bmp, q, n + forward)) != 0) { q += a; @@ -105,14 +98,14 @@ static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigne goto rt; } nr >>= 5; - /*for (i = nr + 1; i != nr; i++, i &= 0x1ff) {*/ + /*for (i = nr + 1; i != nr; i++, i &= 0x1ff) */ i = nr; do { - if (!bmp[i]) goto cont; - if (n + forward >= 0x3f && bmp[i] != -1) goto cont; + if 
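The configfs change above has configfs_readdir() sample next->s_dentry and the derived inode number under configfs_dirent_lock, because configfs_d_iput() clears s_dentry under the same lock; when nothing is cached it falls back to iunique(). A small pthread sketch of the same discipline, with a mutex standing in for the kernel spinlock and an arbitrary fallback value.

/*
 * One path clears a cached pointer under a lock, so readers must sample
 * it (and whatever they derive from it) under the same lock.
 */
#include <pthread.h>
#include <stdio.h>

struct inode_stub  { unsigned long ino; };
struct dentry_stub { struct inode_stub *inode; };

static pthread_mutex_t dirent_lock = PTHREAD_MUTEX_INITIALIZER;
static struct dentry_stub *cached_dentry;   /* may be cleared concurrently */

/* teardown side: clear the cached pointer under the lock */
static void drop_dentry(void)
{
        pthread_mutex_lock(&dirent_lock);
        cached_dentry = NULL;
        pthread_mutex_unlock(&dirent_lock);
}

/* reader side: derive a stable inode number under the same lock */
static unsigned long lookup_ino(unsigned long fallback_ino)
{
        unsigned long ino = fallback_ino;

        pthread_mutex_lock(&dirent_lock);
        if (cached_dentry && cached_dentry->inode)
                ino = cached_dentry->inode->ino;
        pthread_mutex_unlock(&dirent_lock);
        return ino;
}

int main(void)
{
        struct inode_stub  i = { 42 };
        struct dentry_stub d = { &i };

        cached_dentry = &d;
        printf("%lu\n", lookup_ino(100));    /* 42 */
        drop_dentry();
        printf("%lu\n", lookup_ino(100));    /* 100 */
        return 0;
}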
(!le32_to_cpu(bmp[i])) goto cont; + if (n + forward >= 0x3f && le32_to_cpu(bmp[i]) != 0xffffffff) goto cont; q = i<<5; if (i > 0) { - unsigned k = bmp[i-1]; + unsigned k = le32_to_cpu(bmp[i-1]); while (k & 0x80000000) { q--; k <<= 1; } @@ -132,18 +125,17 @@ static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigne } while (i != nr); rt: if (ret) { - if (hpfs_sb(s)->sb_chk && ((ret >> 14) != (bs >> 14) || (bmp[(ret & 0x3fff) >> 5] | ~(((1 << n) - 1) << (ret & 0x1f))) != 0xffffffff)) { + if (hpfs_sb(s)->sb_chk && ((ret >> 14) != (bs >> 14) || (le32_to_cpu(bmp[(ret & 0x3fff) >> 5]) | ~(((1 << n) - 1) << (ret & 0x1f))) != 0xffffffff)) { hpfs_error(s, "Allocation doesn't work! Wanted %d, allocated at %08x", n, ret); ret = 0; goto b; } - bmp[(ret & 0x3fff) >> 5] &= ~(((1 << n) - 1) << (ret & 0x1f)); + bmp[(ret & 0x3fff) >> 5] &= cpu_to_le32(~(((1 << n) - 1) << (ret & 0x1f))); hpfs_mark_4buffers_dirty(&qbh); } b: hpfs_brelse4(&qbh); uls: - unlock_super(s); return ret; } @@ -155,7 +147,7 @@ static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigne * sectors */ -secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forward, int lock) +secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forward) { secno sec; int i; @@ -167,7 +159,6 @@ secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forwa forward = -forward; f_p = 1; } - if (lock) hpfs_lock_creation(s); n_bmps = (sbi->sb_fs_size + 0x4000 - 1) >> 14; if (near && near < sbi->sb_fs_size) { if ((sec = alloc_in_bmp(s, near, n, f_p ? forward : forward/4))) goto ret; @@ -214,18 +205,17 @@ secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forwa ret: if (sec && f_p) { for (i = 0; i < forward; i++) { - if (!hpfs_alloc_if_possible_nolock(s, sec + i + 1)) { + if (!hpfs_alloc_if_possible(s, sec + i + 1)) { hpfs_error(s, "Prealloc doesn't work! 
Wanted %d, allocated at %08x, can't allocate %d", forward, sec, i); sec = 0; break; } } } - if (lock) hpfs_unlock_creation(s); return sec; } -static secno alloc_in_dirband(struct super_block *s, secno near, int lock) +static secno alloc_in_dirband(struct super_block *s, secno near) { unsigned nr = near; secno sec; @@ -236,49 +226,35 @@ static secno alloc_in_dirband(struct super_block *s, secno near, int lock) nr = sbi->sb_dirband_start + sbi->sb_dirband_size - 4; nr -= sbi->sb_dirband_start; nr >>= 2; - if (lock) hpfs_lock_creation(s); sec = alloc_in_bmp(s, (~0x3fff) | nr, 1, 0); - if (lock) hpfs_unlock_creation(s); if (!sec) return 0; return ((sec & 0x3fff) << 2) + sbi->sb_dirband_start; } /* Alloc sector if it's free */ -static int hpfs_alloc_if_possible_nolock(struct super_block *s, secno sec) +int hpfs_alloc_if_possible(struct super_block *s, secno sec) { struct quad_buffer_head qbh; - unsigned *bmp; - lock_super(s); + u32 *bmp; if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "aip"))) goto end; - if (bmp[(sec & 0x3fff) >> 5] & (1 << (sec & 0x1f))) { - bmp[(sec & 0x3fff) >> 5] &= ~(1 << (sec & 0x1f)); + if (le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) & (1 << (sec & 0x1f))) { + bmp[(sec & 0x3fff) >> 5] &= cpu_to_le32(~(1 << (sec & 0x1f))); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); - unlock_super(s); return 1; } hpfs_brelse4(&qbh); end: - unlock_super(s); return 0; } -int hpfs_alloc_if_possible(struct super_block *s, secno sec) -{ - int r; - hpfs_lock_creation(s); - r = hpfs_alloc_if_possible_nolock(s, sec); - hpfs_unlock_creation(s); - return r; -} - /* Free sectors in bitmaps */ void hpfs_free_sectors(struct super_block *s, secno sec, unsigned n) { struct quad_buffer_head qbh; - unsigned *bmp; + u32 *bmp; struct hpfs_sb_info *sbi = hpfs_sb(s); /*printk("2 - ");*/ if (!n) return; @@ -286,26 +262,22 @@ void hpfs_free_sectors(struct super_block *s, secno sec, unsigned n) hpfs_error(s, "Trying to free reserved sector %08x", sec); return; } - lock_super(s); sbi->sb_max_fwd_alloc += n > 0xffff ? 
0xffff : n; if (sbi->sb_max_fwd_alloc > 0xffffff) sbi->sb_max_fwd_alloc = 0xffffff; new_map: if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "free"))) { - unlock_super(s); return; } new_tst: - if ((bmp[(sec & 0x3fff) >> 5] >> (sec & 0x1f) & 1)) { + if ((le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f) & 1)) { hpfs_error(s, "sector %08x not allocated", sec); hpfs_brelse4(&qbh); - unlock_super(s); return; } - bmp[(sec & 0x3fff) >> 5] |= 1 << (sec & 0x1f); + bmp[(sec & 0x3fff) >> 5] |= cpu_to_le32(1 << (sec & 0x1f)); if (!--n) { hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); - unlock_super(s); return; } if (!(++sec & 0x3fff)) { @@ -327,13 +299,13 @@ int hpfs_check_free_dnodes(struct super_block *s, int n) int n_bmps = (hpfs_sb(s)->sb_fs_size + 0x4000 - 1) >> 14; int b = hpfs_sb(s)->sb_c_bitmap & 0x0fffffff; int i, j; - unsigned *bmp; + u32 *bmp; struct quad_buffer_head qbh; if ((bmp = hpfs_map_dnode_bitmap(s, &qbh))) { for (j = 0; j < 512; j++) { unsigned k; - if (!bmp[j]) continue; - for (k = bmp[j]; k; k >>= 1) if (k & 1) if (!--n) { + if (!le32_to_cpu(bmp[j])) continue; + for (k = le32_to_cpu(bmp[j]); k; k >>= 1) if (k & 1) if (!--n) { hpfs_brelse4(&qbh); return 0; } @@ -352,10 +324,10 @@ int hpfs_check_free_dnodes(struct super_block *s, int n) chk_bmp: if (bmp) { for (j = 0; j < 512; j++) { - unsigned k; - if (!bmp[j]) continue; + u32 k; + if (!le32_to_cpu(bmp[j])) continue; for (k = 0xf; k; k <<= 4) - if ((bmp[j] & k) == k) { + if ((le32_to_cpu(bmp[j]) & k) == k) { if (!--n) { hpfs_brelse4(&qbh); return 0; @@ -379,44 +351,40 @@ void hpfs_free_dnode(struct super_block *s, dnode_secno dno) hpfs_free_sectors(s, dno, 4); } else { struct quad_buffer_head qbh; - unsigned *bmp; + u32 *bmp; unsigned ssec = (dno - hpfs_sb(s)->sb_dirband_start) / 4; - lock_super(s); if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) { - unlock_super(s); return; } - bmp[ssec >> 5] |= 1 << (ssec & 0x1f); + bmp[ssec >> 5] |= cpu_to_le32(1 << (ssec & 0x1f)); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); - unlock_super(s); } } struct dnode *hpfs_alloc_dnode(struct super_block *s, secno near, - dnode_secno *dno, struct quad_buffer_head *qbh, - int lock) + dnode_secno *dno, struct quad_buffer_head *qbh) { struct dnode *d; if (hpfs_count_one_bitmap(s, hpfs_sb(s)->sb_dmap) > FREE_DNODES_ADD) { - if (!(*dno = alloc_in_dirband(s, near, lock))) - if (!(*dno = hpfs_alloc_sector(s, near, 4, 0, lock))) return NULL; + if (!(*dno = alloc_in_dirband(s, near))) + if (!(*dno = hpfs_alloc_sector(s, near, 4, 0))) return NULL; } else { - if (!(*dno = hpfs_alloc_sector(s, near, 4, 0, lock))) - if (!(*dno = alloc_in_dirband(s, near, lock))) return NULL; + if (!(*dno = hpfs_alloc_sector(s, near, 4, 0))) + if (!(*dno = alloc_in_dirband(s, near))) return NULL; } if (!(d = hpfs_get_4sectors(s, *dno, qbh))) { hpfs_free_dnode(s, *dno); return NULL; } memset(d, 0, 2048); - d->magic = DNODE_MAGIC; - d->first_free = 52; + d->magic = cpu_to_le32(DNODE_MAGIC); + d->first_free = cpu_to_le32(52); d->dirent[0] = 32; d->dirent[2] = 8; d->dirent[30] = 1; d->dirent[31] = 255; - d->self = *dno; + d->self = cpu_to_le32(*dno); return d; } @@ -424,16 +392,16 @@ struct fnode *hpfs_alloc_fnode(struct super_block *s, secno near, fnode_secno *f struct buffer_head **bh) { struct fnode *f; - if (!(*fno = hpfs_alloc_sector(s, near, 1, FNODE_ALLOC_FWD, 1))) return NULL; + if (!(*fno = hpfs_alloc_sector(s, near, 1, FNODE_ALLOC_FWD))) return NULL; if (!(f = hpfs_get_sector(s, *fno, bh))) { hpfs_free_sectors(s, *fno, 1); return NULL; } memset(f, 0, 512); - 
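The hpfs allocator changes above stop treating the on-disk allocation bitmaps as host-endian unsigned words: every test, set and clear now goes through le32_to_cpu()/cpu_to_le32() on u32 words. A standalone sketch of endianness-safe bitmap access, with userspace byte-order helpers standing in for the kernel macros and a set bit meaning "sector free", as in HPFS.

/*
 * On a little-endian host the conversions are no-ops, which is exactly
 * why the old host-endian code appeared to work there and broke only on
 * big-endian machines.
 */
#include <stdint.h>
#include <stdio.h>

/* byte-order helpers (stand-ins for the kernel's le32 accessors) */
static uint32_t le32_to_host(uint32_t v)
{
        const uint8_t *p = (const uint8_t *)&v;
        return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
               (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}
static uint32_t host_to_le32(uint32_t v)
{
        uint32_t out;
        uint8_t *p = (uint8_t *)&out;
        p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
        return out;
}

/* test whether sector 'sec' is free in an on-disk (little-endian) bitmap */
static int bmp_test(const uint32_t *bmp, unsigned sec)
{
        return (le32_to_host(bmp[sec >> 5]) >> (sec & 0x1f)) & 1;
}

/* clear the bit for 'sec', i.e. mark the sector allocated */
static void bmp_clear(uint32_t *bmp, unsigned sec)
{
        uint32_t w = le32_to_host(bmp[sec >> 5]);
        bmp[sec >> 5] = host_to_le32(w & ~(1u << (sec & 0x1f)));
}

int main(void)
{
        uint32_t bmp[1] = { host_to_le32(0xffffffffu) };  /* all free */

        printf("%d\n", bmp_test(bmp, 5));    /* 1: free */
        bmp_clear(bmp, 5);
        printf("%d\n", bmp_test(bmp, 5));    /* 0: allocated */
        return 0;
}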
f->magic = FNODE_MAGIC; - f->ea_offs = 0xc4; + f->magic = cpu_to_le32(FNODE_MAGIC); + f->ea_offs = cpu_to_le16(0xc4); f->btree.n_free_nodes = 8; - f->btree.first_free = 8; + f->btree.first_free = cpu_to_le16(8); return f; } @@ -441,16 +409,16 @@ struct anode *hpfs_alloc_anode(struct super_block *s, secno near, anode_secno *a struct buffer_head **bh) { struct anode *a; - if (!(*ano = hpfs_alloc_sector(s, near, 1, ANODE_ALLOC_FWD, 1))) return NULL; + if (!(*ano = hpfs_alloc_sector(s, near, 1, ANODE_ALLOC_FWD))) return NULL; if (!(a = hpfs_get_sector(s, *ano, bh))) { hpfs_free_sectors(s, *ano, 1); return NULL; } memset(a, 0, 512); - a->magic = ANODE_MAGIC; - a->self = *ano; + a->magic = cpu_to_le32(ANODE_MAGIC); + a->self = cpu_to_le32(*ano); a->btree.n_free_nodes = 40; a->btree.n_used_nodes = 0; - a->btree.first_free = 8; + a->btree.first_free = cpu_to_le16(8); return a; } diff --git a/fs/hpfs/anode.c b/fs/hpfs/anode.c index 6a2f04bf3df..08b503e8ed2 100644 --- a/fs/hpfs/anode.c +++ b/fs/hpfs/anode.c @@ -22,8 +22,8 @@ secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode, if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_bplus_lookup")) return -1; if (btree->internal) { for (i = 0; i < btree->n_used_nodes; i++) - if (btree->u.internal[i].file_secno > sec) { - a = btree->u.internal[i].down; + if (le32_to_cpu(btree->u.internal[i].file_secno) > sec) { + a = le32_to_cpu(btree->u.internal[i].down); brelse(bh); if (!(anode = hpfs_map_anode(s, a, &bh))) return -1; btree = &anode->btree; @@ -34,18 +34,18 @@ secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode, return -1; } for (i = 0; i < btree->n_used_nodes; i++) - if (btree->u.external[i].file_secno <= sec && - btree->u.external[i].file_secno + btree->u.external[i].length > sec) { - a = btree->u.external[i].disk_secno + sec - btree->u.external[i].file_secno; + if (le32_to_cpu(btree->u.external[i].file_secno) <= sec && + le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > sec) { + a = le32_to_cpu(btree->u.external[i].disk_secno) + sec - le32_to_cpu(btree->u.external[i].file_secno); if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, a, 1, "data")) { brelse(bh); return -1; } if (inode) { struct hpfs_inode_info *hpfs_inode = hpfs_i(inode); - hpfs_inode->i_file_sec = btree->u.external[i].file_secno; - hpfs_inode->i_disk_sec = btree->u.external[i].disk_secno; - hpfs_inode->i_n_secs = btree->u.external[i].length; + hpfs_inode->i_file_sec = le32_to_cpu(btree->u.external[i].file_secno); + hpfs_inode->i_disk_sec = le32_to_cpu(btree->u.external[i].disk_secno); + hpfs_inode->i_n_secs = le32_to_cpu(btree->u.external[i].length); } brelse(bh); return a; @@ -83,8 +83,8 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi return -1; } if (btree->internal) { - a = btree->u.internal[n].down; - btree->u.internal[n].file_secno = -1; + a = le32_to_cpu(btree->u.internal[n].down); + btree->u.internal[n].file_secno = cpu_to_le32(-1); mark_buffer_dirty(bh); brelse(bh); if (hpfs_sb(s)->sb_chk) @@ -94,15 +94,15 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi goto go_down; } if (n >= 0) { - if (btree->u.external[n].file_secno + btree->u.external[n].length != fsecno) { + if (le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length) != fsecno) { hpfs_error(s, "allocated size %08x, trying to add sector %08x, %cnode %08x", - btree->u.external[n].file_secno + btree->u.external[n].length, fsecno, + 
le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length), fsecno, fnod?'f':'a', node); brelse(bh); return -1; } - if (hpfs_alloc_if_possible(s, se = btree->u.external[n].disk_secno + btree->u.external[n].length)) { - btree->u.external[n].length++; + if (hpfs_alloc_if_possible(s, se = le32_to_cpu(btree->u.external[n].disk_secno) + le32_to_cpu(btree->u.external[n].length))) { + btree->u.external[n].length = cpu_to_le32(le32_to_cpu(btree->u.external[n].length) + 1); mark_buffer_dirty(bh); brelse(bh); return se; @@ -115,20 +115,20 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi } se = !fnod ? node : (node + 16384) & ~16383; } - if (!(se = hpfs_alloc_sector(s, se, 1, fsecno*ALLOC_M>ALLOC_FWD_MAX ? ALLOC_FWD_MAX : fsecno*ALLOC_M<ALLOC_FWD_MIN ? ALLOC_FWD_MIN : fsecno*ALLOC_M, 1))) { + if (!(se = hpfs_alloc_sector(s, se, 1, fsecno*ALLOC_M>ALLOC_FWD_MAX ? ALLOC_FWD_MAX : fsecno*ALLOC_M<ALLOC_FWD_MIN ? ALLOC_FWD_MIN : fsecno*ALLOC_M))) { brelse(bh); return -1; } - fs = n < 0 ? 0 : btree->u.external[n].file_secno + btree->u.external[n].length; + fs = n < 0 ? 0 : le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length); if (!btree->n_free_nodes) { - up = a != node ? anode->up : -1; + up = a != node ? le32_to_cpu(anode->up) : -1; if (!(anode = hpfs_alloc_anode(s, a, &na, &bh1))) { brelse(bh); hpfs_free_sectors(s, se, 1); return -1; } if (a == node && fnod) { - anode->up = node; + anode->up = cpu_to_le32(node); anode->btree.fnode_parent = 1; anode->btree.n_used_nodes = btree->n_used_nodes; anode->btree.first_free = btree->first_free; @@ -137,9 +137,9 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi btree->internal = 1; btree->n_free_nodes = 11; btree->n_used_nodes = 1; - btree->first_free = (char *)&(btree->u.internal[1]) - (char *)btree; - btree->u.internal[0].file_secno = -1; - btree->u.internal[0].down = na; + btree->first_free = cpu_to_le16((char *)&(btree->u.internal[1]) - (char *)btree); + btree->u.internal[0].file_secno = cpu_to_le32(-1); + btree->u.internal[0].down = cpu_to_le32(na); mark_buffer_dirty(bh); } else if (!(ranode = hpfs_alloc_anode(s, /*a*/0, &ra, &bh2))) { brelse(bh); @@ -153,15 +153,15 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi btree = &anode->btree; } btree->n_free_nodes--; n = btree->n_used_nodes++; - btree->first_free += 12; - btree->u.external[n].disk_secno = se; - btree->u.external[n].file_secno = fs; - btree->u.external[n].length = 1; + btree->first_free = cpu_to_le16(le16_to_cpu(btree->first_free) + 12); + btree->u.external[n].disk_secno = cpu_to_le32(se); + btree->u.external[n].file_secno = cpu_to_le32(fs); + btree->u.external[n].length = cpu_to_le32(1); mark_buffer_dirty(bh); brelse(bh); if ((a == node && fnod) || na == -1) return se; c2 = 0; - while (up != -1) { + while (up != (anode_secno)-1) { struct anode *new_anode; if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, up, &c1, &c2, "hpfs_add_sector_to_btree #2")) return -1; @@ -174,47 +174,47 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi } if (btree->n_free_nodes) { btree->n_free_nodes--; n = btree->n_used_nodes++; - btree->first_free += 8; - btree->u.internal[n].file_secno = -1; - btree->u.internal[n].down = na; - btree->u.internal[n-1].file_secno = fs; + btree->first_free = cpu_to_le16(le16_to_cpu(btree->first_free) + 8); + btree->u.internal[n].file_secno = cpu_to_le32(-1); + btree->u.internal[n].down = 
cpu_to_le32(na); + btree->u.internal[n-1].file_secno = cpu_to_le32(fs); mark_buffer_dirty(bh); brelse(bh); brelse(bh2); hpfs_free_sectors(s, ra, 1); if ((anode = hpfs_map_anode(s, na, &bh))) { - anode->up = up; + anode->up = cpu_to_le32(up); anode->btree.fnode_parent = up == node && fnod; mark_buffer_dirty(bh); brelse(bh); } return se; } - up = up != node ? anode->up : -1; - btree->u.internal[btree->n_used_nodes - 1].file_secno = /*fs*/-1; + up = up != node ? le32_to_cpu(anode->up) : -1; + btree->u.internal[btree->n_used_nodes - 1].file_secno = cpu_to_le32(/*fs*/-1); mark_buffer_dirty(bh); brelse(bh); a = na; if ((new_anode = hpfs_alloc_anode(s, a, &na, &bh))) { anode = new_anode; - /*anode->up = up != -1 ? up : ra;*/ + /*anode->up = cpu_to_le32(up != -1 ? up : ra);*/ anode->btree.internal = 1; anode->btree.n_used_nodes = 1; anode->btree.n_free_nodes = 59; - anode->btree.first_free = 16; - anode->btree.u.internal[0].down = a; - anode->btree.u.internal[0].file_secno = -1; + anode->btree.first_free = cpu_to_le16(16); + anode->btree.u.internal[0].down = cpu_to_le32(a); + anode->btree.u.internal[0].file_secno = cpu_to_le32(-1); mark_buffer_dirty(bh); brelse(bh); if ((anode = hpfs_map_anode(s, a, &bh))) { - anode->up = na; + anode->up = cpu_to_le32(na); mark_buffer_dirty(bh); brelse(bh); } } else na = a; } if ((anode = hpfs_map_anode(s, na, &bh))) { - anode->up = node; + anode->up = cpu_to_le32(node); if (fnod) anode->btree.fnode_parent = 1; mark_buffer_dirty(bh); brelse(bh); @@ -232,14 +232,14 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi } btree = &fnode->btree; } - ranode->up = node; - memcpy(&ranode->btree, btree, btree->first_free); + ranode->up = cpu_to_le32(node); + memcpy(&ranode->btree, btree, le16_to_cpu(btree->first_free)); if (fnod) ranode->btree.fnode_parent = 1; ranode->btree.n_free_nodes = (ranode->btree.internal ? 60 : 40) - ranode->btree.n_used_nodes; if (ranode->btree.internal) for (n = 0; n < ranode->btree.n_used_nodes; n++) { struct anode *unode; - if ((unode = hpfs_map_anode(s, ranode->u.internal[n].down, &bh1))) { - unode->up = ra; + if ((unode = hpfs_map_anode(s, le32_to_cpu(ranode->u.internal[n].down), &bh1))) { + unode->up = cpu_to_le32(ra); unode->btree.fnode_parent = 0; mark_buffer_dirty(bh1); brelse(bh1); @@ -248,11 +248,11 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi btree->internal = 1; btree->n_free_nodes = fnod ? 
10 : 58; btree->n_used_nodes = 2; - btree->first_free = (char *)&btree->u.internal[2] - (char *)btree; - btree->u.internal[0].file_secno = fs; - btree->u.internal[0].down = ra; - btree->u.internal[1].file_secno = -1; - btree->u.internal[1].down = na; + btree->first_free = cpu_to_le16((char *)&btree->u.internal[2] - (char *)btree); + btree->u.internal[0].file_secno = cpu_to_le32(fs); + btree->u.internal[0].down = cpu_to_le32(ra); + btree->u.internal[1].file_secno = cpu_to_le32(-1); + btree->u.internal[1].down = cpu_to_le32(na); mark_buffer_dirty(bh); brelse(bh); mark_buffer_dirty(bh2); @@ -279,7 +279,7 @@ void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree) go_down: d2 = 0; while (btree1->internal) { - ano = btree1->u.internal[pos].down; + ano = le32_to_cpu(btree1->u.internal[pos].down); if (level) brelse(bh); if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, ano, &d1, &d2, "hpfs_remove_btree #1")) @@ -290,7 +290,7 @@ void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree) pos = 0; } for (i = 0; i < btree1->n_used_nodes; i++) - hpfs_free_sectors(s, btree1->u.external[i].disk_secno, btree1->u.external[i].length); + hpfs_free_sectors(s, le32_to_cpu(btree1->u.external[i].disk_secno), le32_to_cpu(btree1->u.external[i].length)); go_up: if (!level) return; brelse(bh); @@ -298,13 +298,13 @@ void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree) if (hpfs_stop_cycles(s, ano, &c1, &c2, "hpfs_remove_btree #2")) return; hpfs_free_sectors(s, ano, 1); oano = ano; - ano = anode->up; + ano = le32_to_cpu(anode->up); if (--level) { if (!(anode = hpfs_map_anode(s, ano, &bh))) return; btree1 = &anode->btree; } else btree1 = btree; for (i = 0; i < btree1->n_used_nodes; i++) { - if (btree1->u.internal[i].down == oano) { + if (le32_to_cpu(btree1->u.internal[i].down) == oano) { if ((pos = i + 1) < btree1->n_used_nodes) goto go_down; else @@ -411,7 +411,7 @@ void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs) if (fno) { btree->n_free_nodes = 8; btree->n_used_nodes = 0; - btree->first_free = 8; + btree->first_free = cpu_to_le16(8); btree->internal = 0; mark_buffer_dirty(bh); } else hpfs_free_sectors(s, f, 1); @@ -421,22 +421,22 @@ void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs) while (btree->internal) { nodes = btree->n_used_nodes + btree->n_free_nodes; for (i = 0; i < btree->n_used_nodes; i++) - if (btree->u.internal[i].file_secno >= secs) goto f; + if (le32_to_cpu(btree->u.internal[i].file_secno) >= secs) goto f; brelse(bh); hpfs_error(s, "internal btree %08x doesn't end with -1", node); return; f: for (j = i + 1; j < btree->n_used_nodes; j++) - hpfs_ea_remove(s, btree->u.internal[j].down, 1, 0); + hpfs_ea_remove(s, le32_to_cpu(btree->u.internal[j].down), 1, 0); btree->n_used_nodes = i + 1; btree->n_free_nodes = nodes - btree->n_used_nodes; - btree->first_free = 8 + 8 * btree->n_used_nodes; + btree->first_free = cpu_to_le16(8 + 8 * btree->n_used_nodes); mark_buffer_dirty(bh); - if (btree->u.internal[i].file_secno == secs) { + if (btree->u.internal[i].file_secno == cpu_to_le32(secs)) { brelse(bh); return; } - node = btree->u.internal[i].down; + node = le32_to_cpu(btree->u.internal[i].down); brelse(bh); if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, node, &c1, &c2, "hpfs_truncate_btree")) @@ -446,25 +446,25 @@ void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs) } nodes = btree->n_used_nodes + btree->n_free_nodes; for (i = 0; i < btree->n_used_nodes; i++) - if 
(btree->u.external[i].file_secno + btree->u.external[i].length >= secs) goto ff; + if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) >= secs) goto ff; brelse(bh); return; ff: - if (secs <= btree->u.external[i].file_secno) { + if (secs <= le32_to_cpu(btree->u.external[i].file_secno)) { hpfs_error(s, "there is an allocation error in file %08x, sector %08x", f, secs); if (i) i--; } - else if (btree->u.external[i].file_secno + btree->u.external[i].length > secs) { - hpfs_free_sectors(s, btree->u.external[i].disk_secno + secs - - btree->u.external[i].file_secno, btree->u.external[i].length - - secs + btree->u.external[i].file_secno); /* I hope gcc optimizes this :-) */ - btree->u.external[i].length = secs - btree->u.external[i].file_secno; + else if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > secs) { + hpfs_free_sectors(s, le32_to_cpu(btree->u.external[i].disk_secno) + secs - + le32_to_cpu(btree->u.external[i].file_secno), le32_to_cpu(btree->u.external[i].length) + - secs + le32_to_cpu(btree->u.external[i].file_secno)); /* I hope gcc optimizes this :-) */ + btree->u.external[i].length = cpu_to_le32(secs - le32_to_cpu(btree->u.external[i].file_secno)); } for (j = i + 1; j < btree->n_used_nodes; j++) - hpfs_free_sectors(s, btree->u.external[j].disk_secno, btree->u.external[j].length); + hpfs_free_sectors(s, le32_to_cpu(btree->u.external[j].disk_secno), le32_to_cpu(btree->u.external[j].length)); btree->n_used_nodes = i + 1; btree->n_free_nodes = nodes - btree->n_used_nodes; - btree->first_free = 8 + 12 * btree->n_used_nodes; + btree->first_free = cpu_to_le16(8 + 12 * btree->n_used_nodes); mark_buffer_dirty(bh); brelse(bh); } @@ -480,12 +480,12 @@ void hpfs_remove_fnode(struct super_block *s, fnode_secno fno) struct extended_attribute *ea_end; if (!(fnode = hpfs_map_fnode(s, fno, &bh))) return; if (!fnode->dirflag) hpfs_remove_btree(s, &fnode->btree); - else hpfs_remove_dtree(s, fnode->u.external[0].disk_secno); + else hpfs_remove_dtree(s, le32_to_cpu(fnode->u.external[0].disk_secno)); ea_end = fnode_end_ea(fnode); for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea)) if (ea->indirect) hpfs_ea_remove(s, ea_sec(ea), ea->anode, ea_len(ea)); - hpfs_ea_ext_remove(s, fnode->ea_secno, fnode->ea_anode, fnode->ea_size_l); + hpfs_ea_ext_remove(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l)); brelse(bh); hpfs_free_sectors(s, fno, 1); } diff --git a/fs/hpfs/buffer.c b/fs/hpfs/buffer.c index 793cb9d943d..9ecde27d1e2 100644 --- a/fs/hpfs/buffer.c +++ b/fs/hpfs/buffer.c @@ -9,22 +9,6 @@ #include <linux/slab.h> #include "hpfs_fn.h" -void hpfs_lock_creation(struct super_block *s) -{ -#ifdef DEBUG_LOCKS - printk("lock creation\n"); -#endif - mutex_lock(&hpfs_sb(s)->hpfs_creation_de); -} - -void hpfs_unlock_creation(struct super_block *s) -{ -#ifdef DEBUG_LOCKS - printk("unlock creation\n"); -#endif - mutex_unlock(&hpfs_sb(s)->hpfs_creation_de); -} - /* Map a sector into a buffer and return pointers to it and to the buffer. 
*/ void *hpfs_map_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp, @@ -32,6 +16,8 @@ void *hpfs_map_sector(struct super_block *s, unsigned secno, struct buffer_head { struct buffer_head *bh; + hpfs_lock_assert(s); + cond_resched(); *bhp = bh = sb_bread(s, secno); @@ -50,6 +36,8 @@ void *hpfs_get_sector(struct super_block *s, unsigned secno, struct buffer_head struct buffer_head *bh; /*return hpfs_map_sector(s, secno, bhp, 0);*/ + hpfs_lock_assert(s); + cond_resched(); if ((*bhp = bh = sb_getblk(s, secno)) != NULL) { @@ -70,6 +58,8 @@ void *hpfs_map_4sectors(struct super_block *s, unsigned secno, struct quad_buffe struct buffer_head *bh; char *data; + hpfs_lock_assert(s); + cond_resched(); if (secno & 3) { @@ -125,6 +115,8 @@ void *hpfs_get_4sectors(struct super_block *s, unsigned secno, { cond_resched(); + hpfs_lock_assert(s); + if (secno & 3) { printk("HPFS: hpfs_get_4sectors: unaligned read\n"); return NULL; diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c index b3d7c0ddb60..f46ae025bfb 100644 --- a/fs/hpfs/dir.c +++ b/fs/hpfs/dir.c @@ -88,9 +88,9 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir) hpfs_error(inode->i_sb, "not a directory, fnode %08lx", (unsigned long)inode->i_ino); } - if (hpfs_inode->i_dno != fno->u.external[0].disk_secno) { + if (hpfs_inode->i_dno != le32_to_cpu(fno->u.external[0].disk_secno)) { e = 1; - hpfs_error(inode->i_sb, "corrupted inode: i_dno == %08x, fnode -> dnode == %08x", hpfs_inode->i_dno, fno->u.external[0].disk_secno); + hpfs_error(inode->i_sb, "corrupted inode: i_dno == %08x, fnode -> dnode == %08x", hpfs_inode->i_dno, le32_to_cpu(fno->u.external[0].disk_secno)); } brelse(bh); if (e) { @@ -156,7 +156,7 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir) goto again; } tempname = hpfs_translate_name(inode->i_sb, de->name, de->namelen, lc, de->not_8x3); - if (filldir(dirent, tempname, de->namelen, old_pos, de->fnode, DT_UNKNOWN) < 0) { + if (filldir(dirent, tempname, de->namelen, old_pos, le32_to_cpu(de->fnode), DT_UNKNOWN) < 0) { filp->f_pos = old_pos; if (tempname != de->name) kfree(tempname); hpfs_brelse4(&qbh); @@ -221,7 +221,7 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct name * Get inode number, what we're after. */ - ino = de->fnode; + ino = le32_to_cpu(de->fnode); /* * Go find or make an inode. @@ -236,7 +236,7 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct name hpfs_init_inode(result); if (de->directory) hpfs_read_inode(result); - else if (de->ea_size && hpfs_sb(dir->i_sb)->sb_eas) + else if (le32_to_cpu(de->ea_size) && hpfs_sb(dir->i_sb)->sb_eas) hpfs_read_inode(result); else { result->i_mode |= S_IFREG; @@ -250,8 +250,6 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct name hpfs_result = hpfs_i(result); if (!de->directory) hpfs_result->i_parent_dir = dir->i_ino; - hpfs_decide_conv(result, name, len); - if (de->has_acl || de->has_xtd_perm) if (!(dir->i_sb->s_flags & MS_RDONLY)) { hpfs_error(result->i_sb, "ACLs or XPERM found. This is probably HPFS386. This driver doesn't support it now. 
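Alongside removing hpfs_lock_creation()/hpfs_unlock_creation(), the buffer helpers above gain hpfs_lock_assert(s) calls so that mapping a sector without the filesystem lock held trips an assertion instead of racing silently. The sketch below shows that pattern in miniature; the single per-superblock mutex and its bookkeeping flag are assumptions for illustration, since hpfs_lock_assert()'s definition is not part of this hunk.

/*
 * Low-level accessors never take the lock themselves; they only assert
 * that the caller already holds it.
 */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct sbi {
        pthread_mutex_t lock;
        int lock_held;                   /* bookkeeping for the assertion */
};

static void fs_lock(struct sbi *s)
{
        pthread_mutex_lock(&s->lock);
        s->lock_held = 1;
}

static void fs_unlock(struct sbi *s)
{
        s->lock_held = 0;
        pthread_mutex_unlock(&s->lock);
}

static void fs_lock_assert(struct sbi *s)
{
        assert(s->lock_held);            /* caller must already hold the lock */
}

/* low-level helper: never locks on its own, only checks */
static void map_sector(struct sbi *s, unsigned secno)
{
        fs_lock_assert(s);
        printf("mapping sector %u\n", secno);
}

int main(void)
{
        static struct sbi s = { PTHREAD_MUTEX_INITIALIZER, 0 };

        fs_lock(&s);
        map_sector(&s, 12);
        fs_unlock(&s);
        return 0;
}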
Send me some info on these structures"); goto bail1; @@ -263,19 +261,19 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct name */ if (!result->i_ctime.tv_sec) { - if (!(result->i_ctime.tv_sec = local_to_gmt(dir->i_sb, de->creation_date))) + if (!(result->i_ctime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(de->creation_date)))) result->i_ctime.tv_sec = 1; result->i_ctime.tv_nsec = 0; - result->i_mtime.tv_sec = local_to_gmt(dir->i_sb, de->write_date); + result->i_mtime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(de->write_date)); result->i_mtime.tv_nsec = 0; - result->i_atime.tv_sec = local_to_gmt(dir->i_sb, de->read_date); + result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(de->read_date)); result->i_atime.tv_nsec = 0; - hpfs_result->i_ea_size = de->ea_size; + hpfs_result->i_ea_size = le32_to_cpu(de->ea_size); if (!hpfs_result->i_ea_mode && de->read_only) result->i_mode &= ~0222; if (!de->directory) { if (result->i_size == -1) { - result->i_size = de->file_size; + result->i_size = le32_to_cpu(de->file_size); result->i_data.a_ops = &hpfs_aops; hpfs_i(result)->mmu_private = result->i_size; /* diff --git a/fs/hpfs/dnode.c b/fs/hpfs/dnode.c index 9b2ffadfc8c..1e0e2ac30fd 100644 --- a/fs/hpfs/dnode.c +++ b/fs/hpfs/dnode.c @@ -14,11 +14,11 @@ static loff_t get_pos(struct dnode *d, struct hpfs_dirent *fde) struct hpfs_dirent *de_end = dnode_end_de(d); int i = 1; for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) { - if (de == fde) return ((loff_t) d->self << 4) | (loff_t)i; + if (de == fde) return ((loff_t) le32_to_cpu(d->self) << 4) | (loff_t)i; i++; } printk("HPFS: get_pos: not_found\n"); - return ((loff_t)d->self << 4) | (loff_t)1; + return ((loff_t)le32_to_cpu(d->self) << 4) | (loff_t)1; } void hpfs_add_pos(struct inode *inode, loff_t *pos) @@ -130,29 +130,30 @@ static void set_last_pointer(struct super_block *s, struct dnode *d, dnode_secno { struct hpfs_dirent *de; if (!(de = dnode_last_de(d))) { - hpfs_error(s, "set_last_pointer: empty dnode %08x", d->self); + hpfs_error(s, "set_last_pointer: empty dnode %08x", le32_to_cpu(d->self)); return; } if (hpfs_sb(s)->sb_chk) { if (de->down) { hpfs_error(s, "set_last_pointer: dnode %08x has already last pointer %08x", - d->self, de_down_pointer(de)); + le32_to_cpu(d->self), de_down_pointer(de)); return; } - if (de->length != 32) { - hpfs_error(s, "set_last_pointer: bad last dirent in dnode %08x", d->self); + if (le16_to_cpu(de->length) != 32) { + hpfs_error(s, "set_last_pointer: bad last dirent in dnode %08x", le32_to_cpu(d->self)); return; } } if (ptr) { - if ((d->first_free += 4) > 2048) { - hpfs_error(s,"set_last_pointer: too long dnode %08x", d->self); - d->first_free -= 4; + d->first_free = cpu_to_le32(le32_to_cpu(d->first_free) + 4); + if (le32_to_cpu(d->first_free) > 2048) { + hpfs_error(s, "set_last_pointer: too long dnode %08x", le32_to_cpu(d->self)); + d->first_free = cpu_to_le32(le32_to_cpu(d->first_free) - 4); return; } - de->length = 36; + de->length = cpu_to_le16(36); de->down = 1; - *(dnode_secno *)((char *)de + 32) = ptr; + *(dnode_secno *)((char *)de + 32) = cpu_to_le32(ptr); } } @@ -168,7 +169,7 @@ struct hpfs_dirent *hpfs_add_de(struct super_block *s, struct dnode *d, for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) { int c = hpfs_compare_names(s, name, namelen, de->name, de->namelen, de->last); if (!c) { - hpfs_error(s, "name (%c,%d) already exists in dnode %08x", *name, namelen, d->self); + hpfs_error(s, "name (%c,%d) already exists in dnode %08x", *name, 
namelen, le32_to_cpu(d->self)); return NULL; } if (c < 0) break; @@ -176,15 +177,14 @@ struct hpfs_dirent *hpfs_add_de(struct super_block *s, struct dnode *d, memmove((char *)de + d_size, de, (char *)de_end - (char *)de); memset(de, 0, d_size); if (down_ptr) { - *(int *)((char *)de + d_size - 4) = down_ptr; + *(dnode_secno *)((char *)de + d_size - 4) = cpu_to_le32(down_ptr); de->down = 1; } - de->length = d_size; - if (down_ptr) de->down = 1; + de->length = cpu_to_le16(d_size); de->not_8x3 = hpfs_is_name_long(name, namelen); de->namelen = namelen; memcpy(de->name, name, namelen); - d->first_free += d_size; + d->first_free = cpu_to_le32(le32_to_cpu(d->first_free) + d_size); return de; } @@ -194,25 +194,25 @@ static void hpfs_delete_de(struct super_block *s, struct dnode *d, struct hpfs_dirent *de) { if (de->last) { - hpfs_error(s, "attempt to delete last dirent in dnode %08x", d->self); + hpfs_error(s, "attempt to delete last dirent in dnode %08x", le32_to_cpu(d->self)); return; } - d->first_free -= de->length; - memmove(de, de_next_de(de), d->first_free + (char *)d - (char *)de); + d->first_free = cpu_to_le32(le32_to_cpu(d->first_free) - le16_to_cpu(de->length)); + memmove(de, de_next_de(de), le32_to_cpu(d->first_free) + (char *)d - (char *)de); } static void fix_up_ptrs(struct super_block *s, struct dnode *d) { struct hpfs_dirent *de; struct hpfs_dirent *de_end = dnode_end_de(d); - dnode_secno dno = d->self; + dnode_secno dno = le32_to_cpu(d->self); for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) if (de->down) { struct quad_buffer_head qbh; struct dnode *dd; if ((dd = hpfs_map_dnode(s, de_down_pointer(de), &qbh))) { - if (dd->up != dno || dd->root_dnode) { - dd->up = dno; + if (le32_to_cpu(dd->up) != dno || dd->root_dnode) { + dd->up = cpu_to_le32(dno); dd->root_dnode = 0; hpfs_mark_4buffers_dirty(&qbh); } @@ -262,7 +262,7 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno, kfree(nname); return 1; } - if (d->first_free + de_size(namelen, down_ptr) <= 2048) { + if (le32_to_cpu(d->first_free) + de_size(namelen, down_ptr) <= 2048) { loff_t t; copy_de(de=hpfs_add_de(i->i_sb, d, name, namelen, down_ptr), new_de); t = get_pos(d, de); @@ -286,11 +286,11 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno, kfree(nname); return 1; } - memcpy(nd, d, d->first_free); + memcpy(nd, d, le32_to_cpu(d->first_free)); copy_de(de = hpfs_add_de(i->i_sb, nd, name, namelen, down_ptr), new_de); for_all_poss(i, hpfs_pos_ins, get_pos(nd, de), 1); h = ((char *)dnode_last_de(nd) - (char *)nd) / 2 + 10; - if (!(ad = hpfs_alloc_dnode(i->i_sb, d->up, &adno, &qbh1, 0))) { + if (!(ad = hpfs_alloc_dnode(i->i_sb, le32_to_cpu(d->up), &adno, &qbh1))) { hpfs_error(i->i_sb, "unable to alloc dnode - dnode tree will be corrupted"); hpfs_brelse4(&qbh); kfree(nd); @@ -313,20 +313,21 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno, down_ptr = adno; set_last_pointer(i->i_sb, ad, de->down ? 
de_down_pointer(de) : 0); de = de_next_de(de); - memmove((char *)nd + 20, de, nd->first_free + (char *)nd - (char *)de); - nd->first_free -= (char *)de - (char *)nd - 20; - memcpy(d, nd, nd->first_free); + memmove((char *)nd + 20, de, le32_to_cpu(nd->first_free) + (char *)nd - (char *)de); + nd->first_free = cpu_to_le32(le32_to_cpu(nd->first_free) - ((char *)de - (char *)nd - 20)); + memcpy(d, nd, le32_to_cpu(nd->first_free)); for_all_poss(i, hpfs_pos_del, (loff_t)dno << 4, pos); fix_up_ptrs(i->i_sb, ad); if (!d->root_dnode) { - dno = ad->up = d->up; + ad->up = d->up; + dno = le32_to_cpu(ad->up); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); goto go_up; } - if (!(rd = hpfs_alloc_dnode(i->i_sb, d->up, &rdno, &qbh2, 0))) { + if (!(rd = hpfs_alloc_dnode(i->i_sb, le32_to_cpu(d->up), &rdno, &qbh2))) { hpfs_error(i->i_sb, "unable to alloc dnode - dnode tree will be corrupted"); hpfs_brelse4(&qbh); hpfs_brelse4(&qbh1); @@ -338,7 +339,7 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno, i->i_blocks += 4; rd->root_dnode = 1; rd->up = d->up; - if (!(fnode = hpfs_map_fnode(i->i_sb, d->up, &bh))) { + if (!(fnode = hpfs_map_fnode(i->i_sb, le32_to_cpu(d->up), &bh))) { hpfs_free_dnode(i->i_sb, rdno); hpfs_brelse4(&qbh); hpfs_brelse4(&qbh1); @@ -347,10 +348,11 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno, kfree(nname); return 1; } - fnode->u.external[0].disk_secno = rdno; + fnode->u.external[0].disk_secno = cpu_to_le32(rdno); mark_buffer_dirty(bh); brelse(bh); - d->up = ad->up = hpfs_i(i)->i_dno = rdno; + hpfs_i(i)->i_dno = rdno; + d->up = ad->up = cpu_to_le32(rdno); d->root_dnode = ad->root_dnode = 0; hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); @@ -373,7 +375,7 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno, int hpfs_add_dirent(struct inode *i, const unsigned char *name, unsigned namelen, - struct hpfs_dirent *new_de, int cdepth) + struct hpfs_dirent *new_de) { struct hpfs_inode_info *hpfs_inode = hpfs_i(i); struct dnode *d; @@ -403,7 +405,6 @@ int hpfs_add_dirent(struct inode *i, } } hpfs_brelse4(&qbh); - if (!cdepth) hpfs_lock_creation(i->i_sb); if (hpfs_check_free_dnodes(i->i_sb, FREE_DNODES_ADD)) { c = 1; goto ret; @@ -411,7 +412,6 @@ int hpfs_add_dirent(struct inode *i, i->i_version++; c = hpfs_add_to_dnode(i, dno, name, namelen, new_de, 0); ret: - if (!cdepth) hpfs_unlock_creation(i->i_sb); return c; } @@ -437,9 +437,9 @@ static secno move_to_top(struct inode *i, dnode_secno from, dnode_secno to) return 0; if (!(dnode = hpfs_map_dnode(i->i_sb, dno, &qbh))) return 0; if (hpfs_sb(i->i_sb)->sb_chk) { - if (dnode->up != chk_up) { + if (le32_to_cpu(dnode->up) != chk_up) { hpfs_error(i->i_sb, "move_to_top: up pointer from %08x should be %08x, is %08x", - dno, chk_up, dnode->up); + dno, chk_up, le32_to_cpu(dnode->up)); hpfs_brelse4(&qbh); return 0; } @@ -455,7 +455,7 @@ static secno move_to_top(struct inode *i, dnode_secno from, dnode_secno to) hpfs_brelse4(&qbh); } while (!(de = dnode_pre_last_de(dnode))) { - dnode_secno up = dnode->up; + dnode_secno up = le32_to_cpu(dnode->up); hpfs_brelse4(&qbh); hpfs_free_dnode(i->i_sb, dno); i->i_size -= 2048; @@ -474,8 +474,8 @@ static secno move_to_top(struct inode *i, dnode_secno from, dnode_secno to) hpfs_brelse4(&qbh); return 0; } - dnode->first_free -= 4; - de->length -= 4; + dnode->first_free = cpu_to_le32(le32_to_cpu(dnode->first_free) - 4); + de->length = cpu_to_le16(le16_to_cpu(de->length) - 4); de->down = 0; 
hpfs_mark_4buffers_dirty(&qbh); dno = up; @@ -483,12 +483,12 @@ static secno move_to_top(struct inode *i, dnode_secno from, dnode_secno to) t = get_pos(dnode, de); for_all_poss(i, hpfs_pos_subst, t, 4); for_all_poss(i, hpfs_pos_subst, t + 1, 5); - if (!(nde = kmalloc(de->length, GFP_NOFS))) { + if (!(nde = kmalloc(le16_to_cpu(de->length), GFP_NOFS))) { hpfs_error(i->i_sb, "out of memory for dirent - directory will be corrupted"); hpfs_brelse4(&qbh); return 0; } - memcpy(nde, de, de->length); + memcpy(nde, de, le16_to_cpu(de->length)); ddno = de->down ? de_down_pointer(de) : 0; hpfs_delete_de(i->i_sb, dnode, de); set_last_pointer(i->i_sb, dnode, ddno); @@ -517,11 +517,11 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno) try_it_again: if (hpfs_stop_cycles(i->i_sb, dno, &c1, &c2, "delete_empty_dnode")) return; if (!(dnode = hpfs_map_dnode(i->i_sb, dno, &qbh))) return; - if (dnode->first_free > 56) goto end; - if (dnode->first_free == 52 || dnode->first_free == 56) { + if (le32_to_cpu(dnode->first_free) > 56) goto end; + if (le32_to_cpu(dnode->first_free) == 52 || le32_to_cpu(dnode->first_free) == 56) { struct hpfs_dirent *de_end; int root = dnode->root_dnode; - up = dnode->up; + up = le32_to_cpu(dnode->up); de = dnode_first_de(dnode); down = de->down ? de_down_pointer(de) : 0; if (hpfs_sb(i->i_sb)->sb_chk) if (root && !down) { @@ -545,13 +545,13 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno) return; } if ((d1 = hpfs_map_dnode(i->i_sb, down, &qbh1))) { - d1->up = up; + d1->up = cpu_to_le32(up); d1->root_dnode = 1; hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); } if ((fnode = hpfs_map_fnode(i->i_sb, up, &bh))) { - fnode->u.external[0].disk_secno = down; + fnode->u.external[0].disk_secno = cpu_to_le32(down); mark_buffer_dirty(bh); brelse(bh); } @@ -570,22 +570,22 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno) for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | 1, ((loff_t)up << 4) | p); if (!down) { de->down = 0; - de->length -= 4; - dnode->first_free -= 4; + de->length = cpu_to_le16(le16_to_cpu(de->length) - 4); + dnode->first_free = cpu_to_le32(le32_to_cpu(dnode->first_free) - 4); memmove(de_next_de(de), (char *)de_next_de(de) + 4, - (char *)dnode + dnode->first_free - (char *)de_next_de(de)); + (char *)dnode + le32_to_cpu(dnode->first_free) - (char *)de_next_de(de)); } else { struct dnode *d1; struct quad_buffer_head qbh1; - *(dnode_secno *) ((void *) de + de->length - 4) = down; + *(dnode_secno *) ((void *) de + le16_to_cpu(de->length) - 4) = down; if ((d1 = hpfs_map_dnode(i->i_sb, down, &qbh1))) { - d1->up = up; + d1->up = cpu_to_le32(up); hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); } } } else { - hpfs_error(i->i_sb, "delete_empty_dnode: dnode %08x, first_free == %03x", dno, dnode->first_free); + hpfs_error(i->i_sb, "delete_empty_dnode: dnode %08x, first_free == %03x", dno, le32_to_cpu(dnode->first_free)); goto end; } @@ -596,18 +596,18 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno) struct quad_buffer_head qbh1; if (!de_next->down) goto endm; ndown = de_down_pointer(de_next); - if (!(de_cp = kmalloc(de->length, GFP_NOFS))) { + if (!(de_cp = kmalloc(le16_to_cpu(de->length), GFP_NOFS))) { printk("HPFS: out of memory for dtree balancing\n"); goto endm; } - memcpy(de_cp, de, de->length); + memcpy(de_cp, de, le16_to_cpu(de->length)); hpfs_delete_de(i->i_sb, dnode, de); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | p, 4); for_all_poss(i, 
hpfs_pos_del, ((loff_t)up << 4) | p, 1); if (de_cp->down) if ((d1 = hpfs_map_dnode(i->i_sb, de_down_pointer(de_cp), &qbh1))) { - d1->up = ndown; + d1->up = cpu_to_le32(ndown); hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); } @@ -635,7 +635,7 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno) struct hpfs_dirent *del = dnode_last_de(d1); dlp = del->down ? de_down_pointer(del) : 0; if (!dlp && down) { - if (d1->first_free > 2044) { + if (le32_to_cpu(d1->first_free) > 2044) { if (hpfs_sb(i->i_sb)->sb_chk >= 2) { printk("HPFS: warning: unbalanced dnode tree, see hpfs.txt 4 more info\n"); printk("HPFS: warning: terminating balancing operation\n"); @@ -647,38 +647,38 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno) printk("HPFS: warning: unbalanced dnode tree, see hpfs.txt 4 more info\n"); printk("HPFS: warning: goin'on\n"); } - del->length += 4; + del->length = cpu_to_le16(le16_to_cpu(del->length) + 4); del->down = 1; - d1->first_free += 4; + d1->first_free = cpu_to_le32(le32_to_cpu(d1->first_free) + 4); } if (dlp && !down) { - del->length -= 4; + del->length = cpu_to_le16(le16_to_cpu(del->length) - 4); del->down = 0; - d1->first_free -= 4; + d1->first_free = cpu_to_le32(le32_to_cpu(d1->first_free) - 4); } else if (down) - *(dnode_secno *) ((void *) del + del->length - 4) = down; + *(dnode_secno *) ((void *) del + le16_to_cpu(del->length) - 4) = cpu_to_le32(down); } else goto endm; - if (!(de_cp = kmalloc(de_prev->length, GFP_NOFS))) { + if (!(de_cp = kmalloc(le16_to_cpu(de_prev->length), GFP_NOFS))) { printk("HPFS: out of memory for dtree balancing\n"); hpfs_brelse4(&qbh1); goto endm; } hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); - memcpy(de_cp, de_prev, de_prev->length); + memcpy(de_cp, de_prev, le16_to_cpu(de_prev->length)); hpfs_delete_de(i->i_sb, dnode, de_prev); if (!de_prev->down) { - de_prev->length += 4; + de_prev->length = cpu_to_le16(le16_to_cpu(de_prev->length) + 4); de_prev->down = 1; - dnode->first_free += 4; + dnode->first_free = cpu_to_le32(le32_to_cpu(dnode->first_free) + 4); } - *(dnode_secno *) ((void *) de_prev + de_prev->length - 4) = ndown; + *(dnode_secno *) ((void *) de_prev + le16_to_cpu(de_prev->length) - 4) = cpu_to_le32(ndown); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | (p - 1), 4); for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | p, ((loff_t)up << 4) | (p - 1)); if (down) if ((d1 = hpfs_map_dnode(i->i_sb, de_down_pointer(de), &qbh1))) { - d1->up = ndown; + d1->up = cpu_to_le32(ndown); hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); } @@ -701,7 +701,6 @@ int hpfs_remove_dirent(struct inode *i, dnode_secno dno, struct hpfs_dirent *de, { struct dnode *dnode = qbh->data; dnode_secno down = 0; - int lock = 0; loff_t t; if (de->first || de->last) { hpfs_error(i->i_sb, "hpfs_remove_dirent: attempt to delete first or last dirent in dnode %08x", dno); @@ -710,11 +709,8 @@ int hpfs_remove_dirent(struct inode *i, dnode_secno dno, struct hpfs_dirent *de, } if (de->down) down = de_down_pointer(de); if (depth && (de->down || (de == dnode_first_de(dnode) && de_next_de(de)->last))) { - lock = 1; - hpfs_lock_creation(i->i_sb); if (hpfs_check_free_dnodes(i->i_sb, FREE_DNODES_DEL)) { hpfs_brelse4(qbh); - hpfs_unlock_creation(i->i_sb); return 2; } } @@ -727,11 +723,9 @@ int hpfs_remove_dirent(struct inode *i, dnode_secno dno, struct hpfs_dirent *de, dnode_secno a = move_to_top(i, down, dno); for_all_poss(i, hpfs_pos_subst, 5, t); if (a) delete_empty_dnode(i, a); - 
if (lock) hpfs_unlock_creation(i->i_sb); return !a; } delete_empty_dnode(i, dno); - if (lock) hpfs_unlock_creation(i->i_sb); return 0; } @@ -751,8 +745,8 @@ void hpfs_count_dnodes(struct super_block *s, dnode_secno dno, int *n_dnodes, ptr = 0; go_up: if (!(dnode = hpfs_map_dnode(s, dno, &qbh))) return; - if (hpfs_sb(s)->sb_chk) if (odno && odno != -1 && dnode->up != odno) - hpfs_error(s, "hpfs_count_dnodes: bad up pointer; dnode %08x, down %08x points to %08x", odno, dno, dnode->up); + if (hpfs_sb(s)->sb_chk) if (odno && odno != -1 && le32_to_cpu(dnode->up) != odno) + hpfs_error(s, "hpfs_count_dnodes: bad up pointer; dnode %08x, down %08x points to %08x", odno, dno, le32_to_cpu(dnode->up)); de = dnode_first_de(dnode); if (ptr) while(1) { if (de->down) if (de_down_pointer(de) == ptr) goto process_de; @@ -776,7 +770,7 @@ void hpfs_count_dnodes(struct super_block *s, dnode_secno dno, int *n_dnodes, if (!de->first && !de->last && n_items) (*n_items)++; if ((de = de_next_de(de)) < dnode_end_de(dnode)) goto next_de; ptr = dno; - dno = dnode->up; + dno = le32_to_cpu(dnode->up); if (dnode->root_dnode) { hpfs_brelse4(&qbh); return; @@ -824,8 +818,8 @@ dnode_secno hpfs_de_as_down_as_possible(struct super_block *s, dnode_secno dno) return d; if (!(de = map_nth_dirent(s, d, 1, &qbh, NULL))) return dno; if (hpfs_sb(s)->sb_chk) - if (up && ((struct dnode *)qbh.data)->up != up) - hpfs_error(s, "hpfs_de_as_down_as_possible: bad up pointer; dnode %08x, down %08x points to %08x", up, d, ((struct dnode *)qbh.data)->up); + if (up && le32_to_cpu(((struct dnode *)qbh.data)->up) != up) + hpfs_error(s, "hpfs_de_as_down_as_possible: bad up pointer; dnode %08x, down %08x points to %08x", up, d, le32_to_cpu(((struct dnode *)qbh.data)->up)); if (!de->down) { hpfs_brelse4(&qbh); return d; @@ -874,7 +868,7 @@ struct hpfs_dirent *map_pos_dirent(struct inode *inode, loff_t *posp, /* Going up */ if (dnode->root_dnode) goto bail; - if (!(up_dnode = hpfs_map_dnode(inode->i_sb, dnode->up, &qbh0))) + if (!(up_dnode = hpfs_map_dnode(inode->i_sb, le32_to_cpu(dnode->up), &qbh0))) goto bail; end_up_de = dnode_end_de(up_dnode); @@ -882,16 +876,16 @@ struct hpfs_dirent *map_pos_dirent(struct inode *inode, loff_t *posp, for (up_de = dnode_first_de(up_dnode); up_de < end_up_de; up_de = de_next_de(up_de)) { if (!(++c & 077)) hpfs_error(inode->i_sb, - "map_pos_dirent: pos crossed dnode boundary; dnode = %08x", dnode->up); + "map_pos_dirent: pos crossed dnode boundary; dnode = %08x", le32_to_cpu(dnode->up)); if (up_de->down && de_down_pointer(up_de) == dno) { - *posp = ((loff_t) dnode->up << 4) + c; + *posp = ((loff_t) le32_to_cpu(dnode->up) << 4) + c; hpfs_brelse4(&qbh0); return de; } } hpfs_error(inode->i_sb, "map_pos_dirent: pointer to dnode %08x not found in parent dnode %08x", - dno, dnode->up); + dno, le32_to_cpu(dnode->up)); hpfs_brelse4(&qbh0); bail: @@ -1017,17 +1011,17 @@ struct hpfs_dirent *map_fnode_dirent(struct super_block *s, fnode_secno fno, /*name2[15] = 0xff;*/ name1len = 15; name2len = 256; } - if (!(upf = hpfs_map_fnode(s, f->up, &bh))) { + if (!(upf = hpfs_map_fnode(s, le32_to_cpu(f->up), &bh))) { kfree(name2); return NULL; } if (!upf->dirflag) { brelse(bh); - hpfs_error(s, "fnode %08x has non-directory parent %08x", fno, f->up); + hpfs_error(s, "fnode %08x has non-directory parent %08x", fno, le32_to_cpu(f->up)); kfree(name2); return NULL; } - dno = upf->u.external[0].disk_secno; + dno = le32_to_cpu(upf->u.external[0].disk_secno); brelse(bh); go_down: downd = 0; @@ -1049,7 +1043,7 @@ struct hpfs_dirent 
*map_fnode_dirent(struct super_block *s, fnode_secno fno, return NULL; } next_de: - if (de->fnode == fno) { + if (le32_to_cpu(de->fnode) == fno) { kfree(name2); return de; } @@ -1065,7 +1059,7 @@ struct hpfs_dirent *map_fnode_dirent(struct super_block *s, fnode_secno fno, goto go_down; } f: - if (de->fnode == fno) { + if (le32_to_cpu(de->fnode) == fno) { kfree(name2); return de; } @@ -1074,7 +1068,7 @@ struct hpfs_dirent *map_fnode_dirent(struct super_block *s, fnode_secno fno, if ((de = de_next_de(de)) < de_end) goto next_de; if (d->root_dnode) goto not_found; downd = dno; - dno = d->up; + dno = le32_to_cpu(d->up); hpfs_brelse4(qbh); if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, downd, &d1, &d2, "map_fnode_dirent #2")) { diff --git a/fs/hpfs/ea.c b/fs/hpfs/ea.c index 45e53d972b4..d8b84d113c8 100644 --- a/fs/hpfs/ea.c +++ b/fs/hpfs/ea.c @@ -24,7 +24,7 @@ void hpfs_ea_ext_remove(struct super_block *s, secno a, int ano, unsigned len) } if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return; if (ea->indirect) { - if (ea->valuelen != 8) { + if (ea_valuelen(ea) != 8) { hpfs_error(s, "ea->indirect set while ea->valuelen!=8, %s %08x, pos %08x", ano ? "anode" : "sectors", a, pos); return; @@ -33,7 +33,7 @@ void hpfs_ea_ext_remove(struct super_block *s, secno a, int ano, unsigned len) return; hpfs_ea_remove(s, ea_sec(ea), ea->anode, ea_len(ea)); } - pos += ea->namelen + ea->valuelen + 5; + pos += ea->namelen + ea_valuelen(ea) + 5; } if (!ano) hpfs_free_sectors(s, a, (len+511) >> 9); else { @@ -76,24 +76,24 @@ int hpfs_read_ea(struct super_block *s, struct fnode *fnode, char *key, unsigned pos; int ano, len; secno a; + char ex[4 + 255 + 1 + 8]; struct extended_attribute *ea; struct extended_attribute *ea_end = fnode_end_ea(fnode); for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea)) if (!strcmp(ea->name, key)) { if (ea->indirect) goto indirect; - if (ea->valuelen >= size) + if (ea_valuelen(ea) >= size) return -EINVAL; - memcpy(buf, ea_data(ea), ea->valuelen); - buf[ea->valuelen] = 0; + memcpy(buf, ea_data(ea), ea_valuelen(ea)); + buf[ea_valuelen(ea)] = 0; return 0; } - a = fnode->ea_secno; - len = fnode->ea_size_l; + a = le32_to_cpu(fnode->ea_secno); + len = le32_to_cpu(fnode->ea_size_l); ano = fnode->ea_anode; pos = 0; while (pos < len) { - char ex[4 + 255 + 1 + 8]; ea = (struct extended_attribute *)ex; if (pos + 4 > len) { hpfs_error(s, "EAs don't end correctly, %s %08x, len %08x", @@ -106,14 +106,14 @@ int hpfs_read_ea(struct super_block *s, struct fnode *fnode, char *key, if (!strcmp(ea->name, key)) { if (ea->indirect) goto indirect; - if (ea->valuelen >= size) + if (ea_valuelen(ea) >= size) return -EINVAL; - if (hpfs_ea_read(s, a, ano, pos + 4 + ea->namelen + 1, ea->valuelen, buf)) + if (hpfs_ea_read(s, a, ano, pos + 4 + ea->namelen + 1, ea_valuelen(ea), buf)) return -EIO; - buf[ea->valuelen] = 0; + buf[ea_valuelen(ea)] = 0; return 0; } - pos += ea->namelen + ea->valuelen + 5; + pos += ea->namelen + ea_valuelen(ea) + 5; } return -ENOENT; indirect: @@ -138,16 +138,16 @@ char *hpfs_get_ea(struct super_block *s, struct fnode *fnode, char *key, int *si if (!strcmp(ea->name, key)) { if (ea->indirect) return get_indirect_ea(s, ea->anode, ea_sec(ea), *size = ea_len(ea)); - if (!(ret = kmalloc((*size = ea->valuelen) + 1, GFP_NOFS))) { + if (!(ret = kmalloc((*size = ea_valuelen(ea)) + 1, GFP_NOFS))) { printk("HPFS: out of memory for EA\n"); return NULL; } - memcpy(ret, ea_data(ea), ea->valuelen); - ret[ea->valuelen] = 0; + memcpy(ret, ea_data(ea), ea_valuelen(ea)); + ret[ea_valuelen(ea)] = 0; return 
ret; } - a = fnode->ea_secno; - len = fnode->ea_size_l; + a = le32_to_cpu(fnode->ea_secno); + len = le32_to_cpu(fnode->ea_size_l); ano = fnode->ea_anode; pos = 0; while (pos < len) { @@ -164,18 +164,18 @@ char *hpfs_get_ea(struct super_block *s, struct fnode *fnode, char *key, int *si if (!strcmp(ea->name, key)) { if (ea->indirect) return get_indirect_ea(s, ea->anode, ea_sec(ea), *size = ea_len(ea)); - if (!(ret = kmalloc((*size = ea->valuelen) + 1, GFP_NOFS))) { + if (!(ret = kmalloc((*size = ea_valuelen(ea)) + 1, GFP_NOFS))) { printk("HPFS: out of memory for EA\n"); return NULL; } - if (hpfs_ea_read(s, a, ano, pos + 4 + ea->namelen + 1, ea->valuelen, ret)) { + if (hpfs_ea_read(s, a, ano, pos + 4 + ea->namelen + 1, ea_valuelen(ea), ret)) { kfree(ret); return NULL; } - ret[ea->valuelen] = 0; + ret[ea_valuelen(ea)] = 0; return ret; } - pos += ea->namelen + ea->valuelen + 5; + pos += ea->namelen + ea_valuelen(ea) + 5; } return NULL; } @@ -202,13 +202,13 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key, if (ea->indirect) { if (ea_len(ea) == size) set_indirect_ea(s, ea->anode, ea_sec(ea), data, size); - } else if (ea->valuelen == size) { + } else if (ea_valuelen(ea) == size) { memcpy(ea_data(ea), data, size); } return; } - a = fnode->ea_secno; - len = fnode->ea_size_l; + a = le32_to_cpu(fnode->ea_secno); + len = le32_to_cpu(fnode->ea_size_l); ano = fnode->ea_anode; pos = 0; while (pos < len) { @@ -228,68 +228,70 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key, set_indirect_ea(s, ea->anode, ea_sec(ea), data, size); } else { - if (ea->valuelen == size) + if (ea_valuelen(ea) == size) hpfs_ea_write(s, a, ano, pos + 4 + ea->namelen + 1, size, data); } return; } - pos += ea->namelen + ea->valuelen + 5; + pos += ea->namelen + ea_valuelen(ea) + 5; } - if (!fnode->ea_offs) { - /*if (fnode->ea_size_s) { + if (!le16_to_cpu(fnode->ea_offs)) { + /*if (le16_to_cpu(fnode->ea_size_s)) { hpfs_error(s, "fnode %08x: ea_size_s == %03x, ea_offs == 0", - inode->i_ino, fnode->ea_size_s); + inode->i_ino, le16_to_cpu(fnode->ea_size_s)); return; }*/ - fnode->ea_offs = 0xc4; + fnode->ea_offs = cpu_to_le16(0xc4); } - if (fnode->ea_offs < 0xc4 || fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s > 0x200) { + if (le16_to_cpu(fnode->ea_offs) < 0xc4 || le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s) > 0x200) { hpfs_error(s, "fnode %08lx: ea_offs == %03x, ea_size_s == %03x", (unsigned long)inode->i_ino, - fnode->ea_offs, fnode->ea_size_s); + le32_to_cpu(fnode->ea_offs), le16_to_cpu(fnode->ea_size_s)); return; } - if ((fnode->ea_size_s || !fnode->ea_size_l) && - fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s + strlen(key) + size + 5 <= 0x200) { + if ((le16_to_cpu(fnode->ea_size_s) || !le32_to_cpu(fnode->ea_size_l)) && + le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s) + strlen(key) + size + 5 <= 0x200) { ea = fnode_end_ea(fnode); *(char *)ea = 0; ea->namelen = strlen(key); - ea->valuelen = size; + ea->valuelen_lo = size; + ea->valuelen_hi = size >> 8; strcpy(ea->name, key); memcpy(ea_data(ea), data, size); - fnode->ea_size_s += strlen(key) + size + 5; + fnode->ea_size_s = cpu_to_le16(le16_to_cpu(fnode->ea_size_s) + strlen(key) + size + 5); goto ret; } /* Most the code here is 99.9993422% unused. I hope there are no bugs. But what .. HPFS.IFS has also bugs in ea management. 
*/ - if (fnode->ea_size_s && !fnode->ea_size_l) { + if (le16_to_cpu(fnode->ea_size_s) && !le32_to_cpu(fnode->ea_size_l)) { secno n; struct buffer_head *bh; char *data; - if (!(n = hpfs_alloc_sector(s, fno, 1, 0, 1))) return; + if (!(n = hpfs_alloc_sector(s, fno, 1, 0))) return; if (!(data = hpfs_get_sector(s, n, &bh))) { hpfs_free_sectors(s, n, 1); return; } - memcpy(data, fnode_ea(fnode), fnode->ea_size_s); - fnode->ea_size_l = fnode->ea_size_s; - fnode->ea_size_s = 0; - fnode->ea_secno = n; - fnode->ea_anode = 0; + memcpy(data, fnode_ea(fnode), le16_to_cpu(fnode->ea_size_s)); + fnode->ea_size_l = cpu_to_le32(le16_to_cpu(fnode->ea_size_s)); + fnode->ea_size_s = cpu_to_le16(0); + fnode->ea_secno = cpu_to_le32(n); + fnode->ea_anode = cpu_to_le32(0); mark_buffer_dirty(bh); brelse(bh); } - pos = fnode->ea_size_l + 5 + strlen(key) + size; - len = (fnode->ea_size_l + 511) >> 9; + pos = le32_to_cpu(fnode->ea_size_l) + 5 + strlen(key) + size; + len = (le32_to_cpu(fnode->ea_size_l) + 511) >> 9; if (pos >= 30000) goto bail; while (((pos + 511) >> 9) > len) { if (!len) { - if (!(fnode->ea_secno = hpfs_alloc_sector(s, fno, 1, 0, 1))) - goto bail; + secno q = hpfs_alloc_sector(s, fno, 1, 0); + if (!q) goto bail; + fnode->ea_secno = cpu_to_le32(q); fnode->ea_anode = 0; len++; } else if (!fnode->ea_anode) { - if (hpfs_alloc_if_possible(s, fnode->ea_secno + len)) { + if (hpfs_alloc_if_possible(s, le32_to_cpu(fnode->ea_secno) + len)) { len++; } else { /* Aargh... don't know how to create ea anodes :-( */ @@ -298,26 +300,26 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key, anode_secno a_s; if (!(anode = hpfs_alloc_anode(s, fno, &a_s, &bh))) goto bail; - anode->up = fno; + anode->up = cpu_to_le32(fno); anode->btree.fnode_parent = 1; anode->btree.n_free_nodes--; anode->btree.n_used_nodes++; - anode->btree.first_free += 12; - anode->u.external[0].disk_secno = fnode->ea_secno; - anode->u.external[0].file_secno = 0; - anode->u.external[0].length = len; + anode->btree.first_free = cpu_to_le16(le16_to_cpu(anode->btree.first_free) + 12); + anode->u.external[0].disk_secno = cpu_to_le32(le32_to_cpu(fnode->ea_secno)); + anode->u.external[0].file_secno = cpu_to_le32(0); + anode->u.external[0].length = cpu_to_le32(len); mark_buffer_dirty(bh); brelse(bh); fnode->ea_anode = 1; - fnode->ea_secno = a_s;*/ + fnode->ea_secno = cpu_to_le32(a_s);*/ secno new_sec; int i; - if (!(new_sec = hpfs_alloc_sector(s, fno, 1, 1 - ((pos + 511) >> 9), 1))) + if (!(new_sec = hpfs_alloc_sector(s, fno, 1, 1 - ((pos + 511) >> 9)))) goto bail; for (i = 0; i < len; i++) { struct buffer_head *bh1, *bh2; void *b1, *b2; - if (!(b1 = hpfs_map_sector(s, fnode->ea_secno + i, &bh1, len - i - 1))) { + if (!(b1 = hpfs_map_sector(s, le32_to_cpu(fnode->ea_secno) + i, &bh1, len - i - 1))) { hpfs_free_sectors(s, new_sec, (pos + 511) >> 9); goto bail; } @@ -331,13 +333,13 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key, mark_buffer_dirty(bh2); brelse(bh2); } - hpfs_free_sectors(s, fnode->ea_secno, len); - fnode->ea_secno = new_sec; + hpfs_free_sectors(s, le32_to_cpu(fnode->ea_secno), len); + fnode->ea_secno = cpu_to_le32(new_sec); len = (pos + 511) >> 9; } } if (fnode->ea_anode) { - if (hpfs_add_sector_to_btree(s, fnode->ea_secno, + if (hpfs_add_sector_to_btree(s, le32_to_cpu(fnode->ea_secno), 0, len) != -1) { len++; } else { @@ -349,17 +351,17 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key, h[1] = strlen(key); h[2] = size & 0xff; h[3] = size >> 8; - if 
(hpfs_ea_write(s, fnode->ea_secno, fnode->ea_anode, fnode->ea_size_l, 4, h)) goto bail; - if (hpfs_ea_write(s, fnode->ea_secno, fnode->ea_anode, fnode->ea_size_l + 4, h[1] + 1, key)) goto bail; - if (hpfs_ea_write(s, fnode->ea_secno, fnode->ea_anode, fnode->ea_size_l + 5 + h[1], size, data)) goto bail; - fnode->ea_size_l = pos; + if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l), 4, h)) goto bail; + if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l) + 4, h[1] + 1, key)) goto bail; + if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l) + 5 + h[1], size, data)) goto bail; + fnode->ea_size_l = cpu_to_le32(pos); ret: hpfs_i(inode)->i_ea_size += 5 + strlen(key) + size; return; bail: - if (fnode->ea_secno) - if (fnode->ea_anode) hpfs_truncate_btree(s, fnode->ea_secno, 1, (fnode->ea_size_l + 511) >> 9); - else hpfs_free_sectors(s, fnode->ea_secno + ((fnode->ea_size_l + 511) >> 9), len - ((fnode->ea_size_l + 511) >> 9)); - else fnode->ea_secno = fnode->ea_size_l = 0; + if (le32_to_cpu(fnode->ea_secno)) + if (fnode->ea_anode) hpfs_truncate_btree(s, le32_to_cpu(fnode->ea_secno), 1, (le32_to_cpu(fnode->ea_size_l) + 511) >> 9); + else hpfs_free_sectors(s, le32_to_cpu(fnode->ea_secno) + ((le32_to_cpu(fnode->ea_size_l) + 511) >> 9), len - ((le32_to_cpu(fnode->ea_size_l) + 511) >> 9)); + else fnode->ea_secno = fnode->ea_size_l = cpu_to_le32(0); } diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c index 9b9eb6933e4..89c500ee521 100644 --- a/fs/hpfs/file.c +++ b/fs/hpfs/file.c @@ -20,8 +20,8 @@ static int hpfs_file_release(struct inode *inode, struct file *file) int hpfs_file_fsync(struct file *file, int datasync) { - /*return file_fsync(file, datasync);*/ - return 0; /* Don't fsync :-) */ + struct inode *inode = file->f_mapping->host; + return sync_blockdev(inode->i_sb->s_bdev); } /* @@ -48,38 +48,46 @@ static secno hpfs_bmap(struct inode *inode, unsigned file_secno) static void hpfs_truncate(struct inode *i) { if (IS_IMMUTABLE(i)) return /*-EPERM*/; - hpfs_lock(i->i_sb); + hpfs_lock_assert(i->i_sb); + hpfs_i(i)->i_n_secs = 0; i->i_blocks = 1 + ((i->i_size + 511) >> 9); hpfs_i(i)->mmu_private = i->i_size; hpfs_truncate_btree(i->i_sb, i->i_ino, 1, ((i->i_size + 511) >> 9)); hpfs_write_inode(i); hpfs_i(i)->i_n_secs = 0; - hpfs_unlock(i->i_sb); } static int hpfs_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { + int r; secno s; + hpfs_lock(inode->i_sb); s = hpfs_bmap(inode, iblock); if (s) { map_bh(bh_result, inode->i_sb, s); - return 0; + goto ret_0; } - if (!create) return 0; + if (!create) goto ret_0; if (iblock<<9 != hpfs_i(inode)->mmu_private) { BUG(); - return -EIO; + r = -EIO; + goto ret_r; } if ((s = hpfs_add_sector_to_btree(inode->i_sb, inode->i_ino, 1, inode->i_blocks - 1)) == -1) { hpfs_truncate_btree(inode->i_sb, inode->i_ino, 1, inode->i_blocks - 1); - return -ENOSPC; + r = -ENOSPC; + goto ret_r; } inode->i_blocks++; hpfs_i(inode)->mmu_private += 512; set_buffer_new(bh_result); map_bh(bh_result, inode->i_sb, s); - return 0; + ret_0: + r = 0; + ret_r: + hpfs_unlock(inode->i_sb); + return r; } static int hpfs_writepage(struct page *page, struct writeback_control *wbc) @@ -130,8 +138,11 @@ static ssize_t hpfs_file_write(struct file *file, const char __user *buf, ssize_t retval; retval = do_sync_write(file, buf, count, ppos); - if (retval > 0) + if (retval > 0) { + hpfs_lock(file->f_path.dentry->d_sb); 
hpfs_i(file->f_path.dentry->d_inode)->i_dirty = 1; + hpfs_unlock(file->f_path.dentry->d_sb); + } return retval; } diff --git a/fs/hpfs/hpfs.h b/fs/hpfs/hpfs.h index 0e84c73cd9c..8b0650aae32 100644 --- a/fs/hpfs/hpfs.h +++ b/fs/hpfs/hpfs.h @@ -19,9 +19,13 @@ For definitive information on HPFS, ask somebody else -- this is guesswork. There are certain to be many mistakes. */ +#if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN) +#error unknown endian +#endif + /* Notation */ -typedef unsigned secno; /* sector number, partition relative */ +typedef u32 secno; /* sector number, partition relative */ typedef secno dnode_secno; /* sector number of a dnode */ typedef secno fnode_secno; /* sector number of an fnode */ @@ -38,28 +42,28 @@ typedef u32 time32_t; /* 32-bit time_t type */ struct hpfs_boot_block { - unsigned char jmp[3]; - unsigned char oem_id[8]; - unsigned char bytes_per_sector[2]; /* 512 */ - unsigned char sectors_per_cluster; - unsigned char n_reserved_sectors[2]; - unsigned char n_fats; - unsigned char n_rootdir_entries[2]; - unsigned char n_sectors_s[2]; - unsigned char media_byte; - unsigned short sectors_per_fat; - unsigned short sectors_per_track; - unsigned short heads_per_cyl; - unsigned int n_hidden_sectors; - unsigned int n_sectors_l; /* size of partition */ - unsigned char drive_number; - unsigned char mbz; - unsigned char sig_28h; /* 28h */ - unsigned char vol_serno[4]; - unsigned char vol_label[11]; - unsigned char sig_hpfs[8]; /* "HPFS " */ - unsigned char pad[448]; - unsigned short magic; /* aa55 */ + u8 jmp[3]; + u8 oem_id[8]; + u8 bytes_per_sector[2]; /* 512 */ + u8 sectors_per_cluster; + u8 n_reserved_sectors[2]; + u8 n_fats; + u8 n_rootdir_entries[2]; + u8 n_sectors_s[2]; + u8 media_byte; + u16 sectors_per_fat; + u16 sectors_per_track; + u16 heads_per_cyl; + u32 n_hidden_sectors; + u32 n_sectors_l; /* size of partition */ + u8 drive_number; + u8 mbz; + u8 sig_28h; /* 28h */ + u8 vol_serno[4]; + u8 vol_label[11]; + u8 sig_hpfs[8]; /* "HPFS " */ + u8 pad[448]; + u16 magic; /* aa55 */ }; @@ -71,31 +75,29 @@ struct hpfs_boot_block struct hpfs_super_block { - unsigned magic; /* f995 e849 */ - unsigned magic1; /* fa53 e9c5, more magic? */ - /*unsigned huh202;*/ /* ?? 202 = N. of B. in 1.00390625 S.*/ - char version; /* version of a filesystem usually 2 */ - char funcversion; /* functional version - oldest version + u32 magic; /* f995 e849 */ + u32 magic1; /* fa53 e9c5, more magic? 
*/ + u8 version; /* version of a filesystem usually 2 */ + u8 funcversion; /* functional version - oldest version of filesystem that can understand this disk */ - unsigned short int zero; /* 0 */ + u16 zero; /* 0 */ fnode_secno root; /* fnode of root directory */ secno n_sectors; /* size of filesystem */ - unsigned n_badblocks; /* number of bad blocks */ + u32 n_badblocks; /* number of bad blocks */ secno bitmaps; /* pointers to free space bit maps */ - unsigned zero1; /* 0 */ + u32 zero1; /* 0 */ secno badblocks; /* bad block list */ - unsigned zero3; /* 0 */ + u32 zero3; /* 0 */ time32_t last_chkdsk; /* date last checked, 0 if never */ - /*unsigned zero4;*/ /* 0 */ - time32_t last_optimize; /* date last optimized, 0 if never */ + time32_t last_optimize; /* date last optimized, 0 if never */ secno n_dir_band; /* number of sectors in dir band */ secno dir_band_start; /* first sector in dir band */ secno dir_band_end; /* last sector in dir band */ secno dir_band_bitmap; /* free space map, 1 dnode per bit */ - char volume_name[32]; /* not used */ + u8 volume_name[32]; /* not used */ secno user_id_table; /* 8 preallocated sectors - user id */ - unsigned zero6[103]; /* 0 */ + u32 zero6[103]; /* 0 */ }; @@ -107,44 +109,65 @@ struct hpfs_super_block struct hpfs_spare_block { - unsigned magic; /* f991 1849 */ - unsigned magic1; /* fa52 29c5, more magic? */ - - unsigned dirty: 1; /* 0 clean, 1 "improperly stopped" */ - /*unsigned flag1234: 4;*/ /* unknown flags */ - unsigned sparedir_used: 1; /* spare dirblks used */ - unsigned hotfixes_used: 1; /* hotfixes used */ - unsigned bad_sector: 1; /* bad sector, corrupted disk (???) */ - unsigned bad_bitmap: 1; /* bad bitmap */ - unsigned fast: 1; /* partition was fast formatted */ - unsigned old_wrote: 1; /* old version wrote to partion */ - unsigned old_wrote_1: 1; /* old version wrote to partion (?) */ - unsigned install_dasd_limits: 1; /* HPFS386 flags */ - unsigned resynch_dasd_limits: 1; - unsigned dasd_limits_operational: 1; - unsigned multimedia_active: 1; - unsigned dce_acls_active: 1; - unsigned dasd_limits_dirty: 1; - unsigned flag67: 2; - unsigned char mm_contlgulty; - unsigned char unused; + u32 magic; /* f991 1849 */ + u32 magic1; /* fa52 29c5, more magic? */ + +#ifdef __LITTLE_ENDIAN + u8 dirty: 1; /* 0 clean, 1 "improperly stopped" */ + u8 sparedir_used: 1; /* spare dirblks used */ + u8 hotfixes_used: 1; /* hotfixes used */ + u8 bad_sector: 1; /* bad sector, corrupted disk (???) */ + u8 bad_bitmap: 1; /* bad bitmap */ + u8 fast: 1; /* partition was fast formatted */ + u8 old_wrote: 1; /* old version wrote to partion */ + u8 old_wrote_1: 1; /* old version wrote to partion (?) */ +#else + u8 old_wrote_1: 1; /* old version wrote to partion (?) */ + u8 old_wrote: 1; /* old version wrote to partion */ + u8 fast: 1; /* partition was fast formatted */ + u8 bad_bitmap: 1; /* bad bitmap */ + u8 bad_sector: 1; /* bad sector, corrupted disk (???) 
*/ + u8 hotfixes_used: 1; /* hotfixes used */ + u8 sparedir_used: 1; /* spare dirblks used */ + u8 dirty: 1; /* 0 clean, 1 "improperly stopped" */ +#endif + +#ifdef __LITTLE_ENDIAN + u8 install_dasd_limits: 1; /* HPFS386 flags */ + u8 resynch_dasd_limits: 1; + u8 dasd_limits_operational: 1; + u8 multimedia_active: 1; + u8 dce_acls_active: 1; + u8 dasd_limits_dirty: 1; + u8 flag67: 2; +#else + u8 flag67: 2; + u8 dasd_limits_dirty: 1; + u8 dce_acls_active: 1; + u8 multimedia_active: 1; + u8 dasd_limits_operational: 1; + u8 resynch_dasd_limits: 1; + u8 install_dasd_limits: 1; /* HPFS386 flags */ +#endif + + u8 mm_contlgulty; + u8 unused; secno hotfix_map; /* info about remapped bad sectors */ - unsigned n_spares_used; /* number of hotfixes */ - unsigned n_spares; /* number of spares in hotfix map */ - unsigned n_dnode_spares_free; /* spare dnodes unused */ - unsigned n_dnode_spares; /* length of spare_dnodes[] list, + u32 n_spares_used; /* number of hotfixes */ + u32 n_spares; /* number of spares in hotfix map */ + u32 n_dnode_spares_free; /* spare dnodes unused */ + u32 n_dnode_spares; /* length of spare_dnodes[] list, follows in this block*/ secno code_page_dir; /* code page directory block */ - unsigned n_code_pages; /* number of code pages */ - /*unsigned large_numbers[2];*/ /* ?? */ - unsigned super_crc; /* on HPFS386 and LAN Server this is + u32 n_code_pages; /* number of code pages */ + u32 super_crc; /* on HPFS386 and LAN Server this is checksum of superblock, on normal OS/2 unused */ - unsigned spare_crc; /* on HPFS386 checksum of spareblock */ - unsigned zero1[15]; /* unused */ + u32 spare_crc; /* on HPFS386 checksum of spareblock */ + u32 zero1[15]; /* unused */ dnode_secno spare_dnodes[100]; /* emergency free dnode list */ - unsigned zero2[1]; /* room for more? */ + u32 zero2[1]; /* room for more? */ }; /* The bad block list is 4 sectors long. The first word must be zero, @@ -179,18 +202,18 @@ struct hpfs_spare_block struct code_page_directory { - unsigned magic; /* 4945 21f7 */ - unsigned n_code_pages; /* number of pointers following */ - unsigned zero1[2]; + u32 magic; /* 4945 21f7 */ + u32 n_code_pages; /* number of pointers following */ + u32 zero1[2]; struct { - unsigned short ix; /* index */ - unsigned short code_page_number; /* code page number */ - unsigned bounds; /* matches corresponding word + u16 ix; /* index */ + u16 code_page_number; /* code page number */ + u32 bounds; /* matches corresponding word in data block */ secno code_page_data; /* sector number of a code_page_data containing c.p. array */ - unsigned short index; /* index in c.p. array in that sector*/ - unsigned short unknown; /* some unknown value; usually 0; + u16 index; /* index in c.p. 
array in that sector*/ + u16 unknown; /* some unknown value; usually 0; 2 in Japanese version */ } array[31]; /* unknown length */ }; @@ -201,21 +224,21 @@ struct code_page_directory struct code_page_data { - unsigned magic; /* 8945 21f7 */ - unsigned n_used; /* # elements used in c_p_data[] */ - unsigned bounds[3]; /* looks a bit like + u32 magic; /* 8945 21f7 */ + u32 n_used; /* # elements used in c_p_data[] */ + u32 bounds[3]; /* looks a bit like (beg1,end1), (beg2,end2) one byte each */ - unsigned short offs[3]; /* offsets from start of sector + u16 offs[3]; /* offsets from start of sector to start of c_p_data[ix] */ struct { - unsigned short ix; /* index */ - unsigned short code_page_number; /* code page number */ - unsigned short unknown; /* the same as in cp directory */ - unsigned char map[128]; /* upcase table for chars 80..ff */ - unsigned short zero2; + u16 ix; /* index */ + u16 code_page_number; /* code page number */ + u16 unknown; /* the same as in cp directory */ + u8 map[128]; /* upcase table for chars 80..ff */ + u16 zero2; } code_page[3]; - unsigned char incognita[78]; + u8 incognita[78]; }; @@ -255,50 +278,84 @@ struct code_page_data #define DNODE_MAGIC 0x77e40aae struct dnode { - unsigned magic; /* 77e4 0aae */ - unsigned first_free; /* offset from start of dnode to + u32 magic; /* 77e4 0aae */ + u32 first_free; /* offset from start of dnode to first free dir entry */ - unsigned root_dnode:1; /* Is it root dnode? */ - unsigned increment_me:31; /* some kind of activity counter? - Neither HPFS.IFS nor CHKDSK cares +#ifdef __LITTLE_ENDIAN + u8 root_dnode: 1; /* Is it root dnode? */ + u8 increment_me: 7; /* some kind of activity counter? */ + /* Neither HPFS.IFS nor CHKDSK cares + if you change this word */ +#else + u8 increment_me: 7; /* some kind of activity counter? */ + /* Neither HPFS.IFS nor CHKDSK cares if you change this word */ + u8 root_dnode: 1; /* Is it root dnode? */ +#endif + u8 increment_me2[3]; secno up; /* (root dnode) directory's fnode (nonroot) parent dnode */ dnode_secno self; /* pointer to this dnode */ - unsigned char dirent[2028]; /* one or more dirents */ + u8 dirent[2028]; /* one or more dirents */ }; struct hpfs_dirent { - unsigned short length; /* offset to next dirent */ - unsigned first: 1; /* set on phony ^A^A (".") entry */ - unsigned has_acl: 1; - unsigned down: 1; /* down pointer present (after name) */ - unsigned last: 1; /* set on phony \377 entry */ - unsigned has_ea: 1; /* entry has EA */ - unsigned has_xtd_perm: 1; /* has extended perm list (???) */ - unsigned has_explicit_acl: 1; - unsigned has_needea: 1; /* ?? some EA has NEEDEA set + u16 length; /* offset to next dirent */ + +#ifdef __LITTLE_ENDIAN + u8 first: 1; /* set on phony ^A^A (".") entry */ + u8 has_acl: 1; + u8 down: 1; /* down pointer present (after name) */ + u8 last: 1; /* set on phony \377 entry */ + u8 has_ea: 1; /* entry has EA */ + u8 has_xtd_perm: 1; /* has extended perm list (???) */ + u8 has_explicit_acl: 1; + u8 has_needea: 1; /* ?? some EA has NEEDEA set + I have no idea why this is + interesting in a dir entry */ +#else + u8 has_needea: 1; /* ?? 
some EA has NEEDEA set I have no idea why this is interesting in a dir entry */ - unsigned read_only: 1; /* dos attrib */ - unsigned hidden: 1; /* dos attrib */ - unsigned system: 1; /* dos attrib */ - unsigned flag11: 1; /* would be volume label dos attrib */ - unsigned directory: 1; /* dos attrib */ - unsigned archive: 1; /* dos attrib */ - unsigned not_8x3: 1; /* name is not 8.3 */ - unsigned flag15: 1; + u8 has_explicit_acl: 1; + u8 has_xtd_perm: 1; /* has extended perm list (???) */ + u8 has_ea: 1; /* entry has EA */ + u8 last: 1; /* set on phony \377 entry */ + u8 down: 1; /* down pointer present (after name) */ + u8 has_acl: 1; + u8 first: 1; /* set on phony ^A^A (".") entry */ +#endif + +#ifdef __LITTLE_ENDIAN + u8 read_only: 1; /* dos attrib */ + u8 hidden: 1; /* dos attrib */ + u8 system: 1; /* dos attrib */ + u8 flag11: 1; /* would be volume label dos attrib */ + u8 directory: 1; /* dos attrib */ + u8 archive: 1; /* dos attrib */ + u8 not_8x3: 1; /* name is not 8.3 */ + u8 flag15: 1; +#else + u8 flag15: 1; + u8 not_8x3: 1; /* name is not 8.3 */ + u8 archive: 1; /* dos attrib */ + u8 directory: 1; /* dos attrib */ + u8 flag11: 1; /* would be volume label dos attrib */ + u8 system: 1; /* dos attrib */ + u8 hidden: 1; /* dos attrib */ + u8 read_only: 1; /* dos attrib */ +#endif + fnode_secno fnode; /* fnode giving allocation info */ time32_t write_date; /* mtime */ - unsigned file_size; /* file length, bytes */ + u32 file_size; /* file length, bytes */ time32_t read_date; /* atime */ time32_t creation_date; /* ctime */ - unsigned ea_size; /* total EA length, bytes */ - unsigned char no_of_acls : 3; /* number of ACL's */ - unsigned char reserver : 5; - unsigned char ix; /* code page index (of filename), see + u32 ea_size; /* total EA length, bytes */ + u8 no_of_acls; /* number of ACL's (low 3 bits) */ + u8 ix; /* code page index (of filename), see struct code_page_data */ - unsigned char namelen, name[1]; /* file name */ + u8 namelen, name[1]; /* file name */ /* dnode_secno down; btree down pointer, if present, follows name on next word boundary, or maybe it precedes next dirent, which is on a word boundary. */ @@ -318,38 +375,50 @@ struct hpfs_dirent { struct bplus_leaf_node { - unsigned file_secno; /* first file sector in extent */ - unsigned length; /* length, sectors */ + u32 file_secno; /* first file sector in extent */ + u32 length; /* length, sectors */ secno disk_secno; /* first corresponding disk sector */ }; struct bplus_internal_node { - unsigned file_secno; /* subtree maps sectors < this */ + u32 file_secno; /* subtree maps sectors < this */ anode_secno down; /* pointer to subtree */ }; struct bplus_header { - unsigned hbff: 1; /* high bit of first free entry offset */ - unsigned flag1: 1; - unsigned flag2: 1; - unsigned flag3: 1; - unsigned flag4: 1; - unsigned fnode_parent: 1; /* ? we're pointed to by an fnode, +#ifdef __LITTLE_ENDIAN + u8 hbff: 1; /* high bit of first free entry offset */ + u8 flag1234: 4; + u8 fnode_parent: 1; /* ? 
we're pointed to by an fnode, the data btree or some ea or the main ea bootage pointer ea_secno */ /* also can get set in fnodes, which may be a chkdsk glitch or may mean this bit is irrelevant in fnodes, or this interpretation is all wet */ - unsigned binary_search: 1; /* suggest binary search (unused) */ - unsigned internal: 1; /* 1 -> (internal) tree of anodes + u8 binary_search: 1; /* suggest binary search (unused) */ + u8 internal: 1; /* 1 -> (internal) tree of anodes + 0 -> (leaf) list of extents */ +#else + u8 internal: 1; /* 1 -> (internal) tree of anodes 0 -> (leaf) list of extents */ - unsigned char fill[3]; - unsigned char n_free_nodes; /* free nodes in following array */ - unsigned char n_used_nodes; /* used nodes in following array */ - unsigned short first_free; /* offset from start of header to + u8 binary_search: 1; /* suggest binary search (unused) */ + u8 fnode_parent: 1; /* ? we're pointed to by an fnode, + the data btree or some ea or the + main ea bootage pointer ea_secno */ + /* also can get set in fnodes, which + may be a chkdsk glitch or may mean + this bit is irrelevant in fnodes, + or this interpretation is all wet */ + u8 flag1234: 4; + u8 hbff: 1; /* high bit of first free entry offset */ +#endif + u8 fill[3]; + u8 n_free_nodes; /* free nodes in following array */ + u8 n_used_nodes; /* used nodes in following array */ + u16 first_free; /* offset from start of header to first free node in array */ union { struct bplus_internal_node internal[0]; /* (internal) 2-word entries giving @@ -369,37 +438,38 @@ struct bplus_header struct fnode { - unsigned magic; /* f7e4 0aae */ - unsigned zero1[2]; /* read history */ - unsigned char len, name[15]; /* true length, truncated name */ + u32 magic; /* f7e4 0aae */ + u32 zero1[2]; /* read history */ + u8 len, name[15]; /* true length, truncated name */ fnode_secno up; /* pointer to file's directory fnode */ - /*unsigned zero2[3];*/ secno acl_size_l; secno acl_secno; - unsigned short acl_size_s; - char acl_anode; - char zero2; /* history bit count */ - unsigned ea_size_l; /* length of disk-resident ea's */ + u16 acl_size_s; + u8 acl_anode; + u8 zero2; /* history bit count */ + u32 ea_size_l; /* length of disk-resident ea's */ secno ea_secno; /* first sector of disk-resident ea's*/ - unsigned short ea_size_s; /* length of fnode-resident ea's */ - - unsigned flag0: 1; - unsigned ea_anode: 1; /* 1 -> ea_secno is an anode */ - unsigned flag2: 1; - unsigned flag3: 1; - unsigned flag4: 1; - unsigned flag5: 1; - unsigned flag6: 1; - unsigned flag7: 1; - unsigned dirflag: 1; /* 1 -> directory. first & only extent + u16 ea_size_s; /* length of fnode-resident ea's */ + +#ifdef __LITTLE_ENDIAN + u8 flag0: 1; + u8 ea_anode: 1; /* 1 -> ea_secno is an anode */ + u8 flag234567: 6; +#else + u8 flag234567: 6; + u8 ea_anode: 1; /* 1 -> ea_secno is an anode */ + u8 flag0: 1; +#endif + +#ifdef __LITTLE_ENDIAN + u8 dirflag: 1; /* 1 -> directory. first & only extent points to dnode. */ - unsigned flag9: 1; - unsigned flag10: 1; - unsigned flag11: 1; - unsigned flag12: 1; - unsigned flag13: 1; - unsigned flag14: 1; - unsigned flag15: 1; + u8 flag9012345: 7; +#else + u8 flag9012345: 7; + u8 dirflag: 1; /* 1 -> directory. first & only extent + points to dnode. 
*/ +#endif struct bplus_header btree; /* b+ tree, 8 extents or 12 subtrees */ union { @@ -407,17 +477,16 @@ struct fnode struct bplus_internal_node internal[12]; } u; - unsigned file_size; /* file length, bytes */ - unsigned n_needea; /* number of EA's with NEEDEA set */ - char user_id[16]; /* unused */ - unsigned short ea_offs; /* offset from start of fnode + u32 file_size; /* file length, bytes */ + u32 n_needea; /* number of EA's with NEEDEA set */ + u8 user_id[16]; /* unused */ + u16 ea_offs; /* offset from start of fnode to first fnode-resident ea */ - char dasd_limit_treshhold; - char dasd_limit_delta; - unsigned dasd_limit; - unsigned dasd_usage; - /*unsigned zero5[2];*/ - unsigned char ea[316]; /* zero or more EA's, packed together + u8 dasd_limit_treshhold; + u8 dasd_limit_delta; + u32 dasd_limit; + u32 dasd_usage; + u8 ea[316]; /* zero or more EA's, packed together with no alignment padding. (Do not use this name, get here via fnode + ea_offs. I think.) */ @@ -430,7 +499,7 @@ struct fnode struct anode { - unsigned magic; /* 37e4 0aae */ + u32 magic; /* 37e4 0aae */ anode_secno self; /* pointer to this anode */ secno up; /* parent anode or fnode */ @@ -440,7 +509,7 @@ struct anode struct bplus_internal_node internal[60]; } u; - unsigned fill[3]; /* unused */ + u32 fill[3]; /* unused */ }; @@ -461,25 +530,31 @@ struct anode struct extended_attribute { - unsigned indirect: 1; /* 1 -> value gives sector number +#ifdef __LITTLE_ENDIAN + u8 indirect: 1; /* 1 -> value gives sector number where real value starts */ - unsigned anode: 1; /* 1 -> sector is an anode + u8 anode: 1; /* 1 -> sector is an anode + that points to fragmented value */ + u8 flag23456: 5; + u8 needea: 1; /* required ea */ +#else + u8 needea: 1; /* required ea */ + u8 flag23456: 5; + u8 anode: 1; /* 1 -> sector is an anode that points to fragmented value */ - unsigned flag2: 1; - unsigned flag3: 1; - unsigned flag4: 1; - unsigned flag5: 1; - unsigned flag6: 1; - unsigned needea: 1; /* required ea */ - unsigned char namelen; /* length of name, bytes */ - unsigned short valuelen; /* length of value, bytes */ - unsigned char name[0]; + u8 indirect: 1; /* 1 -> value gives sector number + where real value starts */ +#endif + u8 namelen; /* length of name, bytes */ + u8 valuelen_lo; /* length of value, bytes */ + u8 valuelen_hi; /* length of value, bytes */ + u8 name[0]; /* - unsigned char name[namelen]; ascii attrib name - unsigned char nul; terminating '\0', not counted - unsigned char value[valuelen]; value, arbitrary + u8 name[namelen]; ascii attrib name + u8 nul; terminating '\0', not counted + u8 value[valuelen]; value, arbitrary if this.indirect, valuelen is 8 and the value is - unsigned length; real length of value, bytes + u32 length; real length of value, bytes secno secno; sector address where it starts if this.anode, the above sector number is the root of an anode tree which points to the value. 
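The hpfs.h hunks above convert every on-disk field from native `unsigned`/`unsigned short` to fixed-width u8/u16/u32, and the dnode.c/ea.c callers earlier in this diff now wrap each access in le16_to_cpu()/le32_to_cpu() on read and cpu_to_le16()/cpu_to_le32() on write; the 16-bit EA value length is additionally split into valuelen_lo/valuelen_hi behind an ea_valuelen() helper. What follows is a minimal userspace sketch of that access pattern, assuming glibc's <endian.h> le32toh()/htole32() as stand-ins for the kernel macros; the structures are illustrative, not the real HPFS layouts.

/*
 * Illustrative only: stand-in structures, not the real HPFS layouts.
 * le32toh()/htole32() from <endian.h> play the role of the kernel's
 * le32_to_cpu()/cpu_to_le32().
 */
#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct disk_dnode {
	uint32_t first_free;		/* kept in little-endian byte order */
};

struct disk_ea {
	uint8_t namelen;
	uint8_t valuelen_lo;		/* 16-bit length stored as two bytes, */
	uint8_t valuelen_hi;		/* like the valuelen_lo/_hi split above */
};

/* same arithmetic as the ea_valuelen() helper added in hpfs_fn.h */
static unsigned ea_valuelen(const struct disk_ea *ea)
{
	return ea->valuelen_lo + 256 * ea->valuelen_hi;
}

int main(void)
{
	struct disk_dnode d;
	struct disk_ea ea = { .namelen = 3, .valuelen_lo = 0x34, .valuelen_hi = 0x12 };

	d.first_free = htole32(0x14);		/* writer stores the CPU value as LE */

	/* read-modify-write, same shape as
	 * dnode->first_free = cpu_to_le32(le32_to_cpu(dnode->first_free) + 4); */
	d.first_free = htole32(le32toh(d.first_free) + 4);

	printf("first_free  = 0x%x\n", (unsigned)le32toh(d.first_free));	/* 0x18 */
	printf("ea valuelen = 0x%x\n", ea_valuelen(&ea));			/* 0x1234 */
	return 0;
}

The double conversion in the read-modify-write line is the point of the pattern: arithmetic always happens on the CPU-order value and only the stored copy stays little-endian, so the same code behaves identically on big-endian hosts, while the lo/hi byte split avoids declaring a possibly unaligned 16-bit field.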
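The same hpfs.h hunks split every on-disk flag byte into paired #ifdef __LITTLE_ENDIAN / __BIG_ENDIAN declarations with the bit-fields listed in opposite order. The sketch below, again a userspace illustration rather than kernel code, shows why that mirroring keeps the on-disk byte meaning stable: GCC allocates bit-fields from the least significant bit on little-endian ABIs and from the most significant bit on big-endian ones, which is the convention the kernel relies on. The flag names copy the hpfs_dirent layout; the expected raw value assumes that GCC convention.

#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

union dirent_flags {
	uint8_t raw;			/* the byte as it sits on disk */
	struct {
#if __BYTE_ORDER == __LITTLE_ENDIAN
		uint8_t first: 1;
		uint8_t has_acl: 1;
		uint8_t down: 1;
		uint8_t last: 1;
		uint8_t has_ea: 1;
		uint8_t has_xtd_perm: 1;
		uint8_t has_explicit_acl: 1;
		uint8_t has_needea: 1;
#else
		uint8_t has_needea: 1;
		uint8_t has_explicit_acl: 1;
		uint8_t has_xtd_perm: 1;
		uint8_t has_ea: 1;
		uint8_t last: 1;
		uint8_t down: 1;
		uint8_t has_acl: 1;
		uint8_t first: 1;
#endif
	} bits;
};

int main(void)
{
	union dirent_flags f = { .raw = 0 };

	f.bits.down = 1;		/* dirent has a down pointer after the name */
	f.bits.last = 1;		/* phony end-of-dnode marker */

	/* prints 0x0c on either host endianness, because the #if mirrors
	 * the declaration order the same way the patch does */
	printf("raw flag byte = 0x%02x\n", f.raw);
	return 0;
}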
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h index c15adbca07f..dd552f862c8 100644 --- a/fs/hpfs/hpfs_fn.h +++ b/fs/hpfs/hpfs_fn.h @@ -13,6 +13,7 @@ #include <linux/pagemap.h> #include <linux/buffer_head.h> #include <linux/slab.h> +#include <asm/unaligned.h> #include "hpfs.h" @@ -51,18 +52,16 @@ struct hpfs_inode_info { unsigned i_disk_sec; /* (files) minimalist cache of alloc info */ unsigned i_n_secs; /* (files) minimalist cache of alloc info */ unsigned i_ea_size; /* size of extended attributes */ - unsigned i_conv : 2; /* (files) crlf->newline hackery */ unsigned i_ea_mode : 1; /* file's permission is stored in ea */ unsigned i_ea_uid : 1; /* file's uid is stored in ea */ unsigned i_ea_gid : 1; /* file's gid is stored in ea */ unsigned i_dirty : 1; - struct mutex i_mutex; - struct mutex i_parent_mutex; loff_t **i_rddir_off; struct inode vfs_inode; }; struct hpfs_sb_info { + struct mutex hpfs_mutex; /* global hpfs lock */ ino_t sb_root; /* inode number of root dir */ unsigned sb_fs_size; /* file system size, sectors */ unsigned sb_bitmaps; /* sector number of bitmap list */ @@ -74,7 +73,6 @@ struct hpfs_sb_info { uid_t sb_uid; /* uid from mount options */ gid_t sb_gid; /* gid from mount options */ umode_t sb_mode; /* mode from mount options */ - unsigned sb_conv : 2; /* crlf->newline hackery */ unsigned sb_eas : 2; /* eas: 0-ignore, 1-ro, 2-rw */ unsigned sb_err : 2; /* on errs: 0-cont, 1-ro, 2-panic */ unsigned sb_chk : 2; /* checks: 0-no, 1-normal, 2-strict */ @@ -87,20 +85,9 @@ struct hpfs_sb_info { unsigned *sb_bmp_dir; /* main bitmap directory */ unsigned sb_c_bitmap; /* current bitmap */ unsigned sb_max_fwd_alloc; /* max forwad allocation */ - struct mutex hpfs_creation_de; /* when creating dirents, nobody else - can alloc blocks */ - /*unsigned sb_mounting : 1;*/ int sb_timeshift; }; -/* - * conv= options - */ - -#define CONV_BINARY 0 /* no conversion */ -#define CONV_TEXT 1 /* crlf->newline */ -#define CONV_AUTO 2 /* decide based on file contents */ - /* Four 512-byte buffers and the 2k block obtained by concatenating them */ struct quad_buffer_head { @@ -113,7 +100,7 @@ struct quad_buffer_head { static inline dnode_secno de_down_pointer (struct hpfs_dirent *de) { CHKCOND(de->down,("HPFS: de_down_pointer: !de->down\n")); - return *(dnode_secno *) ((void *) de + de->length - 4); + return le32_to_cpu(*(dnode_secno *) ((void *) de + le16_to_cpu(de->length) - 4)); } /* The first dir entry in a dnode */ @@ -127,41 +114,46 @@ static inline struct hpfs_dirent *dnode_first_de (struct dnode *dnode) static inline struct hpfs_dirent *dnode_end_de (struct dnode *dnode) { - CHKCOND(dnode->first_free>=0x14 && dnode->first_free<=0xa00,("HPFS: dnode_end_de: dnode->first_free = %d\n",(int)dnode->first_free)); - return (void *) dnode + dnode->first_free; + CHKCOND(le32_to_cpu(dnode->first_free)>=0x14 && le32_to_cpu(dnode->first_free)<=0xa00,("HPFS: dnode_end_de: dnode->first_free = %x\n",(unsigned)le32_to_cpu(dnode->first_free))); + return (void *) dnode + le32_to_cpu(dnode->first_free); } /* The dir entry after dir entry de */ static inline struct hpfs_dirent *de_next_de (struct hpfs_dirent *de) { - CHKCOND(de->length>=0x20 && de->length<0x800,("HPFS: de_next_de: de->length = %d\n",(int)de->length)); - return (void *) de + de->length; + CHKCOND(le16_to_cpu(de->length)>=0x20 && le16_to_cpu(de->length)<0x800,("HPFS: de_next_de: de->length = %x\n",(unsigned)le16_to_cpu(de->length))); + return (void *) de + le16_to_cpu(de->length); } static inline struct extended_attribute *fnode_ea(struct 
fnode *fnode) { - return (struct extended_attribute *)((char *)fnode + fnode->ea_offs + fnode->acl_size_s); + return (struct extended_attribute *)((char *)fnode + le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s)); } static inline struct extended_attribute *fnode_end_ea(struct fnode *fnode) { - return (struct extended_attribute *)((char *)fnode + fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s); + return (struct extended_attribute *)((char *)fnode + le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s)); +} + +static unsigned ea_valuelen(struct extended_attribute *ea) +{ + return ea->valuelen_lo + 256 * ea->valuelen_hi; } static inline struct extended_attribute *next_ea(struct extended_attribute *ea) { - return (struct extended_attribute *)((char *)ea + 5 + ea->namelen + ea->valuelen); + return (struct extended_attribute *)((char *)ea + 5 + ea->namelen + ea_valuelen(ea)); } static inline secno ea_sec(struct extended_attribute *ea) { - return *(secno *)((char *)ea + 9 + ea->namelen); + return le32_to_cpu(get_unaligned((secno *)((char *)ea + 9 + ea->namelen))); } static inline secno ea_len(struct extended_attribute *ea) { - return *(secno *)((char *)ea + 5 + ea->namelen); + return le32_to_cpu(get_unaligned((secno *)((char *)ea + 5 + ea->namelen))); } static inline char *ea_data(struct extended_attribute *ea) @@ -186,13 +178,13 @@ static inline void copy_de(struct hpfs_dirent *dst, struct hpfs_dirent *src) dst->not_8x3 = n; } -static inline unsigned tstbits(unsigned *bmp, unsigned b, unsigned n) +static inline unsigned tstbits(u32 *bmp, unsigned b, unsigned n) { int i; if ((b >= 0x4000) || (b + n - 1 >= 0x4000)) return n; - if (!((bmp[(b & 0x3fff) >> 5] >> (b & 0x1f)) & 1)) return 1; + if (!((le32_to_cpu(bmp[(b & 0x3fff) >> 5]) >> (b & 0x1f)) & 1)) return 1; for (i = 1; i < n; i++) - if (/*b+i < 0x4000 &&*/ !((bmp[((b+i) & 0x3fff) >> 5] >> ((b+i) & 0x1f)) & 1)) + if (!((le32_to_cpu(bmp[((b+i) & 0x3fff) >> 5]) >> ((b+i) & 0x1f)) & 1)) return i + 1; return 0; } @@ -200,12 +192,12 @@ static inline unsigned tstbits(unsigned *bmp, unsigned b, unsigned n) /* alloc.c */ int hpfs_chk_sectors(struct super_block *, secno, int, char *); -secno hpfs_alloc_sector(struct super_block *, secno, unsigned, int, int); +secno hpfs_alloc_sector(struct super_block *, secno, unsigned, int); int hpfs_alloc_if_possible(struct super_block *, secno); void hpfs_free_sectors(struct super_block *, secno, unsigned); int hpfs_check_free_dnodes(struct super_block *, int); void hpfs_free_dnode(struct super_block *, secno); -struct dnode *hpfs_alloc_dnode(struct super_block *, secno, dnode_secno *, struct quad_buffer_head *, int); +struct dnode *hpfs_alloc_dnode(struct super_block *, secno, dnode_secno *, struct quad_buffer_head *); struct fnode *hpfs_alloc_fnode(struct super_block *, secno, fnode_secno *, struct buffer_head **); struct anode *hpfs_alloc_anode(struct super_block *, secno, anode_secno *, struct buffer_head **); @@ -222,8 +214,6 @@ void hpfs_remove_fnode(struct super_block *, fnode_secno fno); /* buffer.c */ -void hpfs_lock_creation(struct super_block *); -void hpfs_unlock_creation(struct super_block *); void *hpfs_map_sector(struct super_block *, unsigned, struct buffer_head **, int); void *hpfs_get_sector(struct super_block *, unsigned, struct buffer_head **); void *hpfs_map_4sectors(struct super_block *, unsigned, struct quad_buffer_head *, int); @@ -247,7 +237,7 @@ void hpfs_del_pos(struct inode *, loff_t *); struct hpfs_dirent *hpfs_add_de(struct 
super_block *, struct dnode *, const unsigned char *, unsigned, secno); int hpfs_add_dirent(struct inode *, const unsigned char *, unsigned, - struct hpfs_dirent *, int); + struct hpfs_dirent *); int hpfs_remove_dirent(struct inode *, dnode_secno, struct hpfs_dirent *, struct quad_buffer_head *, int); void hpfs_count_dnodes(struct super_block *, dnode_secno, int *, int *, int *); dnode_secno hpfs_de_as_down_as_possible(struct super_block *, dnode_secno dno); @@ -303,7 +293,6 @@ int hpfs_compare_names(struct super_block *, const unsigned char *, unsigned, const unsigned char *, unsigned, int); int hpfs_is_name_long(const unsigned char *, unsigned); void hpfs_adjust_length(const unsigned char *, unsigned *); -void hpfs_decide_conv(struct inode *, const unsigned char *, unsigned); /* namei.c */ @@ -346,21 +335,26 @@ static inline time32_t gmt_to_local(struct super_block *s, time_t t) /* * Locking: * - * hpfs_lock() is a leftover from the big kernel lock. - * Right now, these functions are empty and only left - * for documentation purposes. The file system no longer - * works on SMP systems, so the lock is not needed - * any more. + * hpfs_lock() locks the whole filesystem. It must be taken + * on any method called by the VFS. * - * If someone is interested in making it work again, this - * would be the place to start by adding a per-superblock - * mutex and fixing all the bugs and performance issues - * caused by that. + * We don't do any per-file locking anymore, it is hard to + * review and HPFS is not performance-sensitive anyway. */ static inline void hpfs_lock(struct super_block *s) { + struct hpfs_sb_info *sbi = hpfs_sb(s); + mutex_lock(&sbi->hpfs_mutex); } static inline void hpfs_unlock(struct super_block *s) { + struct hpfs_sb_info *sbi = hpfs_sb(s); + mutex_unlock(&sbi->hpfs_mutex); +} + +static inline void hpfs_lock_assert(struct super_block *s) +{ + struct hpfs_sb_info *sbi = hpfs_sb(s); + WARN_ON(!mutex_is_locked(&sbi->hpfs_mutex)); } diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c index 87f1f787e76..338cd836845 100644 --- a/fs/hpfs/inode.c +++ b/fs/hpfs/inode.c @@ -17,7 +17,6 @@ void hpfs_init_inode(struct inode *i) i->i_uid = hpfs_sb(sb)->sb_uid; i->i_gid = hpfs_sb(sb)->sb_gid; i->i_mode = hpfs_sb(sb)->sb_mode; - hpfs_inode->i_conv = hpfs_sb(sb)->sb_conv; i->i_size = -1; i->i_blocks = -1; @@ -116,8 +115,8 @@ void hpfs_read_inode(struct inode *i) i->i_mode |= S_IFDIR; i->i_op = &hpfs_dir_iops; i->i_fop = &hpfs_dir_ops; - hpfs_inode->i_parent_dir = fnode->up; - hpfs_inode->i_dno = fnode->u.external[0].disk_secno; + hpfs_inode->i_parent_dir = le32_to_cpu(fnode->up); + hpfs_inode->i_dno = le32_to_cpu(fnode->u.external[0].disk_secno); if (hpfs_sb(sb)->sb_chk >= 2) { struct buffer_head *bh0; if (hpfs_map_fnode(sb, hpfs_inode->i_parent_dir, &bh0)) brelse(bh0); @@ -133,7 +132,7 @@ void hpfs_read_inode(struct inode *i) i->i_op = &hpfs_file_iops; i->i_fop = &hpfs_file_ops; i->i_nlink = 1; - i->i_size = fnode->file_size; + i->i_size = le32_to_cpu(fnode->file_size); i->i_blocks = ((i->i_size + 511) >> 9) + 1; i->i_data.a_ops = &hpfs_aops; hpfs_i(i)->mmu_private = i->i_size; @@ -144,7 +143,7 @@ void hpfs_read_inode(struct inode *i) static void hpfs_write_inode_ea(struct inode *i, struct fnode *fnode) { struct hpfs_inode_info *hpfs_inode = hpfs_i(i); - /*if (fnode->acl_size_l || fnode->acl_size_s) { + /*if (le32_to_cpu(fnode->acl_size_l) || le16_to_cpu(fnode->acl_size_s)) { Some unknown structures like ACL may be in fnode, we'd better not overwrite them hpfs_error(i->i_sb, "fnode %08x has some 
unknown HPFS386 stuctures", i->i_ino); @@ -187,9 +186,7 @@ void hpfs_write_inode(struct inode *i) kfree(hpfs_inode->i_rddir_off); hpfs_inode->i_rddir_off = NULL; } - mutex_lock(&hpfs_inode->i_parent_mutex); if (!i->i_nlink) { - mutex_unlock(&hpfs_inode->i_parent_mutex); return; } parent = iget_locked(i->i_sb, hpfs_inode->i_parent_dir); @@ -200,14 +197,9 @@ void hpfs_write_inode(struct inode *i) hpfs_read_inode(parent); unlock_new_inode(parent); } - mutex_lock(&hpfs_inode->i_mutex); hpfs_write_inode_nolock(i); - mutex_unlock(&hpfs_inode->i_mutex); iput(parent); - } else { - mark_inode_dirty(i); } - mutex_unlock(&hpfs_inode->i_parent_mutex); } void hpfs_write_inode_nolock(struct inode *i) @@ -226,30 +218,30 @@ void hpfs_write_inode_nolock(struct inode *i) } } else de = NULL; if (S_ISREG(i->i_mode)) { - fnode->file_size = i->i_size; - if (de) de->file_size = i->i_size; + fnode->file_size = cpu_to_le32(i->i_size); + if (de) de->file_size = cpu_to_le32(i->i_size); } else if (S_ISDIR(i->i_mode)) { - fnode->file_size = 0; - if (de) de->file_size = 0; + fnode->file_size = cpu_to_le32(0); + if (de) de->file_size = cpu_to_le32(0); } hpfs_write_inode_ea(i, fnode); if (de) { - de->write_date = gmt_to_local(i->i_sb, i->i_mtime.tv_sec); - de->read_date = gmt_to_local(i->i_sb, i->i_atime.tv_sec); - de->creation_date = gmt_to_local(i->i_sb, i->i_ctime.tv_sec); + de->write_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_mtime.tv_sec)); + de->read_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_atime.tv_sec)); + de->creation_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_ctime.tv_sec)); de->read_only = !(i->i_mode & 0222); - de->ea_size = hpfs_inode->i_ea_size; + de->ea_size = cpu_to_le32(hpfs_inode->i_ea_size); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); } if (S_ISDIR(i->i_mode)) { if ((de = map_dirent(i, hpfs_inode->i_dno, "\001\001", 2, NULL, &qbh))) { - de->write_date = gmt_to_local(i->i_sb, i->i_mtime.tv_sec); - de->read_date = gmt_to_local(i->i_sb, i->i_atime.tv_sec); - de->creation_date = gmt_to_local(i->i_sb, i->i_ctime.tv_sec); + de->write_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_mtime.tv_sec)); + de->read_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_atime.tv_sec)); + de->creation_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_ctime.tv_sec)); de->read_only = !(i->i_mode & 0222); - de->ea_size = /*hpfs_inode->i_ea_size*/0; - de->file_size = 0; + de->ea_size = cpu_to_le32(/*hpfs_inode->i_ea_size*/0); + de->file_size = cpu_to_le32(0); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); } else @@ -269,6 +261,10 @@ int hpfs_setattr(struct dentry *dentry, struct iattr *attr) hpfs_lock(inode->i_sb); if (inode->i_ino == hpfs_sb(inode->i_sb)->sb_root) goto out_unlock; + if ((attr->ia_valid & ATTR_UID) && attr->ia_uid >= 0x10000) + goto out_unlock; + if ((attr->ia_valid & ATTR_GID) && attr->ia_gid >= 0x10000) + goto out_unlock; if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) goto out_unlock; @@ -284,7 +280,6 @@ int hpfs_setattr(struct dentry *dentry, struct iattr *attr) } setattr_copy(inode, attr); - mark_inode_dirty(inode); hpfs_write_inode(inode); diff --git a/fs/hpfs/map.c b/fs/hpfs/map.c index 840d033ecee..a790821366a 100644 --- a/fs/hpfs/map.c +++ b/fs/hpfs/map.c @@ -21,7 +21,7 @@ unsigned int *hpfs_map_bitmap(struct super_block *s, unsigned bmp_block, hpfs_error(s, "hpfs_map_bitmap called with bad parameter: %08x at %s", bmp_block, id); return NULL; } - sec = hpfs_sb(s)->sb_bmp_dir[bmp_block]; + sec = le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[bmp_block]); if (!sec || sec > 
hpfs_sb(s)->sb_fs_size-4) { hpfs_error(s, "invalid bitmap block pointer %08x -> %08x at %s", bmp_block, sec, id); return NULL; @@ -46,18 +46,18 @@ unsigned char *hpfs_load_code_page(struct super_block *s, secno cps) struct code_page_data *cpd; struct code_page_directory *cp = hpfs_map_sector(s, cps, &bh, 0); if (!cp) return NULL; - if (cp->magic != CP_DIR_MAGIC) { - printk("HPFS: Code page directory magic doesn't match (magic = %08x)\n", cp->magic); + if (le32_to_cpu(cp->magic) != CP_DIR_MAGIC) { + printk("HPFS: Code page directory magic doesn't match (magic = %08x)\n", le32_to_cpu(cp->magic)); brelse(bh); return NULL; } - if (!cp->n_code_pages) { + if (!le32_to_cpu(cp->n_code_pages)) { printk("HPFS: n_code_pages == 0\n"); brelse(bh); return NULL; } - cpds = cp->array[0].code_page_data; - cpi = cp->array[0].index; + cpds = le32_to_cpu(cp->array[0].code_page_data); + cpi = le16_to_cpu(cp->array[0].index); brelse(bh); if (cpi >= 3) { @@ -66,12 +66,12 @@ unsigned char *hpfs_load_code_page(struct super_block *s, secno cps) } if (!(cpd = hpfs_map_sector(s, cpds, &bh, 0))) return NULL; - if ((unsigned)cpd->offs[cpi] > 0x178) { + if (le16_to_cpu(cpd->offs[cpi]) > 0x178) { printk("HPFS: Code page index out of sector\n"); brelse(bh); return NULL; } - ptr = (unsigned char *)cpd + cpd->offs[cpi] + 6; + ptr = (unsigned char *)cpd + le16_to_cpu(cpd->offs[cpi]) + 6; if (!(cp_table = kmalloc(256, GFP_KERNEL))) { printk("HPFS: out of memory for code page table\n"); brelse(bh); @@ -125,7 +125,7 @@ struct fnode *hpfs_map_fnode(struct super_block *s, ino_t ino, struct buffer_hea if (hpfs_sb(s)->sb_chk) { struct extended_attribute *ea; struct extended_attribute *ea_end; - if (fnode->magic != FNODE_MAGIC) { + if (le32_to_cpu(fnode->magic) != FNODE_MAGIC) { hpfs_error(s, "bad magic on fnode %08lx", (unsigned long)ino); goto bail; @@ -138,7 +138,7 @@ struct fnode *hpfs_map_fnode(struct super_block *s, ino_t ino, struct buffer_hea (unsigned long)ino); goto bail; } - if (fnode->btree.first_free != + if (le16_to_cpu(fnode->btree.first_free) != 8 + fnode->btree.n_used_nodes * (fnode->btree.internal ? 8 : 12)) { hpfs_error(s, "bad first_free pointer in fnode %08lx", @@ -146,12 +146,12 @@ struct fnode *hpfs_map_fnode(struct super_block *s, ino_t ino, struct buffer_hea goto bail; } } - if (fnode->ea_size_s && ((signed int)fnode->ea_offs < 0xc4 || - (signed int)fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s > 0x200)) { + if (le16_to_cpu(fnode->ea_size_s) && (le16_to_cpu(fnode->ea_offs) < 0xc4 || + le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s) > 0x200)) { hpfs_error(s, "bad EA info in fnode %08lx: ea_offs == %04x ea_size_s == %04x", (unsigned long)ino, - fnode->ea_offs, fnode->ea_size_s); + le16_to_cpu(fnode->ea_offs), le16_to_cpu(fnode->ea_size_s)); goto bail; } ea = fnode_ea(fnode); @@ -178,16 +178,20 @@ struct anode *hpfs_map_anode(struct super_block *s, anode_secno ano, struct buff if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, ano, 1, "anode")) return NULL; if ((anode = hpfs_map_sector(s, ano, bhp, ANODE_RD_AHEAD))) if (hpfs_sb(s)->sb_chk) { - if (anode->magic != ANODE_MAGIC || anode->self != ano) { + if (le32_to_cpu(anode->magic) != ANODE_MAGIC) { hpfs_error(s, "bad magic on anode %08x", ano); goto bail; } + if (le32_to_cpu(anode->self) != ano) { + hpfs_error(s, "self pointer invalid on anode %08x", ano); + goto bail; + } if ((unsigned)anode->btree.n_used_nodes + (unsigned)anode->btree.n_free_nodes != (anode->btree.internal ? 
60 : 40)) { hpfs_error(s, "bad number of nodes in anode %08x", ano); goto bail; } - if (anode->btree.first_free != + if (le16_to_cpu(anode->btree.first_free) != 8 + anode->btree.n_used_nodes * (anode->btree.internal ? 8 : 12)) { hpfs_error(s, "bad first_free pointer in anode %08x", ano); goto bail; @@ -219,26 +223,26 @@ struct dnode *hpfs_map_dnode(struct super_block *s, unsigned secno, unsigned p, pp = 0; unsigned char *d = (unsigned char *)dnode; int b = 0; - if (dnode->magic != DNODE_MAGIC) { + if (le32_to_cpu(dnode->magic) != DNODE_MAGIC) { hpfs_error(s, "bad magic on dnode %08x", secno); goto bail; } - if (dnode->self != secno) - hpfs_error(s, "bad self pointer on dnode %08x self = %08x", secno, dnode->self); + if (le32_to_cpu(dnode->self) != secno) + hpfs_error(s, "bad self pointer on dnode %08x self = %08x", secno, le32_to_cpu(dnode->self)); /* Check dirents - bad dirents would cause infinite loops or shooting to memory */ - if (dnode->first_free > 2048/* || dnode->first_free < 84*/) { - hpfs_error(s, "dnode %08x has first_free == %08x", secno, dnode->first_free); + if (le32_to_cpu(dnode->first_free) > 2048) { + hpfs_error(s, "dnode %08x has first_free == %08x", secno, le32_to_cpu(dnode->first_free)); goto bail; } - for (p = 20; p < dnode->first_free; p += d[p] + (d[p+1] << 8)) { + for (p = 20; p < le32_to_cpu(dnode->first_free); p += d[p] + (d[p+1] << 8)) { struct hpfs_dirent *de = (struct hpfs_dirent *)((char *)dnode + p); - if (de->length > 292 || (de->length < 32) || (de->length & 3) || p + de->length > 2048) { + if (le16_to_cpu(de->length) > 292 || (le16_to_cpu(de->length) < 32) || (le16_to_cpu(de->length) & 3) || p + le16_to_cpu(de->length) > 2048) { hpfs_error(s, "bad dirent size in dnode %08x, dirent %03x, last %03x", secno, p, pp); goto bail; } - if (((31 + de->namelen + de->down*4 + 3) & ~3) != de->length) { - if (((31 + de->namelen + de->down*4 + 3) & ~3) < de->length && s->s_flags & MS_RDONLY) goto ok; + if (((31 + de->namelen + de->down*4 + 3) & ~3) != le16_to_cpu(de->length)) { + if (((31 + de->namelen + de->down*4 + 3) & ~3) < le16_to_cpu(de->length) && s->s_flags & MS_RDONLY) goto ok; hpfs_error(s, "namelen does not match dirent size in dnode %08x, dirent %03x, last %03x", secno, p, pp); goto bail; } @@ -251,7 +255,7 @@ struct dnode *hpfs_map_dnode(struct super_block *s, unsigned secno, pp = p; } - if (p != dnode->first_free) { + if (p != le32_to_cpu(dnode->first_free)) { hpfs_error(s, "size on last dirent does not match first_free; dnode %08x", secno); goto bail; } @@ -277,7 +281,7 @@ dnode_secno hpfs_fnode_dno(struct super_block *s, ino_t ino) if (!fnode) return 0; - dno = fnode->u.external[0].disk_secno; + dno = le32_to_cpu(fnode->u.external[0].disk_secno); brelse(bh); return dno; } diff --git a/fs/hpfs/name.c b/fs/hpfs/name.c index f24736d7a43..9acdf338def 100644 --- a/fs/hpfs/name.c +++ b/fs/hpfs/name.c @@ -8,39 +8,6 @@ #include "hpfs_fn.h" -static const char *text_postfix[]={ -".ASM", ".BAS", ".BAT", ".C", ".CC", ".CFG", ".CMD", ".CON", ".CPP", ".DEF", -".DOC", ".DPR", ".ERX", ".H", ".HPP", ".HTM", ".HTML", ".JAVA", ".LOG", ".PAS", -".RC", ".TEX", ".TXT", ".Y", ""}; - -static const char *text_prefix[]={ -"AUTOEXEC.", "CHANGES", "COPYING", "CONFIG.", "CREDITS", "FAQ", "FILE_ID.DIZ", -"MAKEFILE", "READ.ME", "README", "TERMCAP", ""}; - -void hpfs_decide_conv(struct inode *inode, const unsigned char *name, unsigned len) -{ - struct hpfs_inode_info *hpfs_inode = hpfs_i(inode); - int i; - if (hpfs_inode->i_conv != CONV_AUTO) return; - for (i = 0; *text_postfix[i]; 
i++) { - int l = strlen(text_postfix[i]); - if (l <= len) - if (!hpfs_compare_names(inode->i_sb, text_postfix[i], l, name + len - l, l, 0)) - goto text; - } - for (i = 0; *text_prefix[i]; i++) { - int l = strlen(text_prefix[i]); - if (l <= len) - if (!hpfs_compare_names(inode->i_sb, text_prefix[i], l, name, l, 0)) - goto text; - } - hpfs_inode->i_conv = CONV_BINARY; - return; - text: - hpfs_inode->i_conv = CONV_TEXT; - return; -} - static inline int not_allowed_char(unsigned char c) { return c<' ' || c=='"' || c=='*' || c=='/' || c==':' || c=='<' || diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c index d5f8c8a1902..1f05839c27a 100644 --- a/fs/hpfs/namei.c +++ b/fs/hpfs/namei.c @@ -29,7 +29,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) fnode = hpfs_alloc_fnode(dir->i_sb, hpfs_i(dir)->i_dno, &fno, &bh); if (!fnode) goto bail; - dnode = hpfs_alloc_dnode(dir->i_sb, fno, &dno, &qbh0, 1); + dnode = hpfs_alloc_dnode(dir->i_sb, fno, &dno, &qbh0); if (!dnode) goto bail1; memset(&dee, 0, sizeof dee); @@ -37,8 +37,8 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) if (!(mode & 0222)) dee.read_only = 1; /*dee.archive = 0;*/ dee.hidden = name[0] == '.'; - dee.fnode = fno; - dee.creation_date = dee.write_date = dee.read_date = gmt_to_local(dir->i_sb, get_seconds()); + dee.fnode = cpu_to_le32(fno); + dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(gmt_to_local(dir->i_sb, get_seconds())); result = new_inode(dir->i_sb); if (!result) goto bail2; @@ -46,7 +46,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) result->i_ino = fno; hpfs_i(result)->i_parent_dir = dir->i_ino; hpfs_i(result)->i_dno = dno; - result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, dee.creation_date); + result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)); result->i_ctime.tv_nsec = 0; result->i_mtime.tv_nsec = 0; result->i_atime.tv_nsec = 0; @@ -60,8 +60,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) if (dee.read_only) result->i_mode &= ~0222; - mutex_lock(&hpfs_i(dir)->i_mutex); - r = hpfs_add_dirent(dir, name, len, &dee, 0); + r = hpfs_add_dirent(dir, name, len, &dee); if (r == 1) goto bail3; if (r == -1) { @@ -70,21 +69,21 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) } fnode->len = len; memcpy(fnode->name, name, len > 15 ? 
15 : len); - fnode->up = dir->i_ino; + fnode->up = cpu_to_le32(dir->i_ino); fnode->dirflag = 1; fnode->btree.n_free_nodes = 7; fnode->btree.n_used_nodes = 1; - fnode->btree.first_free = 0x14; - fnode->u.external[0].disk_secno = dno; - fnode->u.external[0].file_secno = -1; + fnode->btree.first_free = cpu_to_le16(0x14); + fnode->u.external[0].disk_secno = cpu_to_le32(dno); + fnode->u.external[0].file_secno = cpu_to_le32(-1); dnode->root_dnode = 1; - dnode->up = fno; + dnode->up = cpu_to_le32(fno); de = hpfs_add_de(dir->i_sb, dnode, "\001\001", 2, 0); - de->creation_date = de->write_date = de->read_date = gmt_to_local(dir->i_sb, get_seconds()); + de->creation_date = de->write_date = de->read_date = cpu_to_le32(gmt_to_local(dir->i_sb, get_seconds())); if (!(mode & 0222)) de->read_only = 1; de->first = de->directory = 1; /*de->hidden = de->system = 0;*/ - de->fnode = fno; + de->fnode = cpu_to_le32(fno); mark_buffer_dirty(bh); brelse(bh); hpfs_mark_4buffers_dirty(&qbh0); @@ -101,11 +100,9 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) hpfs_write_inode_nolock(result); } d_instantiate(dentry, result); - mutex_unlock(&hpfs_i(dir)->i_mutex); hpfs_unlock(dir->i_sb); return 0; bail3: - mutex_unlock(&hpfs_i(dir)->i_mutex); iput(result); bail2: hpfs_brelse4(&qbh0); @@ -140,8 +137,8 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc if (!(mode & 0222)) dee.read_only = 1; dee.archive = 1; dee.hidden = name[0] == '.'; - dee.fnode = fno; - dee.creation_date = dee.write_date = dee.read_date = gmt_to_local(dir->i_sb, get_seconds()); + dee.fnode = cpu_to_le32(fno); + dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(gmt_to_local(dir->i_sb, get_seconds())); result = new_inode(dir->i_sb); if (!result) @@ -154,9 +151,8 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc result->i_op = &hpfs_file_iops; result->i_fop = &hpfs_file_ops; result->i_nlink = 1; - hpfs_decide_conv(result, name, len); hpfs_i(result)->i_parent_dir = dir->i_ino; - result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, dee.creation_date); + result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)); result->i_ctime.tv_nsec = 0; result->i_mtime.tv_nsec = 0; result->i_atime.tv_nsec = 0; @@ -168,8 +164,7 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc result->i_data.a_ops = &hpfs_aops; hpfs_i(result)->mmu_private = 0; - mutex_lock(&hpfs_i(dir)->i_mutex); - r = hpfs_add_dirent(dir, name, len, &dee, 0); + r = hpfs_add_dirent(dir, name, len, &dee); if (r == 1) goto bail2; if (r == -1) { @@ -178,7 +173,7 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc } fnode->len = len; memcpy(fnode->name, name, len > 15 ? 
15 : len); - fnode->up = dir->i_ino; + fnode->up = cpu_to_le32(dir->i_ino); mark_buffer_dirty(bh); brelse(bh); @@ -193,12 +188,10 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc hpfs_write_inode_nolock(result); } d_instantiate(dentry, result); - mutex_unlock(&hpfs_i(dir)->i_mutex); hpfs_unlock(dir->i_sb); return 0; bail2: - mutex_unlock(&hpfs_i(dir)->i_mutex); iput(result); bail1: brelse(bh); @@ -232,8 +225,8 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t if (!(mode & 0222)) dee.read_only = 1; dee.archive = 1; dee.hidden = name[0] == '.'; - dee.fnode = fno; - dee.creation_date = dee.write_date = dee.read_date = gmt_to_local(dir->i_sb, get_seconds()); + dee.fnode = cpu_to_le32(fno); + dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(gmt_to_local(dir->i_sb, get_seconds())); result = new_inode(dir->i_sb); if (!result) @@ -242,7 +235,7 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t hpfs_init_inode(result); result->i_ino = fno; hpfs_i(result)->i_parent_dir = dir->i_ino; - result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, dee.creation_date); + result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)); result->i_ctime.tv_nsec = 0; result->i_mtime.tv_nsec = 0; result->i_atime.tv_nsec = 0; @@ -254,8 +247,7 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t result->i_blocks = 1; init_special_inode(result, mode, rdev); - mutex_lock(&hpfs_i(dir)->i_mutex); - r = hpfs_add_dirent(dir, name, len, &dee, 0); + r = hpfs_add_dirent(dir, name, len, &dee); if (r == 1) goto bail2; if (r == -1) { @@ -264,19 +256,17 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t } fnode->len = len; memcpy(fnode->name, name, len > 15 ? 
15 : len); - fnode->up = dir->i_ino; + fnode->up = cpu_to_le32(dir->i_ino); mark_buffer_dirty(bh); insert_inode_hash(result); hpfs_write_inode_nolock(result); d_instantiate(dentry, result); - mutex_unlock(&hpfs_i(dir)->i_mutex); brelse(bh); hpfs_unlock(dir->i_sb); return 0; bail2: - mutex_unlock(&hpfs_i(dir)->i_mutex); iput(result); bail1: brelse(bh); @@ -310,8 +300,8 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy memset(&dee, 0, sizeof dee); dee.archive = 1; dee.hidden = name[0] == '.'; - dee.fnode = fno; - dee.creation_date = dee.write_date = dee.read_date = gmt_to_local(dir->i_sb, get_seconds()); + dee.fnode = cpu_to_le32(fno); + dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(gmt_to_local(dir->i_sb, get_seconds())); result = new_inode(dir->i_sb); if (!result) @@ -319,7 +309,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy result->i_ino = fno; hpfs_init_inode(result); hpfs_i(result)->i_parent_dir = dir->i_ino; - result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, dee.creation_date); + result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)); result->i_ctime.tv_nsec = 0; result->i_mtime.tv_nsec = 0; result->i_atime.tv_nsec = 0; @@ -333,8 +323,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy result->i_op = &page_symlink_inode_operations; result->i_data.a_ops = &hpfs_symlink_aops; - mutex_lock(&hpfs_i(dir)->i_mutex); - r = hpfs_add_dirent(dir, name, len, &dee, 0); + r = hpfs_add_dirent(dir, name, len, &dee); if (r == 1) goto bail2; if (r == -1) { @@ -343,7 +332,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy } fnode->len = len; memcpy(fnode->name, name, len > 15 ? 
15 : len); - fnode->up = dir->i_ino; + fnode->up = cpu_to_le32(dir->i_ino); hpfs_set_ea(result, fnode, "SYMLINK", symlink, strlen(symlink)); mark_buffer_dirty(bh); brelse(bh); @@ -352,11 +341,9 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy hpfs_write_inode_nolock(result); d_instantiate(dentry, result); - mutex_unlock(&hpfs_i(dir)->i_mutex); hpfs_unlock(dir->i_sb); return 0; bail2: - mutex_unlock(&hpfs_i(dir)->i_mutex); iput(result); bail1: brelse(bh); @@ -374,7 +361,6 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry) struct hpfs_dirent *de; struct inode *inode = dentry->d_inode; dnode_secno dno; - fnode_secno fno; int r; int rep = 0; int err; @@ -382,8 +368,6 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry) hpfs_lock(dir->i_sb); hpfs_adjust_length(name, &len); again: - mutex_lock(&hpfs_i(inode)->i_parent_mutex); - mutex_lock(&hpfs_i(dir)->i_mutex); err = -ENOENT; de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh); if (!de) @@ -397,7 +381,6 @@ again: if (de->directory) goto out1; - fno = de->fnode; r = hpfs_remove_dirent(dir, dno, de, &qbh, 1); switch (r) { case 1: @@ -410,8 +393,6 @@ again: if (rep++) break; - mutex_unlock(&hpfs_i(dir)->i_mutex); - mutex_unlock(&hpfs_i(inode)->i_parent_mutex); dentry_unhash(dentry); if (!d_unhashed(dentry)) { dput(dentry); @@ -445,8 +426,6 @@ again: out1: hpfs_brelse4(&qbh); out: - mutex_unlock(&hpfs_i(dir)->i_mutex); - mutex_unlock(&hpfs_i(inode)->i_parent_mutex); hpfs_unlock(dir->i_sb); return err; } @@ -459,15 +438,12 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry) struct hpfs_dirent *de; struct inode *inode = dentry->d_inode; dnode_secno dno; - fnode_secno fno; int n_items = 0; int err; int r; hpfs_adjust_length(name, &len); hpfs_lock(dir->i_sb); - mutex_lock(&hpfs_i(inode)->i_parent_mutex); - mutex_lock(&hpfs_i(dir)->i_mutex); err = -ENOENT; de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh); if (!de) @@ -486,7 +462,6 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry) if (n_items) goto out1; - fno = de->fnode; r = hpfs_remove_dirent(dir, dno, de, &qbh, 1); switch (r) { case 1: @@ -505,8 +480,6 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry) out1: hpfs_brelse4(&qbh); out: - mutex_unlock(&hpfs_i(dir)->i_mutex); - mutex_unlock(&hpfs_i(inode)->i_parent_mutex); hpfs_unlock(dir->i_sb); return err; } @@ -568,12 +541,6 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry, hpfs_lock(i->i_sb); /* order doesn't matter, due to VFS exclusion */ - mutex_lock(&hpfs_i(i)->i_parent_mutex); - if (new_inode) - mutex_lock(&hpfs_i(new_inode)->i_parent_mutex); - mutex_lock(&hpfs_i(old_dir)->i_mutex); - if (new_dir != old_dir) - mutex_lock(&hpfs_i(new_dir)->i_mutex); /* Erm? Moving over the empty non-busy directory is perfectly legal */ if (new_inode && S_ISDIR(new_inode->i_mode)) { @@ -610,9 +577,7 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry, if (new_dir == old_dir) hpfs_brelse4(&qbh); - hpfs_lock_creation(i->i_sb); - if ((r = hpfs_add_dirent(new_dir, new_name, new_len, &de, 1))) { - hpfs_unlock_creation(i->i_sb); + if ((r = hpfs_add_dirent(new_dir, new_name, new_len, &de))) { if (r == -1) hpfs_error(new_dir->i_sb, "hpfs_rename: dirent already exists!"); err = r == 1 ? 
-ENOSPC : -EFSERROR; if (new_dir != old_dir) hpfs_brelse4(&qbh); @@ -621,20 +586,17 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry, if (new_dir == old_dir) if (!(dep = map_dirent(old_dir, hpfs_i(old_dir)->i_dno, old_name, old_len, &dno, &qbh))) { - hpfs_unlock_creation(i->i_sb); hpfs_error(i->i_sb, "lookup succeeded but map dirent failed at #2"); err = -ENOENT; goto end1; } if ((r = hpfs_remove_dirent(old_dir, dno, dep, &qbh, 0))) { - hpfs_unlock_creation(i->i_sb); hpfs_error(i->i_sb, "hpfs_rename: could not remove dirent"); err = r == 2 ? -ENOSPC : -EFSERROR; goto end1; } - hpfs_unlock_creation(i->i_sb); - + end: hpfs_i(i)->i_parent_dir = new_dir->i_ino; if (S_ISDIR(i->i_mode)) { @@ -642,22 +604,14 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry, drop_nlink(old_dir); } if ((fnode = hpfs_map_fnode(i->i_sb, i->i_ino, &bh))) { - fnode->up = new_dir->i_ino; + fnode->up = cpu_to_le32(new_dir->i_ino); fnode->len = new_len; memcpy(fnode->name, new_name, new_len>15?15:new_len); if (new_len < 15) memset(&fnode->name[new_len], 0, 15 - new_len); mark_buffer_dirty(bh); brelse(bh); } - hpfs_i(i)->i_conv = hpfs_sb(i->i_sb)->sb_conv; - hpfs_decide_conv(i, new_name, new_len); end1: - if (old_dir != new_dir) - mutex_unlock(&hpfs_i(new_dir)->i_mutex); - mutex_unlock(&hpfs_i(old_dir)->i_mutex); - mutex_unlock(&hpfs_i(i)->i_parent_mutex); - if (new_inode) - mutex_unlock(&hpfs_i(new_inode)->i_parent_mutex); hpfs_unlock(i->i_sb); return err; } diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c index c89b4080858..98580a3b500 100644 --- a/fs/hpfs/super.c +++ b/fs/hpfs/super.c @@ -18,15 +18,16 @@ /* Mark the filesystem dirty, so that chkdsk checks it when os/2 booted */ -static void mark_dirty(struct super_block *s) +static void mark_dirty(struct super_block *s, int remount) { - if (hpfs_sb(s)->sb_chkdsk && !(s->s_flags & MS_RDONLY)) { + if (hpfs_sb(s)->sb_chkdsk && (remount || !(s->s_flags & MS_RDONLY))) { struct buffer_head *bh; struct hpfs_spare_block *sb; if ((sb = hpfs_map_sector(s, 17, &bh, 0))) { sb->dirty = 1; sb->old_wrote = 0; mark_buffer_dirty(bh); + sync_dirty_buffer(bh); brelse(bh); } } @@ -40,10 +41,12 @@ static void unmark_dirty(struct super_block *s) struct buffer_head *bh; struct hpfs_spare_block *sb; if (s->s_flags & MS_RDONLY) return; + sync_blockdev(s->s_bdev); if ((sb = hpfs_map_sector(s, 17, &bh, 0))) { sb->dirty = hpfs_sb(s)->sb_chkdsk > 1 - hpfs_sb(s)->sb_was_error; sb->old_wrote = hpfs_sb(s)->sb_chkdsk >= 2 && !hpfs_sb(s)->sb_was_error; mark_buffer_dirty(bh); + sync_dirty_buffer(bh); brelse(bh); } } @@ -63,13 +66,13 @@ void hpfs_error(struct super_block *s, const char *fmt, ...) 
if (!hpfs_sb(s)->sb_was_error) { if (hpfs_sb(s)->sb_err == 2) { printk("; crashing the system because you wanted it\n"); - mark_dirty(s); + mark_dirty(s, 0); panic("HPFS panic"); } else if (hpfs_sb(s)->sb_err == 1) { if (s->s_flags & MS_RDONLY) printk("; already mounted read-only\n"); else { printk("; remounting read-only\n"); - mark_dirty(s); + mark_dirty(s, 0); s->s_flags |= MS_RDONLY; } } else if (s->s_flags & MS_RDONLY) printk("; going on - but anything won't be destroyed because it's read-only\n"); @@ -102,9 +105,12 @@ static void hpfs_put_super(struct super_block *s) { struct hpfs_sb_info *sbi = hpfs_sb(s); + hpfs_lock(s); + unmark_dirty(s); + hpfs_unlock(s); + kfree(sbi->sb_cp_table); kfree(sbi->sb_bmp_dir); - unmark_dirty(s); s->s_fs_info = NULL; kfree(sbi); } @@ -129,7 +135,7 @@ static unsigned count_bitmaps(struct super_block *s) n_bands = (hpfs_sb(s)->sb_fs_size + 0x3fff) >> 14; count = 0; for (n = 0; n < n_bands; n++) - count += hpfs_count_one_bitmap(s, hpfs_sb(s)->sb_bmp_dir[n]); + count += hpfs_count_one_bitmap(s, le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[n])); return count; } @@ -188,8 +194,6 @@ static void init_once(void *foo) { struct hpfs_inode_info *ei = (struct hpfs_inode_info *) foo; - mutex_init(&ei->i_mutex); - mutex_init(&ei->i_parent_mutex); inode_init_once(&ei->vfs_inode); } @@ -218,7 +222,6 @@ static void destroy_inodecache(void) enum { Opt_help, Opt_uid, Opt_gid, Opt_umask, Opt_case_lower, Opt_case_asis, - Opt_conv_binary, Opt_conv_text, Opt_conv_auto, Opt_check_none, Opt_check_normal, Opt_check_strict, Opt_err_cont, Opt_err_ro, Opt_err_panic, Opt_eas_no, Opt_eas_ro, Opt_eas_rw, @@ -233,9 +236,6 @@ static const match_table_t tokens = { {Opt_umask, "umask=%o"}, {Opt_case_lower, "case=lower"}, {Opt_case_asis, "case=asis"}, - {Opt_conv_binary, "conv=binary"}, - {Opt_conv_text, "conv=text"}, - {Opt_conv_auto, "conv=auto"}, {Opt_check_none, "check=none"}, {Opt_check_normal, "check=normal"}, {Opt_check_strict, "check=strict"}, @@ -253,7 +253,7 @@ static const match_table_t tokens = { }; static int parse_opts(char *opts, uid_t *uid, gid_t *gid, umode_t *umask, - int *lowercase, int *conv, int *eas, int *chk, int *errs, + int *lowercase, int *eas, int *chk, int *errs, int *chkdsk, int *timeshift) { char *p; @@ -295,15 +295,6 @@ static int parse_opts(char *opts, uid_t *uid, gid_t *gid, umode_t *umask, case Opt_case_asis: *lowercase = 0; break; - case Opt_conv_binary: - *conv = CONV_BINARY; - break; - case Opt_conv_text: - *conv = CONV_TEXT; - break; - case Opt_conv_auto: - *conv = CONV_AUTO; - break; case Opt_check_none: *chk = 0; break; @@ -370,9 +361,6 @@ HPFS filesystem options:\n\ umask=xxx set mode of files that don't have mode specified in eas\n\ case=lower lowercase all files\n\ case=asis do not lowercase files (default)\n\ - conv=binary do not convert CR/LF -> LF (default)\n\ - conv=auto convert only files with known text extensions\n\ - conv=text convert all files\n\ check=none no fs checks - kernel may crash on corrupted filesystem\n\ check=normal do some checks - it should not crash (default)\n\ check=strict do extra time-consuming checks, used for debugging\n\ @@ -394,7 +382,7 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data) uid_t uid; gid_t gid; umode_t umask; - int lowercase, conv, eas, chk, errs, chkdsk, timeshift; + int lowercase, eas, chk, errs, chkdsk, timeshift; int o; struct hpfs_sb_info *sbi = hpfs_sb(s); char *new_opts = kstrdup(data, GFP_KERNEL); @@ -405,11 +393,11 @@ static int hpfs_remount_fs(struct super_block *s, int 
*flags, char *data) lock_super(s); uid = sbi->sb_uid; gid = sbi->sb_gid; umask = 0777 & ~sbi->sb_mode; - lowercase = sbi->sb_lowercase; conv = sbi->sb_conv; + lowercase = sbi->sb_lowercase; eas = sbi->sb_eas; chk = sbi->sb_chk; chkdsk = sbi->sb_chkdsk; errs = sbi->sb_err; timeshift = sbi->sb_timeshift; - if (!(o = parse_opts(data, &uid, &gid, &umask, &lowercase, &conv, + if (!(o = parse_opts(data, &uid, &gid, &umask, &lowercase, &eas, &chk, &errs, &chkdsk, ×hift))) { printk("HPFS: bad mount options.\n"); goto out_err; @@ -427,11 +415,11 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data) sbi->sb_uid = uid; sbi->sb_gid = gid; sbi->sb_mode = 0777 & ~umask; - sbi->sb_lowercase = lowercase; sbi->sb_conv = conv; + sbi->sb_lowercase = lowercase; sbi->sb_eas = eas; sbi->sb_chk = chk; sbi->sb_chkdsk = chkdsk; sbi->sb_err = errs; sbi->sb_timeshift = timeshift; - if (!(*flags & MS_RDONLY)) mark_dirty(s); + if (!(*flags & MS_RDONLY)) mark_dirty(s, 1); replace_mount_options(s, new_opts); @@ -471,7 +459,7 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent) uid_t uid; gid_t gid; umode_t umask; - int lowercase, conv, eas, chk, errs, chkdsk, timeshift; + int lowercase, eas, chk, errs, chkdsk, timeshift; dnode_secno root_dno; struct hpfs_dirent *de = NULL; @@ -479,11 +467,6 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent) int o; - if (num_possible_cpus() > 1) { - printk(KERN_ERR "HPFS is not SMP safe\n"); - return -EINVAL; - } - save_mount_options(s, options); sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); @@ -495,20 +478,20 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent) sbi->sb_bmp_dir = NULL; sbi->sb_cp_table = NULL; - mutex_init(&sbi->hpfs_creation_de); + mutex_init(&sbi->hpfs_mutex); + hpfs_lock(s); uid = current_uid(); gid = current_gid(); umask = current_umask(); lowercase = 0; - conv = CONV_BINARY; eas = 2; chk = 1; errs = 1; chkdsk = 1; timeshift = 0; - if (!(o = parse_opts(options, &uid, &gid, &umask, &lowercase, &conv, + if (!(o = parse_opts(options, &uid, &gid, &umask, &lowercase, &eas, &chk, &errs, &chkdsk, ×hift))) { printk("HPFS: bad mount options.\n"); goto bail0; @@ -526,9 +509,9 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent) if (!(spareblock = hpfs_map_sector(s, 17, &bh2, 0))) goto bail3; /* Check magics */ - if (/*bootblock->magic != BB_MAGIC - ||*/ superblock->magic != SB_MAGIC - || spareblock->magic != SP_MAGIC) { + if (/*le16_to_cpu(bootblock->magic) != BB_MAGIC + ||*/ le32_to_cpu(superblock->magic) != SB_MAGIC + || le32_to_cpu(spareblock->magic) != SP_MAGIC) { if (!silent) printk("HPFS: Bad magic ... 
probably not HPFS\n"); goto bail4; } @@ -549,19 +532,18 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent) s->s_op = &hpfs_sops; s->s_d_op = &hpfs_dentry_operations; - sbi->sb_root = superblock->root; - sbi->sb_fs_size = superblock->n_sectors; - sbi->sb_bitmaps = superblock->bitmaps; - sbi->sb_dirband_start = superblock->dir_band_start; - sbi->sb_dirband_size = superblock->n_dir_band; - sbi->sb_dmap = superblock->dir_band_bitmap; + sbi->sb_root = le32_to_cpu(superblock->root); + sbi->sb_fs_size = le32_to_cpu(superblock->n_sectors); + sbi->sb_bitmaps = le32_to_cpu(superblock->bitmaps); + sbi->sb_dirband_start = le32_to_cpu(superblock->dir_band_start); + sbi->sb_dirband_size = le32_to_cpu(superblock->n_dir_band); + sbi->sb_dmap = le32_to_cpu(superblock->dir_band_bitmap); sbi->sb_uid = uid; sbi->sb_gid = gid; sbi->sb_mode = 0777 & ~umask; sbi->sb_n_free = -1; sbi->sb_n_free_dnodes = -1; sbi->sb_lowercase = lowercase; - sbi->sb_conv = conv; sbi->sb_eas = eas; sbi->sb_chk = chk; sbi->sb_chkdsk = chkdsk; @@ -573,7 +555,7 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent) sbi->sb_max_fwd_alloc = 0xffffff; /* Load bitmap directory */ - if (!(sbi->sb_bmp_dir = hpfs_load_bitmap_directory(s, superblock->bitmaps))) + if (!(sbi->sb_bmp_dir = hpfs_load_bitmap_directory(s, le32_to_cpu(superblock->bitmaps)))) goto bail4; /* Check for general fs errors*/ @@ -591,20 +573,20 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent) mark_buffer_dirty(bh2); } - if (spareblock->hotfixes_used || spareblock->n_spares_used) { + if (le32_to_cpu(spareblock->hotfixes_used) || le32_to_cpu(spareblock->n_spares_used)) { if (errs >= 2) { printk("HPFS: Hotfixes not supported here, try chkdsk\n"); - mark_dirty(s); + mark_dirty(s, 0); goto bail4; } hpfs_error(s, "hotfixes not supported here, try chkdsk"); if (errs == 0) printk("HPFS: Proceeding, but your filesystem will be probably corrupted by this driver...\n"); else printk("HPFS: This driver may read bad files or crash when operating on disk with hotfixes.\n"); } - if (spareblock->n_dnode_spares != spareblock->n_dnode_spares_free) { + if (le32_to_cpu(spareblock->n_dnode_spares) != le32_to_cpu(spareblock->n_dnode_spares_free)) { if (errs >= 2) { printk("HPFS: Spare dnodes used, try chkdsk\n"); - mark_dirty(s); + mark_dirty(s, 0); goto bail4; } hpfs_error(s, "warning: spare dnodes used, try chkdsk"); @@ -612,26 +594,26 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent) } if (chk) { unsigned a; - if (superblock->dir_band_end - superblock->dir_band_start + 1 != superblock->n_dir_band || - superblock->dir_band_end < superblock->dir_band_start || superblock->n_dir_band > 0x4000) { + if (le32_to_cpu(superblock->dir_band_end) - le32_to_cpu(superblock->dir_band_start) + 1 != le32_to_cpu(superblock->n_dir_band) || + le32_to_cpu(superblock->dir_band_end) < le32_to_cpu(superblock->dir_band_start) || le32_to_cpu(superblock->n_dir_band) > 0x4000) { hpfs_error(s, "dir band size mismatch: dir_band_start==%08x, dir_band_end==%08x, n_dir_band==%08x", - superblock->dir_band_start, superblock->dir_band_end, superblock->n_dir_band); + le32_to_cpu(superblock->dir_band_start), le32_to_cpu(superblock->dir_band_end), le32_to_cpu(superblock->n_dir_band)); goto bail4; } a = sbi->sb_dirband_size; sbi->sb_dirband_size = 0; - if (hpfs_chk_sectors(s, superblock->dir_band_start, superblock->n_dir_band, "dir_band") || - hpfs_chk_sectors(s, superblock->dir_band_bitmap, 4, "dir_band_bitmap") || - 
hpfs_chk_sectors(s, superblock->bitmaps, 4, "bitmaps")) { - mark_dirty(s); + if (hpfs_chk_sectors(s, le32_to_cpu(superblock->dir_band_start), le32_to_cpu(superblock->n_dir_band), "dir_band") || + hpfs_chk_sectors(s, le32_to_cpu(superblock->dir_band_bitmap), 4, "dir_band_bitmap") || + hpfs_chk_sectors(s, le32_to_cpu(superblock->bitmaps), 4, "bitmaps")) { + mark_dirty(s, 0); goto bail4; } sbi->sb_dirband_size = a; } else printk("HPFS: You really don't want any checks? You are crazy...\n"); /* Load code page table */ - if (spareblock->n_code_pages) - if (!(sbi->sb_cp_table = hpfs_load_code_page(s, spareblock->code_page_dir))) + if (le32_to_cpu(spareblock->n_code_pages)) + if (!(sbi->sb_cp_table = hpfs_load_code_page(s, le32_to_cpu(spareblock->code_page_dir)))) printk("HPFS: Warning: code page support is disabled\n"); brelse(bh2); @@ -660,13 +642,13 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent) if (!de) hpfs_error(s, "unable to find root dir"); else { - root->i_atime.tv_sec = local_to_gmt(s, de->read_date); + root->i_atime.tv_sec = local_to_gmt(s, le32_to_cpu(de->read_date)); root->i_atime.tv_nsec = 0; - root->i_mtime.tv_sec = local_to_gmt(s, de->write_date); + root->i_mtime.tv_sec = local_to_gmt(s, le32_to_cpu(de->write_date)); root->i_mtime.tv_nsec = 0; - root->i_ctime.tv_sec = local_to_gmt(s, de->creation_date); + root->i_ctime.tv_sec = local_to_gmt(s, le32_to_cpu(de->creation_date)); root->i_ctime.tv_nsec = 0; - hpfs_i(root)->i_ea_size = de->ea_size; + hpfs_i(root)->i_ea_size = le16_to_cpu(de->ea_size); hpfs_i(root)->i_parent_dir = root->i_ino; if (root->i_size == -1) root->i_size = 2048; @@ -674,6 +656,7 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent) root->i_blocks = 5; hpfs_brelse4(&qbh); } + hpfs_unlock(s); return 0; bail4: brelse(bh2); @@ -681,6 +664,7 @@ bail3: brelse(bh1); bail2: brelse(bh0); bail1: bail0: + hpfs_unlock(s); kfree(sbi->sb_bmp_dir); kfree(sbi->sb_cp_table); s->s_fs_info = NULL; diff --git a/fs/namei.c b/fs/namei.c index 54fc993e302..e3c4f112ebf 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -179,7 +179,7 @@ EXPORT_SYMBOL(putname); static int acl_permission_check(struct inode *inode, int mask, unsigned int flags, int (*check_acl)(struct inode *inode, int mask, unsigned int flags)) { - umode_t mode = inode->i_mode; + unsigned int mode = inode->i_mode; mask &= MAY_READ | MAY_WRITE | MAY_EXEC; diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c index 6f8192f4cfc..be79dc9f386 100644 --- a/fs/nfs/nfs4filelayout.c +++ b/fs/nfs/nfs4filelayout.c @@ -117,6 +117,8 @@ static int filelayout_async_handle_error(struct rpc_task *task, case -EKEYEXPIRED: rpc_delay(task, FILELAYOUT_POLL_RETRY_MAX); break; + case -NFS4ERR_RETRY_UNCACHED_REP: + break; default: dprintk("%s DS error. 
Retry through MDS %d\n", __func__, task->tk_status); @@ -416,7 +418,8 @@ static int filelayout_check_layout(struct pnfs_layout_hdr *lo, struct nfs4_filelayout_segment *fl, struct nfs4_layoutget_res *lgr, - struct nfs4_deviceid *id) + struct nfs4_deviceid *id, + gfp_t gfp_flags) { struct nfs4_file_layout_dsaddr *dsaddr; int status = -EINVAL; @@ -439,7 +442,7 @@ filelayout_check_layout(struct pnfs_layout_hdr *lo, /* find and reference the deviceid */ dsaddr = nfs4_fl_find_get_deviceid(id); if (dsaddr == NULL) { - dsaddr = get_device_info(lo->plh_inode, id); + dsaddr = get_device_info(lo->plh_inode, id, gfp_flags); if (dsaddr == NULL) goto out; } @@ -500,7 +503,8 @@ static int filelayout_decode_layout(struct pnfs_layout_hdr *flo, struct nfs4_filelayout_segment *fl, struct nfs4_layoutget_res *lgr, - struct nfs4_deviceid *id) + struct nfs4_deviceid *id, + gfp_t gfp_flags) { struct xdr_stream stream; struct xdr_buf buf = { @@ -516,7 +520,7 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo, dprintk("%s: set_layout_map Begin\n", __func__); - scratch = alloc_page(GFP_KERNEL); + scratch = alloc_page(gfp_flags); if (!scratch) return -ENOMEM; @@ -554,13 +558,13 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo, goto out_err; fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *), - GFP_KERNEL); + gfp_flags); if (!fl->fh_array) goto out_err; for (i = 0; i < fl->num_fh; i++) { /* Do we want to use a mempool here? */ - fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), GFP_KERNEL); + fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), gfp_flags); if (!fl->fh_array[i]) goto out_err_free; @@ -605,19 +609,20 @@ filelayout_free_lseg(struct pnfs_layout_segment *lseg) static struct pnfs_layout_segment * filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid, - struct nfs4_layoutget_res *lgr) + struct nfs4_layoutget_res *lgr, + gfp_t gfp_flags) { struct nfs4_filelayout_segment *fl; int rc; struct nfs4_deviceid id; dprintk("--> %s\n", __func__); - fl = kzalloc(sizeof(*fl), GFP_KERNEL); + fl = kzalloc(sizeof(*fl), gfp_flags); if (!fl) return NULL; - rc = filelayout_decode_layout(layoutid, fl, lgr, &id); - if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, &id)) { + rc = filelayout_decode_layout(layoutid, fl, lgr, &id, gfp_flags); + if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, &id, gfp_flags)) { _filelayout_free_lseg(fl); return NULL; } @@ -633,7 +638,7 @@ filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid, int size = (fl->stripe_type == STRIPE_SPARSE) ? 
fl->dsaddr->ds_num : fl->dsaddr->stripe_count; - fl->commit_buckets = kcalloc(size, sizeof(struct list_head), GFP_KERNEL); + fl->commit_buckets = kcalloc(size, sizeof(struct list_head), gfp_flags); if (!fl->commit_buckets) { filelayout_free_lseg(&fl->generic_hdr); return NULL; diff --git a/fs/nfs/nfs4filelayout.h b/fs/nfs/nfs4filelayout.h index 7c44579f583..2b461d77b43 100644 --- a/fs/nfs/nfs4filelayout.h +++ b/fs/nfs/nfs4filelayout.h @@ -104,6 +104,6 @@ extern struct nfs4_file_layout_dsaddr * nfs4_fl_find_get_deviceid(struct nfs4_deviceid *dev_id); extern void nfs4_fl_put_deviceid(struct nfs4_file_layout_dsaddr *dsaddr); struct nfs4_file_layout_dsaddr * -get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id); +get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags); #endif /* FS_NFS_NFS4FILELAYOUT_H */ diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c index de5350f2b24..db07c7af139 100644 --- a/fs/nfs/nfs4filelayoutdev.c +++ b/fs/nfs/nfs4filelayoutdev.c @@ -225,11 +225,11 @@ nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr) } static struct nfs4_pnfs_ds * -nfs4_pnfs_ds_add(struct inode *inode, u32 ip_addr, u32 port) +nfs4_pnfs_ds_add(struct inode *inode, u32 ip_addr, u32 port, gfp_t gfp_flags) { struct nfs4_pnfs_ds *tmp_ds, *ds; - ds = kzalloc(sizeof(*tmp_ds), GFP_KERNEL); + ds = kzalloc(sizeof(*tmp_ds), gfp_flags); if (!ds) goto out; @@ -261,7 +261,7 @@ out: * Currently only support ipv4, and one multi-path address. */ static struct nfs4_pnfs_ds * -decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode) +decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode, gfp_t gfp_flags) { struct nfs4_pnfs_ds *ds = NULL; char *buf; @@ -303,7 +303,7 @@ decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode) rlen); goto out_err; } - buf = kmalloc(rlen + 1, GFP_KERNEL); + buf = kmalloc(rlen + 1, gfp_flags); if (!buf) { dprintk("%s: Not enough memory\n", __func__); goto out_err; @@ -333,7 +333,7 @@ decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode) sscanf(pstr, "-%d-%d", &tmp[0], &tmp[1]); port = htons((tmp[0] << 8) | (tmp[1])); - ds = nfs4_pnfs_ds_add(inode, ip_addr, port); + ds = nfs4_pnfs_ds_add(inode, ip_addr, port, gfp_flags); dprintk("%s: Decoded address and port %s\n", __func__, buf); out_free: kfree(buf); @@ -343,7 +343,7 @@ out_err: /* Decode opaque device data and return the result */ static struct nfs4_file_layout_dsaddr* -decode_device(struct inode *ino, struct pnfs_device *pdev) +decode_device(struct inode *ino, struct pnfs_device *pdev, gfp_t gfp_flags) { int i; u32 cnt, num; @@ -362,7 +362,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev) struct page *scratch; /* set up xdr stream */ - scratch = alloc_page(GFP_KERNEL); + scratch = alloc_page(gfp_flags); if (!scratch) goto out_err; @@ -384,7 +384,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev) } /* read stripe indices */ - stripe_indices = kcalloc(cnt, sizeof(u8), GFP_KERNEL); + stripe_indices = kcalloc(cnt, sizeof(u8), gfp_flags); if (!stripe_indices) goto out_err_free_scratch; @@ -423,7 +423,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev) dsaddr = kzalloc(sizeof(*dsaddr) + (sizeof(struct nfs4_pnfs_ds *) * (num - 1)), - GFP_KERNEL); + gfp_flags); if (!dsaddr) goto out_err_free_stripe_indices; @@ -452,7 +452,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev) for (j = 0; j < mp_count; j++) { if (j == 0) { dsaddr->ds_list[i] = decode_and_add_ds(&stream, - ino); + 
ino, gfp_flags); if (dsaddr->ds_list[i] == NULL) goto out_err_free_deviceid; } else { @@ -503,12 +503,12 @@ out_err: * available devices. */ static struct nfs4_file_layout_dsaddr * -decode_and_add_device(struct inode *inode, struct pnfs_device *dev) +decode_and_add_device(struct inode *inode, struct pnfs_device *dev, gfp_t gfp_flags) { struct nfs4_file_layout_dsaddr *d, *new; long hash; - new = decode_device(inode, dev); + new = decode_device(inode, dev, gfp_flags); if (!new) { printk(KERN_WARNING "%s: Could not decode or add device\n", __func__); @@ -537,7 +537,7 @@ decode_and_add_device(struct inode *inode, struct pnfs_device *dev) * of available devices, and return it. */ struct nfs4_file_layout_dsaddr * -get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id) +get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags) { struct pnfs_device *pdev = NULL; u32 max_resp_sz; @@ -556,17 +556,17 @@ get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id) dprintk("%s inode %p max_resp_sz %u max_pages %d\n", __func__, inode, max_resp_sz, max_pages); - pdev = kzalloc(sizeof(struct pnfs_device), GFP_KERNEL); + pdev = kzalloc(sizeof(struct pnfs_device), gfp_flags); if (pdev == NULL) return NULL; - pages = kzalloc(max_pages * sizeof(struct page *), GFP_KERNEL); + pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags); if (pages == NULL) { kfree(pdev); return NULL; } for (i = 0; i < max_pages; i++) { - pages[i] = alloc_page(GFP_KERNEL); + pages[i] = alloc_page(gfp_flags); if (!pages[i]) goto out_free; } @@ -587,7 +587,7 @@ get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id) * Found new device, need to decode it and then add it to the * list of known devices for this mountpoint. */ - dsaddr = decode_and_add_device(inode, pdev); + dsaddr = decode_and_add_device(inode, pdev, gfp_flags); out_free: for (i = 0; i < max_pages; i++) __free_page(pages[i]); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 69c0f3c5ee7..cf1b339c393 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -300,6 +300,7 @@ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struc ret = nfs4_delay(server->client, &exception->timeout); if (ret != 0) break; + case -NFS4ERR_RETRY_UNCACHED_REP: case -NFS4ERR_OLD_STATEID: exception->retry = 1; break; @@ -3695,6 +3696,7 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, rpc_delay(task, NFS4_POLL_RETRY_MAX); task->tk_status = 0; return -EAGAIN; + case -NFS4ERR_RETRY_UNCACHED_REP: case -NFS4ERR_OLD_STATEID: task->tk_status = 0; return -EAGAIN; @@ -4844,6 +4846,8 @@ static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); rpc_delay(task, NFS4_POLL_RETRY_MIN); task->tk_status = 0; + /* fall through */ + case -NFS4ERR_RETRY_UNCACHED_REP: nfs_restart_rpc(task, data->clp); return; } @@ -5479,6 +5483,8 @@ static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nf break; case -NFS4ERR_DELAY: rpc_delay(task, NFS4_POLL_RETRY_MAX); + /* fall through */ + case -NFS4ERR_RETRY_UNCACHED_REP: return -EAGAIN; default: nfs4_schedule_lease_recovery(clp); diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index ff681ab65d3..f57f5281a52 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -383,6 +383,7 @@ pnfs_destroy_all_layouts(struct nfs_client *clp) plh_layouts); dprintk("%s freeing layout for inode %lu\n", __func__, lo->plh_inode->i_ino); + list_del_init(&lo->plh_layouts); 
pnfs_destroy_layout(NFS_I(lo->plh_inode)); } } @@ -466,7 +467,8 @@ pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo, static struct pnfs_layout_segment * send_layoutget(struct pnfs_layout_hdr *lo, struct nfs_open_context *ctx, - u32 iomode) + u32 iomode, + gfp_t gfp_flags) { struct inode *ino = lo->plh_inode; struct nfs_server *server = NFS_SERVER(ino); @@ -479,7 +481,7 @@ send_layoutget(struct pnfs_layout_hdr *lo, dprintk("--> %s\n", __func__); BUG_ON(ctx == NULL); - lgp = kzalloc(sizeof(*lgp), GFP_KERNEL); + lgp = kzalloc(sizeof(*lgp), gfp_flags); if (lgp == NULL) return NULL; @@ -487,12 +489,12 @@ send_layoutget(struct pnfs_layout_hdr *lo, max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; max_pages = max_resp_sz >> PAGE_SHIFT; - pages = kzalloc(max_pages * sizeof(struct page *), GFP_KERNEL); + pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags); if (!pages) goto out_err_free; for (i = 0; i < max_pages; i++) { - pages[i] = alloc_page(GFP_KERNEL); + pages[i] = alloc_page(gfp_flags); if (!pages[i]) goto out_err_free; } @@ -508,6 +510,7 @@ send_layoutget(struct pnfs_layout_hdr *lo, lgp->args.layout.pages = pages; lgp->args.layout.pglen = max_pages * PAGE_SIZE; lgp->lsegpp = &lseg; + lgp->gfp_flags = gfp_flags; /* Synchronously retrieve layout information from server and * store in lseg. @@ -665,11 +668,11 @@ pnfs_insert_layout(struct pnfs_layout_hdr *lo, } static struct pnfs_layout_hdr * -alloc_init_layout_hdr(struct inode *ino) +alloc_init_layout_hdr(struct inode *ino, gfp_t gfp_flags) { struct pnfs_layout_hdr *lo; - lo = kzalloc(sizeof(struct pnfs_layout_hdr), GFP_KERNEL); + lo = kzalloc(sizeof(struct pnfs_layout_hdr), gfp_flags); if (!lo) return NULL; atomic_set(&lo->plh_refcount, 1); @@ -681,7 +684,7 @@ alloc_init_layout_hdr(struct inode *ino) } static struct pnfs_layout_hdr * -pnfs_find_alloc_layout(struct inode *ino) +pnfs_find_alloc_layout(struct inode *ino, gfp_t gfp_flags) { struct nfs_inode *nfsi = NFS_I(ino); struct pnfs_layout_hdr *new = NULL; @@ -696,7 +699,7 @@ pnfs_find_alloc_layout(struct inode *ino) return nfsi->layout; } spin_unlock(&ino->i_lock); - new = alloc_init_layout_hdr(ino); + new = alloc_init_layout_hdr(ino, gfp_flags); spin_lock(&ino->i_lock); if (likely(nfsi->layout == NULL)) /* Won the race? 
*/ @@ -756,7 +759,8 @@ pnfs_find_lseg(struct pnfs_layout_hdr *lo, u32 iomode) struct pnfs_layout_segment * pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx, - enum pnfs_iomode iomode) + enum pnfs_iomode iomode, + gfp_t gfp_flags) { struct nfs_inode *nfsi = NFS_I(ino); struct nfs_client *clp = NFS_SERVER(ino)->nfs_client; @@ -767,7 +771,7 @@ pnfs_update_layout(struct inode *ino, if (!pnfs_enabled_sb(NFS_SERVER(ino))) return NULL; spin_lock(&ino->i_lock); - lo = pnfs_find_alloc_layout(ino); + lo = pnfs_find_alloc_layout(ino, gfp_flags); if (lo == NULL) { dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__); goto out_unlock; @@ -807,7 +811,7 @@ pnfs_update_layout(struct inode *ino, spin_unlock(&clp->cl_lock); } - lseg = send_layoutget(lo, ctx, iomode); + lseg = send_layoutget(lo, ctx, iomode, gfp_flags); if (!lseg && first) { spin_lock(&clp->cl_lock); list_del_init(&lo->plh_layouts); @@ -846,7 +850,7 @@ pnfs_layout_process(struct nfs4_layoutget *lgp) goto out; } /* Inject layout blob into I/O device driver */ - lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res); + lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags); if (!lseg || IS_ERR(lseg)) { if (!lseg) status = -ENOMEM; @@ -899,7 +903,8 @@ static int pnfs_read_pg_test(struct nfs_pageio_descriptor *pgio, /* This is first coelesce call for a series of nfs_pages */ pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, prev->wb_context, - IOMODE_READ); + IOMODE_READ, + GFP_KERNEL); } return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req); } @@ -921,7 +926,8 @@ static int pnfs_write_pg_test(struct nfs_pageio_descriptor *pgio, /* This is first coelesce call for a series of nfs_pages */ pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, prev->wb_context, - IOMODE_RW); + IOMODE_RW, + GFP_NOFS); } return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req); } diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index bc4827202e7..0c015bad9e7 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -70,7 +70,7 @@ struct pnfs_layoutdriver_type { const u32 id; const char *name; struct module *owner; - struct pnfs_layout_segment * (*alloc_lseg) (struct pnfs_layout_hdr *layoutid, struct nfs4_layoutget_res *lgr); + struct pnfs_layout_segment * (*alloc_lseg) (struct pnfs_layout_hdr *layoutid, struct nfs4_layoutget_res *lgr, gfp_t gfp_flags); void (*free_lseg) (struct pnfs_layout_segment *lseg); /* test for nfs page cache coalescing */ @@ -126,7 +126,7 @@ void get_layout_hdr(struct pnfs_layout_hdr *lo); void put_lseg(struct pnfs_layout_segment *lseg); struct pnfs_layout_segment * pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx, - enum pnfs_iomode access_type); + enum pnfs_iomode access_type, gfp_t gfp_flags); void set_pnfs_layoutdriver(struct nfs_server *, u32 id); void unset_pnfs_layoutdriver(struct nfs_server *); enum pnfs_try_status pnfs_try_to_write_data(struct nfs_write_data *, @@ -245,7 +245,7 @@ static inline void put_lseg(struct pnfs_layout_segment *lseg) static inline struct pnfs_layout_segment * pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx, - enum pnfs_iomode access_type) + enum pnfs_iomode access_type, gfp_t gfp_flags) { return NULL; } diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 7cded2b12a0..2bcf0dc306a 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -288,7 +288,7 @@ static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc) atomic_set(&req->wb_complete, requests); BUG_ON(desc->pg_lseg != NULL); - lseg = 
pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ); + lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ, GFP_KERNEL); ClearPageError(page); offset = 0; nbytes = desc->pg_count; @@ -351,7 +351,7 @@ static int nfs_pagein_one(struct nfs_pageio_descriptor *desc) } req = nfs_list_entry(data->pages.next); if ((!lseg) && list_is_singular(&data->pages)) - lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ); + lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ, GFP_KERNEL); ret = nfs_read_rpcsetup(req, data, &nfs_read_full_ops, desc->pg_count, 0, lseg); diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 3bd5d7e80f6..49c715b4ac9 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -939,7 +939,7 @@ static int nfs_flush_multi(struct nfs_pageio_descriptor *desc) atomic_set(&req->wb_complete, requests); BUG_ON(desc->pg_lseg); - lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW); + lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW, GFP_NOFS); ClearPageError(page); offset = 0; nbytes = desc->pg_count; @@ -1013,7 +1013,7 @@ static int nfs_flush_one(struct nfs_pageio_descriptor *desc) } req = nfs_list_entry(data->pages.next); if ((!lseg) && list_is_singular(&data->pages)) - lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW); + lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW, GFP_NOFS); if ((desc->pg_ioflags & FLUSH_COND_STABLE) && (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit)) diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c index 0a0a66d98cc..f7684483785 100644 --- a/fs/nilfs2/alloc.c +++ b/fs/nilfs2/alloc.c @@ -646,7 +646,7 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems) unsigned long group, group_offset; int i, j, n, ret; - for (i = 0; i < nitems; i += n) { + for (i = 0; i < nitems; i = j) { group = nilfs_palloc_group(inode, entry_nrs[i], &group_offset); ret = nilfs_palloc_get_desc_block(inode, group, 0, &desc_bh); if (ret < 0) diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 643720209a9..9a3e6bbff27 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -539,25 +539,41 @@ static int o2hb_verify_crc(struct o2hb_region *reg, /* We want to make sure that nobody is heartbeating on top of us -- * this will help detect an invalid configuration. 
*/ -static int o2hb_check_last_timestamp(struct o2hb_region *reg) +static void o2hb_check_last_timestamp(struct o2hb_region *reg) { - int node_num, ret; struct o2hb_disk_slot *slot; struct o2hb_disk_heartbeat_block *hb_block; + char *errstr; - node_num = o2nm_this_node(); - - ret = 1; - slot = ®->hr_slots[node_num]; + slot = ®->hr_slots[o2nm_this_node()]; /* Don't check on our 1st timestamp */ - if (slot->ds_last_time) { - hb_block = slot->ds_raw_block; + if (!slot->ds_last_time) + return; - if (le64_to_cpu(hb_block->hb_seq) != slot->ds_last_time) - ret = 0; - } + hb_block = slot->ds_raw_block; + if (le64_to_cpu(hb_block->hb_seq) == slot->ds_last_time && + le64_to_cpu(hb_block->hb_generation) == slot->ds_last_generation && + hb_block->hb_node == slot->ds_node_num) + return; - return ret; +#define ERRSTR1 "Another node is heartbeating on device" +#define ERRSTR2 "Heartbeat generation mismatch on device" +#define ERRSTR3 "Heartbeat sequence mismatch on device" + + if (hb_block->hb_node != slot->ds_node_num) + errstr = ERRSTR1; + else if (le64_to_cpu(hb_block->hb_generation) != + slot->ds_last_generation) + errstr = ERRSTR2; + else + errstr = ERRSTR3; + + mlog(ML_ERROR, "%s (%s): expected(%u:0x%llx, 0x%llx), " + "ondisk(%u:0x%llx, 0x%llx)\n", errstr, reg->hr_dev_name, + slot->ds_node_num, (unsigned long long)slot->ds_last_generation, + (unsigned long long)slot->ds_last_time, hb_block->hb_node, + (unsigned long long)le64_to_cpu(hb_block->hb_generation), + (unsigned long long)le64_to_cpu(hb_block->hb_seq)); } static inline void o2hb_prepare_block(struct o2hb_region *reg, @@ -983,9 +999,7 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg) /* With an up to date view of the slots, we can check that no * other node has been improperly configured to heartbeat in * our slot. */ - if (!o2hb_check_last_timestamp(reg)) - mlog(ML_ERROR, "Device \"%s\": another node is heartbeating " - "in our slot!\n", reg->hr_dev_name); + o2hb_check_last_timestamp(reg); /* fill in the proper info for our next heartbeat */ o2hb_prepare_block(reg, reg->hr_generation); @@ -999,8 +1013,8 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg) } i = -1; - while((i = find_next_bit(configured_nodes, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) { - + while((i = find_next_bit(configured_nodes, + O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) { change |= o2hb_check_slot(reg, ®->hr_slots[i]); } @@ -1690,6 +1704,7 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg, struct file *filp = NULL; struct inode *inode = NULL; ssize_t ret = -EINVAL; + int live_threshold; if (reg->hr_bdev) goto out; @@ -1766,8 +1781,18 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg, * A node is considered live after it has beat LIVE_THRESHOLD * times. We're not steady until we've given them a chance * _after_ our first read. + * The default threshold is bare minimum so as to limit the delay + * during mounts. For global heartbeat, the threshold doubled for the + * first region. 
*/ - atomic_set(®->hr_steady_iterations, O2HB_LIVE_THRESHOLD + 1); + live_threshold = O2HB_LIVE_THRESHOLD; + if (o2hb_global_heartbeat_active()) { + spin_lock(&o2hb_live_lock); + if (o2hb_pop_count(&o2hb_region_bitmap, O2NM_MAX_REGIONS) == 1) + live_threshold <<= 1; + spin_unlock(&o2hb_live_lock); + } + atomic_set(®->hr_steady_iterations, live_threshold + 1); hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s", reg->hr_item.ci_name); diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index 9fe5b8fd658..8582e3f4f12 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c @@ -2868,7 +2868,7 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh, bytes = blocks_wanted << sb->s_blocksize_bits; struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); struct ocfs2_inode_info *oi = OCFS2_I(dir); - struct ocfs2_alloc_context *data_ac; + struct ocfs2_alloc_context *data_ac = NULL; struct ocfs2_alloc_context *meta_ac = NULL; struct buffer_head *dirdata_bh = NULL; struct buffer_head *dx_root_bh = NULL; diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index 7540a492eab..3b179d6cbde 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c @@ -1614,7 +1614,8 @@ static int dlm_try_to_join_domain(struct dlm_ctxt *dlm) spin_unlock(&dlm->spinlock); /* Support for global heartbeat and node info was added in 1.1 */ - if (dlm_protocol.pv_major > 1 || dlm_protocol.pv_minor > 0) { + if (dlm->dlm_locking_proto.pv_major > 1 || + dlm->dlm_locking_proto.pv_minor > 0) { status = dlm_send_nodeinfo(dlm, ctxt->yes_resp_map); if (status) { mlog_errno(status); diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index fede57ed005..84d166328cf 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c @@ -2574,6 +2574,9 @@ fail: res->state &= ~DLM_LOCK_RES_MIGRATING; wake = 1; spin_unlock(&res->spinlock); + if (dlm_is_host_down(ret)) + dlm_wait_for_node_death(dlm, target, + DLM_NODE_DEATH_WAIT_MAX); goto leave; } diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 41565ae5285..89659d6dc20 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -1607,6 +1607,9 @@ static void ocfs2_calc_trunc_pos(struct inode *inode, range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec); if (le32_to_cpu(rec->e_cpos) >= trunc_start) { + /* + * remove an entire extent record. + */ *trunc_cpos = le32_to_cpu(rec->e_cpos); /* * Skip holes if any. @@ -1617,7 +1620,16 @@ static void ocfs2_calc_trunc_pos(struct inode *inode, *blkno = le64_to_cpu(rec->e_blkno); *trunc_end = le32_to_cpu(rec->e_cpos); } else if (range > trunc_start) { + /* + * remove a partial extent record, which means we're + * removing the last extent record. + */ *trunc_cpos = trunc_start; + /* + * skip hole if any. 
+ */ + if (range < *trunc_end) + *trunc_end = range; *trunc_len = *trunc_end - trunc_start; coff = trunc_start - le32_to_cpu(rec->e_cpos); *blkno = le64_to_cpu(rec->e_blkno) + diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index b141a44605c..295d56454e8 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -1260,6 +1260,9 @@ void ocfs2_complete_mount_recovery(struct ocfs2_super *osb) { struct ocfs2_journal *journal = osb->journal; + if (ocfs2_is_hard_readonly(osb)) + return; + /* No need to queue up our truncate_log as regular cleanup will catch * that */ ocfs2_queue_recovery_completion(journal, osb->slot_num, diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c index ac0ccb5026a..19d6750d1d6 100644 --- a/fs/partitions/efi.c +++ b/fs/partitions/efi.c @@ -348,6 +348,12 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba, goto fail; } + /* Check that sizeof_partition_entry has the correct value */ + if (le32_to_cpu((*gpt)->sizeof_partition_entry) != sizeof(gpt_entry)) { + pr_debug("GUID Partitition Entry Size check failed.\n"); + goto fail; + } + if (!(*ptes = alloc_read_gpt_entries(state, *gpt))) goto fail; diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 2e7addfd980..318d8654989 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -214,7 +214,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) int flags = vma->vm_flags; unsigned long ino = 0; unsigned long long pgoff = 0; - unsigned long start; + unsigned long start, end; dev_t dev = 0; int len; @@ -227,13 +227,15 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) /* We don't show the stack guard page in /proc/maps */ start = vma->vm_start; - if (vma->vm_flags & VM_GROWSDOWN) - if (!vma_stack_continue(vma->vm_prev, vma->vm_start)) - start += PAGE_SIZE; + if (stack_guard_page_start(vma, start)) + start += PAGE_SIZE; + end = vma->vm_end; + if (stack_guard_page_end(vma, end)) + end -= PAGE_SIZE; seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n", start, - vma->vm_end, + end, flags & VM_READ ? 'r' : '-', flags & VM_WRITE ? 'w' : '-', flags & VM_EXEC ? 
'x' : '-', diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index e4f9c1b0836..3e898a48122 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -926,6 +926,7 @@ restart: XFS_LOOKUP_BATCH, XFS_ICI_RECLAIM_TAG); if (!nr_found) { + done = 1; rcu_read_unlock(); break; } diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index acdb92f14d5..5fc2380092c 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c @@ -346,20 +346,23 @@ xfs_ail_delete( */ STATIC void xfs_ail_worker( - struct work_struct *work) + struct work_struct *work) { - struct xfs_ail *ailp = container_of(to_delayed_work(work), + struct xfs_ail *ailp = container_of(to_delayed_work(work), struct xfs_ail, xa_work); - long tout; - xfs_lsn_t target = ailp->xa_target; - xfs_lsn_t lsn; - xfs_log_item_t *lip; - int flush_log, count, stuck; - xfs_mount_t *mp = ailp->xa_mount; + xfs_mount_t *mp = ailp->xa_mount; struct xfs_ail_cursor *cur = &ailp->xa_cursors; - int push_xfsbufd = 0; + xfs_log_item_t *lip; + xfs_lsn_t lsn; + xfs_lsn_t target; + long tout = 10; + int flush_log = 0; + int stuck = 0; + int count = 0; + int push_xfsbufd = 0; spin_lock(&ailp->xa_lock); + target = ailp->xa_target; xfs_trans_ail_cursor_init(ailp, cur); lip = xfs_trans_ail_cursor_first(ailp, cur, ailp->xa_last_pushed_lsn); if (!lip || XFS_FORCED_SHUTDOWN(mp)) { @@ -368,8 +371,7 @@ xfs_ail_worker( */ xfs_trans_ail_cursor_done(ailp, cur); spin_unlock(&ailp->xa_lock); - ailp->xa_last_pushed_lsn = 0; - return; + goto out_done; } XFS_STATS_INC(xs_push_ail); @@ -386,8 +388,7 @@ xfs_ail_worker( * lots of contention on the AIL lists. */ lsn = lip->li_lsn; - flush_log = stuck = count = 0; - while ((XFS_LSN_CMP(lip->li_lsn, target) < 0)) { + while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) { int lock_result; /* * If we can lock the item without sleeping, unlock the AIL @@ -480,21 +481,25 @@ xfs_ail_worker( } /* assume we have more work to do in a short while */ - tout = 10; +out_done: if (!count) { /* We're past our target or empty, so idle */ ailp->xa_last_pushed_lsn = 0; /* - * Check for an updated push target before clearing the - * XFS_AIL_PUSHING_BIT. If the target changed, we've got more - * work to do. Wait a bit longer before starting that work. + * We clear the XFS_AIL_PUSHING_BIT first before checking + * whether the target has changed. If the target has changed, + * this pushes the requeue race directly onto the result of the + * atomic test/set bit, so we are guaranteed that either the + * the pusher that changed the target or ourselves will requeue + * the work (but not both). */ + clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags); smp_rmb(); - if (ailp->xa_target == target) { - clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags); + if (XFS_LSN_CMP(ailp->xa_target, target) == 0 || + test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags)) return; - } + tout = 50; } else if (XFS_LSN_CMP(lsn, target) >= 0) { /* @@ -553,7 +558,7 @@ xfs_ail_push( * the XFS_AIL_PUSHING_BIT. 
*/ smp_wmb(); - ailp->xa_target = threshold_lsn; + xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn); if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags)) queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0); } diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index ade09d7b427..c99c3d3e781 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h @@ -127,7 +127,7 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info); -bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper); +int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper); bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel); int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper); int drm_fb_helper_debug_enter(struct fb_info *info); diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index c2f93a8ae2e..564b14aa7e1 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h @@ -86,7 +86,7 @@ static inline bool drm_mm_initialized(struct drm_mm *mm) } #define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \ &(mm)->head_node.node_list, \ - node_list); + node_list) #define drm_mm_for_each_scanned_node_reverse(entry, n, mm) \ for (entry = (mm)->prev_scanned_node, \ next = entry ? list_entry(entry->node_list.next, \ diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index b8613e806aa..01eca1794e1 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -111,6 +111,8 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat, __alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_node(pgdat, x) \ __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) +#define alloc_bootmem_node_nopanic(pgdat, x) \ + __alloc_bootmem_node_nopanic(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_pages_node(pgdat, x) \ __alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_pages_node_nopanic(pgdat, x) \ diff --git a/include/linux/capability.h b/include/linux/capability.h index 16ee8b49a20..d4675af963f 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h @@ -546,18 +546,7 @@ extern bool has_capability_noaudit(struct task_struct *t, int cap); extern bool capable(int cap); extern bool ns_capable(struct user_namespace *ns, int cap); extern bool task_ns_capable(struct task_struct *t, int cap); - -/** - * nsown_capable - Check superior capability to one's own user_ns - * @cap: The capability in question - * - * Return true if the current task has the given superior capability - * targeted at its own user namespace. 
- */ -static inline bool nsown_capable(int cap) -{ - return ns_capable(current_user_ns(), cap); -} +extern bool nsown_capable(int cap); /* audit system wants to get cap info from files as well */ extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps); diff --git a/include/linux/cred.h b/include/linux/cred.h index 9aeeb0ba200..be16b61283c 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -146,6 +146,7 @@ struct cred { void *security; /* subjective LSM security */ #endif struct user_struct *user; /* real user ID subscription */ + struct user_namespace *user_ns; /* cached user->user_ns */ struct group_info *group_info; /* supplementary groups for euid/fsgid */ struct rcu_head rcu; /* RCU deletion hook */ }; @@ -354,10 +355,15 @@ static inline void put_cred(const struct cred *_cred) #define current_fsgid() (current_cred_xxx(fsgid)) #define current_cap() (current_cred_xxx(cap_effective)) #define current_user() (current_cred_xxx(user)) -#define _current_user_ns() (current_cred_xxx(user)->user_ns) #define current_security() (current_cred_xxx(security)) -extern struct user_namespace *current_user_ns(void); +#ifdef CONFIG_USER_NS +#define current_user_ns() (current_cred_xxx(user_ns)) +#else +extern struct user_namespace init_user_ns; +#define current_user_ns() (&init_user_ns) +#endif + #define current_uid_gid(_uid, _gid) \ do { \ diff --git a/include/linux/device.h b/include/linux/device.h index ab8dfc09570..d08399db6e2 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -442,7 +442,6 @@ struct device { struct dev_archdata archdata; struct device_node *of_node; /* associated device tree node */ - const struct of_device_id *of_match; /* matching of_device_id from driver */ dev_t devt; /* dev_t, creates the sysfs "dev" */ diff --git a/include/linux/fb.h b/include/linux/fb.h index df728c1c29e..6a827487717 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -832,6 +832,7 @@ struct fb_tile_ops { #define FBINFO_CAN_FORCE_OUTPUT 0x200000 struct fb_info { + atomic_t count; int node; int flags; struct mutex lock; /* Lock for open/release/ioctl funcs */ diff --git a/include/linux/fs.h b/include/linux/fs.h index dbd860af080..cdf9495df20 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -358,7 +358,6 @@ struct inodes_stat_t { #define FS_EXTENT_FL 0x00080000 /* Extents */ #define FS_DIRECTIO_FL 0x00100000 /* Use direct i/o */ #define FS_NOCOW_FL 0x00800000 /* Do not cow file */ -#define FS_COW_FL 0x02000000 /* Cow file */ #define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */ #define FS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */ diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 22b32af1b5e..b5a550a39a7 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -37,6 +37,7 @@ struct trace_entry { unsigned char flags; unsigned char preempt_count; int pid; + int padding; }; #define FTRACE_MAX_EVENT \ diff --git a/include/linux/gfp.h b/include/linux/gfp.h index bfb8f934521..56d8fc87fbb 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -353,6 +353,8 @@ extern unsigned long get_zeroed_page(gfp_t gfp_mask); void *alloc_pages_exact(size_t size, gfp_t gfp_mask); void free_pages_exact(void *virt, size_t size); +/* This is different from alloc_pages_exact_node !!! 
*/ +void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask); #define __get_free_page(gfp_mask) \ __get_free_pages((gfp_mask), 0) diff --git a/include/linux/list.h b/include/linux/list.h index 3a54266a1e8..cc6d2aa6b41 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -4,7 +4,7 @@ #include <linux/types.h> #include <linux/stddef.h> #include <linux/poison.h> -#include <linux/prefetch.h> +#include <linux/const.h> /* * Simple doubly linked list implementation. @@ -367,18 +367,15 @@ static inline void list_splice_tail_init(struct list_head *list, * @head: the head for your list. */ #define list_for_each(pos, head) \ - for (pos = (head)->next; prefetch(pos->next), pos != (head); \ - pos = pos->next) + for (pos = (head)->next; pos != (head); pos = pos->next) /** * __list_for_each - iterate over a list * @pos: the &struct list_head to use as a loop cursor. * @head: the head for your list. * - * This variant differs from list_for_each() in that it's the - * simplest possible list iteration code, no prefetching is done. - * Use this for code that knows the list to be very short (empty - * or 1 entry) most of the time. + * This variant doesn't differ from list_for_each() any more. + * We don't do prefetching in either case. */ #define __list_for_each(pos, head) \ for (pos = (head)->next; pos != (head); pos = pos->next) @@ -389,8 +386,7 @@ static inline void list_splice_tail_init(struct list_head *list, * @head: the head for your list. */ #define list_for_each_prev(pos, head) \ - for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \ - pos = pos->prev) + for (pos = (head)->prev; pos != (head); pos = pos->prev) /** * list_for_each_safe - iterate over a list safe against removal of list entry @@ -410,7 +406,7 @@ static inline void list_splice_tail_init(struct list_head *list, */ #define list_for_each_prev_safe(pos, n, head) \ for (pos = (head)->prev, n = pos->prev; \ - prefetch(pos->prev), pos != (head); \ + pos != (head); \ pos = n, n = pos->prev) /** @@ -421,7 +417,7 @@ static inline void list_splice_tail_init(struct list_head *list, */ #define list_for_each_entry(pos, head, member) \ for (pos = list_entry((head)->next, typeof(*pos), member); \ - prefetch(pos->member.next), &pos->member != (head); \ + &pos->member != (head); \ pos = list_entry(pos->member.next, typeof(*pos), member)) /** @@ -432,7 +428,7 @@ static inline void list_splice_tail_init(struct list_head *list, */ #define list_for_each_entry_reverse(pos, head, member) \ for (pos = list_entry((head)->prev, typeof(*pos), member); \ - prefetch(pos->member.prev), &pos->member != (head); \ + &pos->member != (head); \ pos = list_entry(pos->member.prev, typeof(*pos), member)) /** @@ -457,7 +453,7 @@ static inline void list_splice_tail_init(struct list_head *list, */ #define list_for_each_entry_continue(pos, head, member) \ for (pos = list_entry(pos->member.next, typeof(*pos), member); \ - prefetch(pos->member.next), &pos->member != (head); \ + &pos->member != (head); \ pos = list_entry(pos->member.next, typeof(*pos), member)) /** @@ -471,7 +467,7 @@ static inline void list_splice_tail_init(struct list_head *list, */ #define list_for_each_entry_continue_reverse(pos, head, member) \ for (pos = list_entry(pos->member.prev, typeof(*pos), member); \ - prefetch(pos->member.prev), &pos->member != (head); \ + &pos->member != (head); \ pos = list_entry(pos->member.prev, typeof(*pos), member)) /** @@ -483,7 +479,7 @@ static inline void list_splice_tail_init(struct list_head *list, * Iterate over list of given type, continuing 
from current position. */ #define list_for_each_entry_from(pos, head, member) \ - for (; prefetch(pos->member.next), &pos->member != (head); \ + for (; &pos->member != (head); \ pos = list_entry(pos->member.next, typeof(*pos), member)) /** @@ -664,8 +660,7 @@ static inline void hlist_move_list(struct hlist_head *old, #define hlist_entry(ptr, type, member) container_of(ptr,type,member) #define hlist_for_each(pos, head) \ - for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \ - pos = pos->next) + for (pos = (head)->first; pos ; pos = pos->next) #define hlist_for_each_safe(pos, n, head) \ for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ @@ -680,7 +675,7 @@ static inline void hlist_move_list(struct hlist_head *old, */ #define hlist_for_each_entry(tpos, pos, head, member) \ for (pos = (head)->first; \ - pos && ({ prefetch(pos->next); 1;}) && \ + pos && \ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ pos = pos->next) @@ -692,7 +687,7 @@ static inline void hlist_move_list(struct hlist_head *old, */ #define hlist_for_each_entry_continue(tpos, pos, member) \ for (pos = (pos)->next; \ - pos && ({ prefetch(pos->next); 1;}) && \ + pos && \ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ pos = pos->next) @@ -703,7 +698,7 @@ static inline void hlist_move_list(struct hlist_head *old, * @member: the name of the hlist_node within the struct. */ #define hlist_for_each_entry_from(tpos, pos, member) \ - for (; pos && ({ prefetch(pos->next); 1;}) && \ + for (; pos && \ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ pos = pos->next) diff --git a/include/linux/mm.h b/include/linux/mm.h index 2348db26bc3..6507dde38b1 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1011,11 +1011,33 @@ int set_page_dirty_lock(struct page *page); int clear_page_dirty_for_io(struct page *page); /* Is the vma a continuation of the stack vma above it? */ -static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr) +static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr) { return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); } +static inline int stack_guard_page_start(struct vm_area_struct *vma, + unsigned long addr) +{ + return (vma->vm_flags & VM_GROWSDOWN) && + (vma->vm_start == addr) && + !vma_growsdown(vma->vm_prev, addr); +} + +/* Is the vma a continuation of the stack vma below it? 
*/ +static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr) +{ + return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP); +} + +static inline int stack_guard_page_end(struct vm_area_struct *vma, + unsigned long addr) +{ + return (vma->vm_flags & VM_GROWSUP) && + (vma->vm_end == addr) && + !vma_growsup(vma->vm_next, addr); +} + extern unsigned long move_page_tables(struct vm_area_struct *vma, unsigned long old_addr, struct vm_area_struct *new_vma, unsigned long new_addr, unsigned long len); diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index eb792cb6d74..bcb793ec737 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -183,6 +183,7 @@ struct mmc_host { struct work_struct clk_gate_work; /* delayed clock gate */ unsigned int clk_old; /* old clock value cache */ spinlock_t clk_lock; /* lock for clk fields */ + struct mutex clk_gate_mutex; /* mutex for clock gating */ #endif /* host specific block data */ diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 890dce24263..7e371f7df9c 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -233,6 +233,7 @@ struct nfs4_layoutget { struct nfs4_layoutget_args args; struct nfs4_layoutget_res res; struct pnfs_layout_segment **lsegpp; + gfp_t gfp_flags; }; struct nfs4_getdeviceinfo_args { diff --git a/include/linux/of_device.h b/include/linux/of_device.h index 8bfe6c1d436..ae5638480ef 100644 --- a/include/linux/of_device.h +++ b/include/linux/of_device.h @@ -21,8 +21,7 @@ extern void of_device_make_bus_id(struct device *dev); static inline int of_driver_match_device(struct device *dev, const struct device_driver *drv) { - dev->of_match = of_match_device(drv->of_match_table, dev); - return dev->of_match != NULL; + return of_match_device(drv->of_match_table, dev) != NULL; } extern struct platform_device *of_dev_get(struct platform_device *dev); @@ -58,6 +57,11 @@ static inline int of_device_uevent(struct device *dev, static inline void of_device_node_put(struct device *dev) { } +static inline const struct of_device_id *of_match_device( + const struct of_device_id *matches, const struct device *dev) +{ + return NULL; +} #endif /* CONFIG_OF_DEVICE */ #endif /* _LINUX_OF_DEVICE_H */ diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index 838c1149251..eaf4350c0f9 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -208,6 +208,8 @@ static inline struct proc_dir_entry *proc_symlink(const char *name, struct proc_dir_entry *parent,const char *dest) {return NULL;} static inline struct proc_dir_entry *proc_mkdir(const char *name, struct proc_dir_entry *parent) {return NULL;} +static inline struct proc_dir_entry *proc_mkdir_mode(const char *name, + mode_t mode, struct proc_dir_entry *parent) { return NULL; } static inline struct proc_dir_entry *create_proc_read_entry(const char *name, mode_t mode, struct proc_dir_entry *base, diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index a1147e5dd24..9178d5cc0b0 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -189,6 +189,10 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace) child->ptrace = current->ptrace; __ptrace_link(child, current->parent); } + +#ifdef CONFIG_HAVE_HW_BREAKPOINT + atomic_set(&child->ptrace_bp_refcnt, 1); +#endif } /** @@ -350,6 +354,13 @@ extern int task_current_syscall(struct task_struct *target, long *callno, unsigned long args[6], unsigned int maxargs, unsigned long *sp, unsigned long *pc); -#endif +#ifdef 
CONFIG_HAVE_HW_BREAKPOINT +extern int ptrace_get_breakpoints(struct task_struct *tsk); +extern void ptrace_put_breakpoints(struct task_struct *tsk); +#else +static inline void ptrace_put_breakpoints(struct task_struct *tsk) { } +#endif /* CONFIG_HAVE_HW_BREAKPOINT */ + +#endif /* __KERNEL */ #endif diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 2dea94fc440..e3beb315517 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -253,7 +253,7 @@ static inline void list_splice_init_rcu(struct list_head *list, */ #define list_for_each_entry_rcu(pos, head, member) \ for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \ - prefetch(pos->member.next), &pos->member != (head); \ + &pos->member != (head); \ pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) @@ -270,7 +270,7 @@ static inline void list_splice_init_rcu(struct list_head *list, */ #define list_for_each_continue_rcu(pos, head) \ for ((pos) = rcu_dereference_raw(list_next_rcu(pos)); \ - prefetch((pos)->next), (pos) != (head); \ + (pos) != (head); \ (pos) = rcu_dereference_raw(list_next_rcu(pos))) /** @@ -284,7 +284,7 @@ static inline void list_splice_init_rcu(struct list_head *list, */ #define list_for_each_entry_continue_rcu(pos, head, member) \ for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \ - prefetch(pos->member.next), &pos->member != (head); \ + &pos->member != (head); \ pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) /** @@ -427,7 +427,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, #define __hlist_for_each_rcu(pos, head) \ for (pos = rcu_dereference(hlist_first_rcu(head)); \ - pos && ({ prefetch(pos->next); 1; }); \ + pos; \ pos = rcu_dereference(hlist_next_rcu(pos))) /** @@ -443,7 +443,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, */ #define hlist_for_each_entry_rcu(tpos, pos, head, member) \ for (pos = rcu_dereference_raw(hlist_first_rcu(head)); \ - pos && ({ prefetch(pos->next); 1; }) && \ + pos && \ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ pos = rcu_dereference_raw(hlist_next_rcu(pos))) @@ -460,7 +460,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, */ #define hlist_for_each_entry_rcu_bh(tpos, pos, head, member) \ for (pos = rcu_dereference_bh((head)->first); \ - pos && ({ prefetch(pos->next); 1; }) && \ + pos && \ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ pos = rcu_dereference_bh(pos->next)) @@ -472,7 +472,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, */ #define hlist_for_each_entry_continue_rcu(tpos, pos, member) \ for (pos = rcu_dereference((pos)->next); \ - pos && ({ prefetch(pos->next); 1; }) && \ + pos && \ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ pos = rcu_dereference(pos->next)) @@ -484,7 +484,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, */ #define hlist_for_each_entry_continue_rcu_bh(tpos, pos, member) \ for (pos = rcu_dereference_bh((pos)->next); \ - pos && ({ prefetch(pos->next); 1; }) && \ + pos && \ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ pos = rcu_dereference_bh(pos->next)) diff --git a/include/linux/sched.h b/include/linux/sched.h index 18d63cea284..781abd13767 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1537,6 +1537,9 @@ struct task_struct { unsigned long memsw_nr_pages; /* uncharged mem+swap usage */ } memcg_batch; #endif +#ifdef CONFIG_HAVE_HW_BREAKPOINT + atomic_t ptrace_bp_refcnt; +#endif }; /* Future-safe accessor 
for struct task_struct's cpus_allowed. */ diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h index 88bdd010d65..2fa8d1341a0 100644 --- a/include/net/inet_ecn.h +++ b/include/net/inet_ecn.h @@ -38,9 +38,19 @@ static inline __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner) return outer; } -#define INET_ECN_xmit(sk) do { inet_sk(sk)->tos |= INET_ECN_ECT_0; } while (0) -#define INET_ECN_dontxmit(sk) \ - do { inet_sk(sk)->tos &= ~INET_ECN_MASK; } while (0) +static inline void INET_ECN_xmit(struct sock *sk) +{ + inet_sk(sk)->tos |= INET_ECN_ECT_0; + if (inet6_sk(sk) != NULL) + inet6_sk(sk)->tclass |= INET_ECN_ECT_0; +} + +static inline void INET_ECN_dontxmit(struct sock *sk) +{ + inet_sk(sk)->tos &= ~INET_ECN_MASK; + if (inet6_sk(sk) != NULL) + inet6_sk(sk)->tclass &= ~INET_ECN_MASK; +} #define IP6_ECN_flow_init(label) do { \ (label) &= ~htonl(INET_ECN_MASK << 20); \ diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index d516f00c8e0..86aefed6140 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -791,6 +791,7 @@ struct ip_vs_app { /* IPVS in network namespace */ struct netns_ipvs { int gen; /* Generation */ + int enable; /* enable like nf_hooks do */ /* * Hash table: for real service lookups */ @@ -1089,6 +1090,22 @@ ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp) atomic_inc(&ctl_cp->n_control); } +/* + * IPVS netns init & cleanup functions + */ +extern int __ip_vs_estimator_init(struct net *net); +extern int __ip_vs_control_init(struct net *net); +extern int __ip_vs_protocol_init(struct net *net); +extern int __ip_vs_app_init(struct net *net); +extern int __ip_vs_conn_init(struct net *net); +extern int __ip_vs_sync_init(struct net *net); +extern void __ip_vs_conn_cleanup(struct net *net); +extern void __ip_vs_app_cleanup(struct net *net); +extern void __ip_vs_protocol_cleanup(struct net *net); +extern void __ip_vs_control_cleanup(struct net *net); +extern void __ip_vs_estimator_cleanup(struct net *net); +extern void __ip_vs_sync_cleanup(struct net *net); +extern void __ip_vs_service_cleanup(struct net *net); /* * IPVS application functions diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h index 75b8e2968c9..f57e7d46a45 100644 --- a/include/net/llc_pdu.h +++ b/include/net/llc_pdu.h @@ -199,7 +199,7 @@ struct llc_pdu_sn { u8 ssap; u8 ctrl_1; u8 ctrl_2; -}; +} __packed; static inline struct llc_pdu_sn *llc_pdu_sn_hdr(struct sk_buff *skb) { @@ -211,7 +211,7 @@ struct llc_pdu_un { u8 dsap; u8 ssap; u8 ctrl_1; -}; +} __packed; static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb) { @@ -359,7 +359,7 @@ struct llc_xid_info { u8 fmt_id; /* always 0x81 for LLC */ u8 type; /* different if NULL/non-NULL LSAP */ u8 rw; /* sender receive window */ -}; +} __packed; /** * llc_pdu_init_as_xid_cmd - sets bytes 3, 4 & 5 of LLC header as XID @@ -415,7 +415,7 @@ struct llc_frmr_info { u8 curr_ssv; /* current send state variable val */ u8 curr_rsv; /* current receive state variable */ u8 ind_bits; /* indicator bits set with macro */ -}; +} __packed; extern void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 type); extern void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value); diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 6ae4bc5ce8a..20afeaa3939 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -324,6 +324,7 @@ struct xfrm_state_afinfo { int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n); int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n); int (*output)(struct sk_buff *skb); + 
int (*output_finish)(struct sk_buff *skb); int (*extract_input)(struct xfrm_state *x, struct sk_buff *skb); int (*extract_output)(struct xfrm_state *x, @@ -1454,6 +1455,7 @@ static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi) extern int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb); extern int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb); extern int xfrm4_output(struct sk_buff *skb); +extern int xfrm4_output_finish(struct sk_buff *skb); extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family); extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family); extern int xfrm6_extract_header(struct sk_buff *skb); @@ -1470,6 +1472,7 @@ extern __be32 xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr); extern int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb); extern int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb); extern int xfrm6_output(struct sk_buff *skb); +extern int xfrm6_output_finish(struct sk_buff *skb); extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, u8 **prevhdr); diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 2d3ec509468..dd82e02ddde 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -169,6 +169,7 @@ struct scsi_device { sdev_dev; struct execute_work ew; /* used to get process context on put */ + struct work_struct requeue_work; struct scsi_dh_data *scsi_dh_data; enum scsi_device_state sdev_state; diff --git a/include/trace/events/gfpflags.h b/include/trace/events/gfpflags.h index e3615c09374..9fe3a36646e 100644 --- a/include/trace/events/gfpflags.h +++ b/include/trace/events/gfpflags.h @@ -10,6 +10,7 @@ */ #define show_gfp_flags(flags) \ (flags) ? __print_flags(flags, "|", \ + {(unsigned long)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \ {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"}, \ {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \ {(unsigned long)GFP_USER, "GFP_USER"}, \ @@ -32,6 +33,9 @@ {(unsigned long)__GFP_HARDWALL, "GFP_HARDWALL"}, \ {(unsigned long)__GFP_THISNODE, "GFP_THISNODE"}, \ {(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \ - {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"} \ + {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"}, \ + {(unsigned long)__GFP_NOTRACK, "GFP_NOTRACK"}, \ + {(unsigned long)__GFP_NO_KSWAPD, "GFP_NO_KSWAPD"}, \ + {(unsigned long)__GFP_OTHER_NODE, "GFP_OTHER_NODE"} \ ) : "GFP_NOWAIT" diff --git a/init/Kconfig b/init/Kconfig index d886b1e9278..7a71e0a9992 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1226,7 +1226,6 @@ config SLAB per cpu and per node queues. config SLUB - depends on BROKEN || NUMA || !DISCONTIGMEM bool "SLUB (Unqueued Allocator)" help SLUB is a slab allocator that minimizes cache line usage diff --git a/kernel/capability.c b/kernel/capability.c index bf0c734d0c1..32a80e08ff4 100644 --- a/kernel/capability.c +++ b/kernel/capability.c @@ -399,3 +399,15 @@ bool task_ns_capable(struct task_struct *t, int cap) return ns_capable(task_cred_xxx(t, user)->user_ns, cap); } EXPORT_SYMBOL(task_ns_capable); + +/** + * nsown_capable - Check superior capability to one's own user_ns + * @cap: The capability in question + * + * Return true if the current task has the given superior capability + * targeted at its own user namespace. 
+ */ +bool nsown_capable(int cap) +{ + return ns_capable(current_user_ns(), cap); +} diff --git a/kernel/cred.c b/kernel/cred.c index 5557b55048d..8093c16b84b 100644 --- a/kernel/cred.c +++ b/kernel/cred.c @@ -54,6 +54,7 @@ struct cred init_cred = { .cap_effective = CAP_INIT_EFF_SET, .cap_bset = CAP_INIT_BSET, .user = INIT_USER, + .user_ns = &init_user_ns, .group_info = &init_groups, #ifdef CONFIG_KEYS .tgcred = &init_tgcred, @@ -410,6 +411,11 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags) goto error_put; } + /* cache user_ns in cred. Doesn't need a refcount because it will + * stay pinned by cred->user + */ + new->user_ns = new->user->user_ns; + #ifdef CONFIG_KEYS /* new threads get their own thread keyrings if their parent already * had one */ @@ -741,12 +747,6 @@ int set_create_files_as(struct cred *new, struct inode *inode) } EXPORT_SYMBOL(set_create_files_as); -struct user_namespace *current_user_ns(void) -{ - return _current_user_ns(); -} -EXPORT_SYMBOL(current_user_ns); - #ifdef CONFIG_DEBUG_CREDENTIALS bool creds_are_invalid(const struct cred *cred) diff --git a/kernel/exit.c b/kernel/exit.c index f5d2f63bae0..8dd87418154 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -1016,7 +1016,7 @@ NORET_TYPE void do_exit(long code) /* * FIXME: do that only when needed, using sched_exit tracepoint */ - flush_ptrace_hw_breakpoint(tsk); + ptrace_put_breakpoints(tsk); exit_notify(tsk, group_dead); #ifdef CONFIG_NUMA diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 8935369d503..6275970b218 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -216,7 +216,6 @@ int suspend_devices_and_enter(suspend_state_t state) goto Close; } suspend_console(); - pm_restrict_gfp_mask(); suspend_test_start(); error = dpm_suspend_start(PMSG_SUSPEND); if (error) { @@ -233,7 +232,6 @@ int suspend_devices_and_enter(suspend_state_t state) suspend_test_start(); dpm_resume_end(PMSG_RESUME); suspend_test_finish("resume devices"); - pm_restore_gfp_mask(); resume_console(); Close: if (suspend_ops->end) @@ -294,7 +292,9 @@ int enter_state(suspend_state_t state) goto Finish; pr_debug("PM: Entering %s sleep\n", pm_states[state]); + pm_restrict_gfp_mask(); error = suspend_devices_and_enter(state); + pm_restore_gfp_mask(); Finish: pr_debug("PM: Finishing wakeup.\n"); diff --git a/kernel/power/user.c b/kernel/power/user.c index c36c3b9e8a8..7d02d33be69 100644 --- a/kernel/power/user.c +++ b/kernel/power/user.c @@ -135,8 +135,10 @@ static int snapshot_release(struct inode *inode, struct file *filp) free_basic_memory_bitmaps(); data = filp->private_data; free_all_swap_pages(data->swap); - if (data->frozen) + if (data->frozen) { + pm_restore_gfp_mask(); thaw_processes(); + } pm_notifier_call_chain(data->mode == O_RDONLY ? 
PM_POST_HIBERNATION : PM_POST_RESTORE); atomic_inc(&snapshot_device_available); @@ -379,6 +381,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd, * PM_HIBERNATION_PREPARE */ error = suspend_devices_and_enter(PM_SUSPEND_MEM); + data->ready = 0; break; case SNAPSHOT_PLATFORM_SUPPORT: diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 0fc1eed28d2..dc7ab65f3b3 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -22,6 +22,7 @@ #include <linux/syscalls.h> #include <linux/uaccess.h> #include <linux/regset.h> +#include <linux/hw_breakpoint.h> /* @@ -879,3 +880,19 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, return ret; } #endif /* CONFIG_COMPAT */ + +#ifdef CONFIG_HAVE_HW_BREAKPOINT +int ptrace_get_breakpoints(struct task_struct *tsk) +{ + if (atomic_inc_not_zero(&tsk->ptrace_bp_refcnt)) + return 0; + + return -1; +} + +void ptrace_put_breakpoints(struct task_struct *tsk) +{ + if (atomic_dec_and_test(&tsk->ptrace_bp_refcnt)) + flush_ptrace_hw_breakpoint(tsk); +} +#endif /* CONFIG_HAVE_HW_BREAKPOINT */ diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 6519cf62d9c..0e17c10f8a9 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -685,8 +685,8 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq) /* Add clocksource to the clcoksource list */ mutex_lock(&clocksource_mutex); clocksource_enqueue(cs); - clocksource_select(); clocksource_enqueue_watchdog(cs); + clocksource_select(); mutex_unlock(&clocksource_mutex); return 0; } @@ -706,8 +706,8 @@ int clocksource_register(struct clocksource *cs) mutex_lock(&clocksource_mutex); clocksource_enqueue(cs); - clocksource_select(); clocksource_enqueue_watchdog(cs); + clocksource_select(); mutex_unlock(&clocksource_mutex); return 0; } diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index da800ffa810..723c7637e55 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -522,10 +522,11 @@ static void tick_broadcast_init_next_event(struct cpumask *mask, */ void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { + int cpu = smp_processor_id(); + /* Set it up only once ! */ if (bc->event_handler != tick_handle_oneshot_broadcast) { int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC; - int cpu = smp_processor_id(); bc->event_handler = tick_handle_oneshot_broadcast; clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); @@ -551,6 +552,15 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc) tick_broadcast_set_event(tick_next_period, 1); } else bc->next_event.tv64 = KTIME_MAX; + } else { + /* + * The first cpu which switches to oneshot mode sets + * the bit for all other cpus which are in the general + * (periodic) broadcast mask. So the bit is set and + * would prevent the first broadcast enter after this + * to program the bc device. + */ + tick_broadcast_clear_oneshot(cpu); } } diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index d38c16a06a6..1cb49be7c7f 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1110,6 +1110,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, entry->preempt_count = pc & 0xff; entry->pid = (tsk) ? tsk->pid : 0; + entry->padding = 0; entry->flags = #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT (irqs_disabled_flags(flags) ? 
TRACE_FLAG_IRQS_OFF : 0) | diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index e88f74fe1d4..2fe11034135 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -116,6 +116,7 @@ static int trace_define_common_fields(void) __common_field(unsigned char, flags); __common_field(unsigned char, preempt_count); __common_field(int, pid); + __common_field(int, padding); return ret; } diff --git a/lib/vsprintf.c b/lib/vsprintf.c index bc0ac6b333d..dfd60192bc2 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -797,7 +797,7 @@ char *uuid_string(char *buf, char *end, const u8 *addr, return string(buf, end, uuid, spec); } -int kptr_restrict = 1; +int kptr_restrict __read_mostly; /* * Show a '%p' thing. A kernel extension is that the '%p' is followed diff --git a/mm/memory.c b/mm/memory.c index 27f42537811..61e66f02656 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1412,9 +1412,8 @@ no_page_table: static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr) { - return (vma->vm_flags & VM_GROWSDOWN) && - (vma->vm_start == addr) && - !vma_stack_continue(vma->vm_prev, addr); + return stack_guard_page_start(vma, addr) || + stack_guard_page_end(vma, addr+PAGE_SIZE); } /** @@ -1551,12 +1550,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, continue; } - /* - * For mlock, just skip the stack guard page. - */ - if ((gup_flags & FOLL_MLOCK) && stack_guard_page(vma, start)) - goto next_page; - do { struct page *page; unsigned int foll_flags = gup_flags; @@ -1573,6 +1566,11 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, int ret; unsigned int fault_flags = 0; + /* For mlock, just skip the stack guard page. */ + if (foll_flags & FOLL_MLOCK) { + if (stack_guard_page(vma, start)) + goto next_page; + } if (foll_flags & FOLL_WRITE) fault_flags |= FAULT_FLAG_WRITE; if (nonblocking) diff --git a/mm/mmap.c b/mm/mmap.c index e27e0cf0de0..772140c53ab 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1767,10 +1767,13 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) size = address - vma->vm_start; grow = (address - vma->vm_end) >> PAGE_SHIFT; - error = acct_stack_growth(vma, size, grow); - if (!error) { - vma->vm_end = address; - perf_event_mmap(vma); + error = -ENOMEM; + if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) { + error = acct_stack_growth(vma, size, grow); + if (!error) { + vma->vm_end = address; + perf_event_mmap(vma); + } } } vma_unlock_anon_vma(vma); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 9f8a97b9a35..3f8bce264df 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2317,6 +2317,21 @@ void free_pages(unsigned long addr, unsigned int order) EXPORT_SYMBOL(free_pages); +static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size) +{ + if (addr) { + unsigned long alloc_end = addr + (PAGE_SIZE << order); + unsigned long used = addr + PAGE_ALIGN(size); + + split_page(virt_to_page((void *)addr), order); + while (used < alloc_end) { + free_page(used); + used += PAGE_SIZE; + } + } + return (void *)addr; +} + /** * alloc_pages_exact - allocate an exact number physically-contiguous pages. 
* @size: the number of bytes to allocate @@ -2336,22 +2351,33 @@ void *alloc_pages_exact(size_t size, gfp_t gfp_mask) unsigned long addr; addr = __get_free_pages(gfp_mask, order); - if (addr) { - unsigned long alloc_end = addr + (PAGE_SIZE << order); - unsigned long used = addr + PAGE_ALIGN(size); - - split_page(virt_to_page((void *)addr), order); - while (used < alloc_end) { - free_page(used); - used += PAGE_SIZE; - } - } - - return (void *)addr; + return make_alloc_exact(addr, order, size); } EXPORT_SYMBOL(alloc_pages_exact); /** + * alloc_pages_exact_nid - allocate an exact number of physically-contiguous + * pages on a node. + * @nid: the preferred node ID where memory should be allocated + * @size: the number of bytes to allocate + * @gfp_mask: GFP flags for the allocation + * + * Like alloc_pages_exact(), but try to allocate on node nid first before falling + * back. + * Note this is not alloc_pages_exact_node() which allocates on a specific node, + * but is not exact. + */ +void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) +{ + unsigned order = get_order(size); + struct page *p = alloc_pages_node(nid, gfp_mask, order); + if (!p) + return NULL; + return make_alloc_exact((unsigned long)page_address(p), order, size); +} +EXPORT_SYMBOL(alloc_pages_exact_nid); + +/** * free_pages_exact - release memory allocated via alloc_pages_exact() * @virt: the value returned by alloc_pages_exact. * @size: size of allocation, same value as passed to alloc_pages_exact(). @@ -3564,7 +3590,7 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) if (!slab_is_available()) { zone->wait_table = (wait_queue_head_t *) - alloc_bootmem_node(pgdat, alloc_size); + alloc_bootmem_node_nopanic(pgdat, alloc_size); } else { /* * This case means that a zone whose size was 0 gets new memory @@ -4141,7 +4167,8 @@ static void __init setup_usemap(struct pglist_data *pgdat, unsigned long usemapsize = usemap_size(zonesize); zone->pageblock_flags = NULL; if (usemapsize) - zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize); + zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat, + usemapsize); } #else static inline void setup_usemap(struct pglist_data *pgdat, @@ -4307,7 +4334,7 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat) size = (end - start) * sizeof(struct page); map = alloc_remap(pgdat->node_id, size); if (!map) - map = alloc_bootmem_node(pgdat, size); + map = alloc_bootmem_node_nopanic(pgdat, size); pgdat->node_mem_map = map + (pgdat->node_start_pfn - start); } #ifndef CONFIG_NEED_MULTIPLE_NODES diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c index 99055010cec..2daadc322ba 100644 --- a/mm/page_cgroup.c +++ b/mm/page_cgroup.c @@ -134,7 +134,7 @@ static void *__init_refok alloc_page_cgroup(size_t size, int nid) { void *addr = NULL; - addr = alloc_pages_exact(size, GFP_KERNEL | __GFP_NOWARN); + addr = alloc_pages_exact_nid(nid, size, GFP_KERNEL | __GFP_NOWARN); if (addr) return addr; diff --git a/mm/shmem.c b/mm/shmem.c index 8fa27e4e582..dfc7069102e 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -852,7 +852,7 @@ static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page) { - struct inode *inode; + struct address_space *mapping; unsigned long idx; unsigned long size; unsigned long limit; @@ -875,8 +875,10 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s if (size > SHMEM_NR_DIRECT) size = 
SHMEM_NR_DIRECT; offset = shmem_find_swp(entry, ptr, ptr+size); - if (offset >= 0) + if (offset >= 0) { + shmem_swp_balance_unmap(); goto found; + } if (!info->i_indirect) goto lost2; @@ -914,11 +916,11 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s if (size > ENTRIES_PER_PAGE) size = ENTRIES_PER_PAGE; offset = shmem_find_swp(entry, ptr, ptr+size); - shmem_swp_unmap(ptr); if (offset >= 0) { shmem_dir_unmap(dir); goto found; } + shmem_swp_unmap(ptr); } } lost1: @@ -928,8 +930,7 @@ lost2: return 0; found: idx += offset; - inode = igrab(&info->vfs_inode); - spin_unlock(&info->lock); + ptr += offset; /* * Move _head_ to start search for next from here. @@ -940,37 +941,18 @@ found: */ if (shmem_swaplist.next != &info->swaplist) list_move_tail(&shmem_swaplist, &info->swaplist); - mutex_unlock(&shmem_swaplist_mutex); - error = 1; - if (!inode) - goto out; /* - * Charge page using GFP_KERNEL while we can wait. - * Charged back to the user(not to caller) when swap account is used. - * add_to_page_cache() will be called with GFP_NOWAIT. + * We rely on shmem_swaplist_mutex, not only to protect the swaplist, + * but also to hold up shmem_evict_inode(): so inode cannot be freed + * beneath us (pagelock doesn't help until the page is in pagecache). */ - error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL); - if (error) - goto out; - error = radix_tree_preload(GFP_KERNEL); - if (error) { - mem_cgroup_uncharge_cache_page(page); - goto out; - } - error = 1; - - spin_lock(&info->lock); - ptr = shmem_swp_entry(info, idx, NULL); - if (ptr && ptr->val == entry.val) { - error = add_to_page_cache_locked(page, inode->i_mapping, - idx, GFP_NOWAIT); - /* does mem_cgroup_uncharge_cache_page on error */ - } else /* we must compensate for our precharge above */ - mem_cgroup_uncharge_cache_page(page); + mapping = info->vfs_inode.i_mapping; + error = add_to_page_cache_locked(page, mapping, idx, GFP_NOWAIT); + /* which does mem_cgroup_uncharge_cache_page on error */ if (error == -EEXIST) { - struct page *filepage = find_get_page(inode->i_mapping, idx); + struct page *filepage = find_get_page(mapping, idx); error = 1; if (filepage) { /* @@ -990,14 +972,8 @@ found: swap_free(entry); error = 1; /* not an error, but entry was found */ } - if (ptr) - shmem_swp_unmap(ptr); + shmem_swp_unmap(ptr); spin_unlock(&info->lock); - radix_tree_preload_end(); -out: - unlock_page(page); - page_cache_release(page); - iput(inode); /* allows for NULL */ return error; } @@ -1009,6 +985,26 @@ int shmem_unuse(swp_entry_t entry, struct page *page) struct list_head *p, *next; struct shmem_inode_info *info; int found = 0; + int error; + + /* + * Charge page using GFP_KERNEL while we can wait, before taking + * the shmem_swaplist_mutex which might hold up shmem_writepage(). + * Charged back to the user (not to caller) when swap account is used. + * add_to_page_cache() will be called with GFP_NOWAIT. + */ + error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL); + if (error) + goto out; + /* + * Try to preload while we can wait, to not make a habit of + * draining atomic reserves; but don't latch on to this cpu, + * it's okay if sometimes we get rescheduled after this. 
+ */ + error = radix_tree_preload(GFP_KERNEL); + if (error) + goto uncharge; + radix_tree_preload_end(); mutex_lock(&shmem_swaplist_mutex); list_for_each_safe(p, next, &shmem_swaplist) { @@ -1016,17 +1012,19 @@ int shmem_unuse(swp_entry_t entry, struct page *page) found = shmem_unuse_inode(info, entry, page); cond_resched(); if (found) - goto out; + break; } mutex_unlock(&shmem_swaplist_mutex); - /* - * Can some race bring us here? We've been holding page lock, - * so I think not; but would rather try again later than BUG() - */ + +uncharge: + if (!found) + mem_cgroup_uncharge_cache_page(page); + if (found < 0) + error = found; +out: unlock_page(page); page_cache_release(page); -out: - return (found < 0) ? found : 0; + return error; } /* @@ -1064,7 +1062,25 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) else swap.val = 0; + /* + * Add inode to shmem_unuse()'s list of swapped-out inodes, + * if it's not already there. Do it now because we cannot take + * mutex while holding spinlock, and must do so before the page + * is moved to swap cache, when its pagelock no longer protects + * the inode from eviction. But don't unlock the mutex until + * we've taken the spinlock, because shmem_unuse_inode() will + * prune a !swapped inode from the swaplist under both locks. + */ + if (swap.val) { + mutex_lock(&shmem_swaplist_mutex); + if (list_empty(&info->swaplist)) + list_add_tail(&info->swaplist, &shmem_swaplist); + } + spin_lock(&info->lock); + if (swap.val) + mutex_unlock(&shmem_swaplist_mutex); + if (index >= info->next_index) { BUG_ON(!(info->flags & SHMEM_TRUNCATE)); goto unlock; @@ -1084,21 +1100,10 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) delete_from_page_cache(page); shmem_swp_set(info, entry, swap.val); shmem_swp_unmap(entry); - if (list_empty(&info->swaplist)) - inode = igrab(inode); - else - inode = NULL; spin_unlock(&info->lock); swap_shmem_alloc(swap); BUG_ON(page_mapped(page)); swap_writepage(page, wbc); - if (inode) { - mutex_lock(&shmem_swaplist_mutex); - /* move instead of add in case we're racing */ - list_move_tail(&info->swaplist, &shmem_swaplist); - mutex_unlock(&shmem_swaplist_mutex); - iput(inode); - } return 0; } @@ -1400,20 +1405,14 @@ repeat: if (sbinfo->max_blocks) { if (percpu_counter_compare(&sbinfo->used_blocks, sbinfo->max_blocks) >= 0 || - shmem_acct_block(info->flags)) { - spin_unlock(&info->lock); - error = -ENOSPC; - goto failed; - } + shmem_acct_block(info->flags)) + goto nospace; percpu_counter_inc(&sbinfo->used_blocks); spin_lock(&inode->i_lock); inode->i_blocks += BLOCKS_PER_PAGE; spin_unlock(&inode->i_lock); - } else if (shmem_acct_block(info->flags)) { - spin_unlock(&info->lock); - error = -ENOSPC; - goto failed; - } + } else if (shmem_acct_block(info->flags)) + goto nospace; if (!filepage) { int ret; @@ -1493,6 +1492,24 @@ done: error = 0; goto out; +nospace: + /* + * Perhaps the page was brought in from swap between find_lock_page + * and taking info->lock? We allow for that at add_to_page_cache_lru, + * but must also avoid reporting a spurious ENOSPC while working on a + * full tmpfs. (When filepage has been passed in to shmem_getpage, it + * is already in page cache, which prevents this race from occurring.) 
+ */ + if (!filepage) { + struct page *page = find_get_page(mapping, idx); + if (page) { + spin_unlock(&info->lock); + page_cache_release(page); + goto repeat; + } + } + spin_unlock(&info->lock); + error = -ENOSPC; failed: if (*pagep != filepage) { unlock_page(filepage); diff --git a/mm/swap.c b/mm/swap.c index a448db377cb..5602f1a1b1e 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -396,6 +396,9 @@ static void lru_deactivate_fn(struct page *page, void *arg) if (!PageLRU(page)) return; + if (PageUnevictable(page)) + return; + /* Some processes are using the page */ if (page_mapped(page)) return; diff --git a/mm/vmscan.c b/mm/vmscan.c index f6b435c8007..8bfd45050a6 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -937,7 +937,7 @@ keep_lumpy: * back off and wait for congestion to clear because further reclaim * will encounter the same problem */ - if (nr_dirty == nr_congested && nr_dirty != 0) + if (nr_dirty && nr_dirty == nr_congested && scanning_global_lru(sc)) zone_set_flag(zone, ZONE_CONGESTED); free_page_list(&free_pages); diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 7850412f52b..0eb1a886b37 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -124,6 +124,9 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head) grp->nr_vlans--; + if (vlan->flags & VLAN_FLAG_GVRP) + vlan_gvrp_request_leave(dev); + vlan_group_set_device(grp, vlan_id, NULL); if (!grp->killall) synchronize_net(); diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index e34ea9e5e28..b2ff6c8d360 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -487,9 +487,6 @@ static int vlan_dev_stop(struct net_device *dev) struct vlan_dev_info *vlan = vlan_dev_info(dev); struct net_device *real_dev = vlan->real_dev; - if (vlan->flags & VLAN_FLAG_GVRP) - vlan_gvrp_request_leave(dev); - dev_mc_unsync(real_dev, dev); dev_uc_unsync(real_dev, dev); if (dev->flags & IFF_ALLMULTI) diff --git a/net/9p/client.c b/net/9p/client.c index 77367745be9..a9aa2dd6648 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -614,7 +614,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...) 
err = c->trans_mod->request(c, req); if (err < 0) { - if (err != -ERESTARTSYS) + if (err != -ERESTARTSYS && err != -EFAULT) c->status = Disconnected; goto reterr; } diff --git a/net/9p/protocol.c b/net/9p/protocol.c index b58a501cf3d..a873277cb99 100644 --- a/net/9p/protocol.c +++ b/net/9p/protocol.c @@ -674,6 +674,7 @@ int p9dirent_read(char *buf, int len, struct p9_dirent *dirent, } strcpy(dirent->d_name, nameptr); + kfree(nameptr); out: return fake_pdu.offset; diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c index e883172f9aa..9a70ebdec56 100644 --- a/net/9p/trans_common.c +++ b/net/9p/trans_common.c @@ -63,7 +63,7 @@ p9_payload_gup(struct p9_req_t *req, size_t *pdata_off, int *pdata_len, int nr_pages, u8 rw) { uint32_t first_page_bytes = 0; - uint32_t pdata_mapped_pages; + int32_t pdata_mapped_pages; struct trans_rpage_info *rpinfo; *pdata_off = (__force size_t)req->tc->pubuf & (PAGE_SIZE-1); @@ -75,14 +75,9 @@ p9_payload_gup(struct p9_req_t *req, size_t *pdata_off, int *pdata_len, rpinfo = req->tc->private; pdata_mapped_pages = get_user_pages_fast((unsigned long)req->tc->pubuf, nr_pages, rw, &rpinfo->rp_data[0]); + if (pdata_mapped_pages <= 0) + return pdata_mapped_pages; - if (pdata_mapped_pages < 0) { - printk(KERN_ERR "get_user_pages_fast failed:%d udata:%p" - "nr_pages:%d\n", pdata_mapped_pages, - req->tc->pubuf, nr_pages); - pdata_mapped_pages = 0; - return -EIO; - } rpinfo->rp_nr_pages = pdata_mapped_pages; if (*pdata_off) { *pdata_len = first_page_bytes; diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 94954c74f6a..42fdffd1d76 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -369,15 +369,6 @@ static void __sco_sock_close(struct sock *sk) case BT_CONNECTED: case BT_CONFIG: - if (sco_pi(sk)->conn) { - sk->sk_state = BT_DISCONN; - sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT); - hci_conn_put(sco_pi(sk)->conn->hcon); - sco_pi(sk)->conn = NULL; - } else - sco_chan_del(sk, ECONNRESET); - break; - case BT_CONNECT: case BT_DISCONN: sco_chan_del(sk, ECONNRESET); diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index f3bc322c589..74ef4d4846a 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -737,7 +737,7 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb, nf_bridge->mask |= BRNF_PKT_TYPE; } - if (br_parse_ip_options(skb)) + if (pf == PF_INET && br_parse_ip_options(skb)) return NF_DROP; /* The physdev module checks on this */ diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 893669caa8d..1a92b369c82 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -1766,7 +1766,7 @@ static int compat_table_info(const struct ebt_table_info *info, newinfo->entries_size = size; - xt_compat_init_offsets(AF_INET, info->nentries); + xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries); return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info, entries, newinfo); } @@ -1882,7 +1882,7 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt, struct xt_match *match; struct xt_target *wt; void *dst = NULL; - int off, pad = 0, ret = 0; + int off, pad = 0; unsigned int size_kern, entry_offset, match_size = mwt->match_size; strlcpy(name, mwt->u.name, sizeof(name)); @@ -1935,13 +1935,6 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt, break; } - if (!dst) { - ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, - off + ebt_compat_entry_padsize()); - if (ret < 0) - return ret; - } - state->buf_kern_offset 
+= match_size + off; state->buf_user_offset += match_size; pad = XT_ALIGN(size_kern) - size_kern; @@ -2016,50 +2009,6 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32, return growth; } -#define EBT_COMPAT_WATCHER_ITERATE(e, fn, args...) \ -({ \ - unsigned int __i; \ - int __ret = 0; \ - struct compat_ebt_entry_mwt *__watcher; \ - \ - for (__i = e->watchers_offset; \ - __i < (e)->target_offset; \ - __i += __watcher->watcher_size + \ - sizeof(struct compat_ebt_entry_mwt)) { \ - __watcher = (void *)(e) + __i; \ - __ret = fn(__watcher , ## args); \ - if (__ret != 0) \ - break; \ - } \ - if (__ret == 0) { \ - if (__i != (e)->target_offset) \ - __ret = -EINVAL; \ - } \ - __ret; \ -}) - -#define EBT_COMPAT_MATCH_ITERATE(e, fn, args...) \ -({ \ - unsigned int __i; \ - int __ret = 0; \ - struct compat_ebt_entry_mwt *__match; \ - \ - for (__i = sizeof(struct ebt_entry); \ - __i < (e)->watchers_offset; \ - __i += __match->match_size + \ - sizeof(struct compat_ebt_entry_mwt)) { \ - __match = (void *)(e) + __i; \ - __ret = fn(__match , ## args); \ - if (__ret != 0) \ - break; \ - } \ - if (__ret == 0) { \ - if (__i != (e)->watchers_offset) \ - __ret = -EINVAL; \ - } \ - __ret; \ -}) - /* called for all ebt_entry structures. */ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, unsigned int *total, @@ -2132,6 +2081,14 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, } } + if (state->buf_kern_start == NULL) { + unsigned int offset = buf_start - (char *) base; + + ret = xt_compat_add_offset(NFPROTO_BRIDGE, offset, new_offset); + if (ret < 0) + return ret; + } + startoff = state->buf_user_offset - startoff; BUG_ON(*total < startoff); @@ -2240,6 +2197,7 @@ static int compat_do_replace(struct net *net, void __user *user, xt_compat_lock(NFPROTO_BRIDGE); + xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries); ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); if (ret < 0) goto out_unlock; diff --git a/net/core/dev.c b/net/core/dev.c index 856b6ee9a1d..b624fe4d9bd 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1284,11 +1284,13 @@ static int dev_close_many(struct list_head *head) */ int dev_close(struct net_device *dev) { - LIST_HEAD(single); + if (dev->flags & IFF_UP) { + LIST_HEAD(single); - list_add(&dev->unreg_list, &single); - dev_close_many(&single); - list_del(&single); + list_add(&dev->unreg_list, &single); + dev_close_many(&single); + list_del(&single); + } return 0; } EXPORT_SYMBOL(dev_close); @@ -5184,27 +5186,27 @@ u32 netdev_fix_features(struct net_device *dev, u32 features) /* Fix illegal checksum combinations */ if ((features & NETIF_F_HW_CSUM) && (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { - netdev_info(dev, "mixed HW and IP checksum settings.\n"); + netdev_warn(dev, "mixed HW and IP checksum settings.\n"); features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); } if ((features & NETIF_F_NO_CSUM) && (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { - netdev_info(dev, "mixed no checksumming and other settings.\n"); + netdev_warn(dev, "mixed no checksumming and other settings.\n"); features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM); } /* Fix illegal SG+CSUM combinations. */ if ((features & NETIF_F_SG) && !(features & NETIF_F_ALL_CSUM)) { - netdev_info(dev, - "Dropping NETIF_F_SG since no checksum feature.\n"); + netdev_dbg(dev, + "Dropping NETIF_F_SG since no checksum feature.\n"); features &= ~NETIF_F_SG; } /* TSO requires that SG is present as well. 
*/ if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) { - netdev_info(dev, "Dropping TSO features since no SG feature.\n"); + netdev_dbg(dev, "Dropping TSO features since no SG feature.\n"); features &= ~NETIF_F_ALL_TSO; } @@ -5214,7 +5216,7 @@ u32 netdev_fix_features(struct net_device *dev, u32 features) /* Software GSO depends on SG. */ if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) { - netdev_info(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); + netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); features &= ~NETIF_F_GSO; } @@ -5224,13 +5226,13 @@ u32 netdev_fix_features(struct net_device *dev, u32 features) if (!((features & NETIF_F_GEN_CSUM) || (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)) == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { - netdev_info(dev, + netdev_dbg(dev, "Dropping NETIF_F_UFO since no checksum offload features.\n"); features &= ~NETIF_F_UFO; } if (!(features & NETIF_F_SG)) { - netdev_info(dev, + netdev_dbg(dev, "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n"); features &= ~NETIF_F_UFO; } @@ -5412,12 +5414,6 @@ int register_netdevice(struct net_device *dev) dev->features |= NETIF_F_SOFT_FEATURES; dev->wanted_features = dev->features & dev->hw_features; - /* Avoid warning from netdev_fix_features() for GSO without SG */ - if (!(dev->wanted_features & NETIF_F_SG)) { - dev->wanted_features &= ~NETIF_F_GSO; - dev->features &= ~NETIF_F_GSO; - } - /* Enable GRO and NETIF_F_HIGHDMA for vlans by default, * vlan_dev_init() will do the dev->features check, so these features * are enabled only if supported by underlying device. diff --git a/net/dccp/options.c b/net/dccp/options.c index f06ffcfc8d7..4b2ab657ac8 100644 --- a/net/dccp/options.c +++ b/net/dccp/options.c @@ -123,6 +123,8 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq, case DCCPO_CHANGE_L ... DCCPO_CONFIRM_R: if (pkt_type == DCCP_PKT_DATA) /* RFC 4340, 6 */ break; + if (len == 0) + goto out_invalid_option; rc = dccp_feat_parse_options(sk, dreq, mandatory, opt, *value, value + 1, len - 1); if (rc) diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index a1151b8adf3..b1d282f11be 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -223,31 +223,30 @@ static void ip_expire(unsigned long arg) if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) { struct sk_buff *head = qp->q.fragments; + const struct iphdr *iph; + int err; rcu_read_lock(); head->dev = dev_get_by_index_rcu(net, qp->iif); if (!head->dev) goto out_rcu_unlock; + /* skb dst is stale, drop it, and perform route lookup again */ + skb_dst_drop(head); + iph = ip_hdr(head); + err = ip_route_input_noref(head, iph->daddr, iph->saddr, + iph->tos, head->dev); + if (err) + goto out_rcu_unlock; + /* - * Only search router table for the head fragment, - * when defraging timeout at PRE_ROUTING HOOK. + * Only an end host needs to send an ICMP + * "Fragment Reassembly Timeout" message, per RFC792. */ - if (qp->user == IP_DEFRAG_CONNTRACK_IN && !skb_dst(head)) { - const struct iphdr *iph = ip_hdr(head); - int err = ip_route_input(head, iph->daddr, iph->saddr, - iph->tos, head->dev); - if (unlikely(err)) - goto out_rcu_unlock; - - /* - * Only an end host needs to send an ICMP - * "Fragment Reassembly Timeout" message, per RFC792. - */ - if (skb_rtable(head)->rt_type != RTN_LOCAL) - goto out_rcu_unlock; + if (qp->user == IP_DEFRAG_CONNTRACK_IN && + skb_rtable(head)->rt_type != RTN_LOCAL) + goto out_rcu_unlock; - } /* Send an ICMP "Fragment Reassembly Timeout" message. 
*/ icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index 34340c9c95f..f376b05cca8 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -93,6 +93,7 @@ struct bictcp { u32 ack_cnt; /* number of acks */ u32 tcp_cwnd; /* estimated tcp cwnd */ #define ACK_RATIO_SHIFT 4 +#define ACK_RATIO_LIMIT (32u << ACK_RATIO_SHIFT) u16 delayed_ack; /* estimate the ratio of Packets/ACKs << 4 */ u8 sample_cnt; /* number of samples to decide curr_rtt */ u8 found; /* the exit point is found? */ @@ -398,8 +399,12 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us) u32 delay; if (icsk->icsk_ca_state == TCP_CA_Open) { - cnt -= ca->delayed_ack >> ACK_RATIO_SHIFT; - ca->delayed_ack += cnt; + u32 ratio = ca->delayed_ack; + + ratio -= ca->delayed_ack >> ACK_RATIO_SHIFT; + ratio += cnt; + + ca->delayed_ack = min(ratio, ACK_RATIO_LIMIT); } /* Some calls are for duplicates without timetamps */ diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index 571aa96a175..2d51840e53a 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c @@ -69,7 +69,7 @@ int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb) } EXPORT_SYMBOL(xfrm4_prepare_output); -static int xfrm4_output_finish(struct sk_buff *skb) +int xfrm4_output_finish(struct sk_buff *skb) { #ifdef CONFIG_NETFILTER if (!skb_dst(skb)->xfrm) { @@ -86,7 +86,11 @@ static int xfrm4_output_finish(struct sk_buff *skb) int xfrm4_output(struct sk_buff *skb) { + struct dst_entry *dst = skb_dst(skb); + struct xfrm_state *x = dst->xfrm; + return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, - NULL, skb_dst(skb)->dev, xfrm4_output_finish, + NULL, dst->dev, + x->outer_mode->afinfo->output_finish, !(IPCB(skb)->flags & IPSKB_REROUTED)); } diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c index 1717c64628d..805d63ef434 100644 --- a/net/ipv4/xfrm4_state.c +++ b/net/ipv4/xfrm4_state.c @@ -78,6 +78,7 @@ static struct xfrm_state_afinfo xfrm4_state_afinfo = { .init_tempsel = __xfrm4_init_tempsel, .init_temprop = xfrm4_init_temprop, .output = xfrm4_output, + .output_finish = xfrm4_output_finish, .extract_input = xfrm4_extract_input, .extract_output = xfrm4_extract_output, .transport_finish = xfrm4_transport_finish, diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c index 28e74488a32..a5a4c5dd539 100644 --- a/net/ipv6/netfilter/ip6t_REJECT.c +++ b/net/ipv6/netfilter/ip6t_REJECT.c @@ -45,6 +45,8 @@ static void send_reset(struct net *net, struct sk_buff *oldskb) int tcphoff, needs_ack; const struct ipv6hdr *oip6h = ipv6_hdr(oldskb); struct ipv6hdr *ip6h; +#define DEFAULT_TOS_VALUE 0x0U + const __u8 tclass = DEFAULT_TOS_VALUE; struct dst_entry *dst = NULL; u8 proto; struct flowi6 fl6; @@ -124,7 +126,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb) skb_put(nskb, sizeof(struct ipv6hdr)); skb_reset_network_header(nskb); ip6h = ipv6_hdr(nskb); - ip6h->version = 6; + *(__be32 *)ip6h = htonl(0x60000000 | (tclass << 20)); ip6h->hop_limit = ip6_dst_hoplimit(dst); ip6h->nexthdr = IPPROTO_TCP; ipv6_addr_copy(&ip6h->saddr, &oip6h->daddr); diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index 8e688b3de9a..49a91c5f562 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c @@ -79,7 +79,7 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb) } EXPORT_SYMBOL(xfrm6_prepare_output); -static int xfrm6_output_finish(struct sk_buff *skb) +int xfrm6_output_finish(struct sk_buff *skb) { 
#ifdef CONFIG_NETFILTER IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED; @@ -97,9 +97,9 @@ static int __xfrm6_output(struct sk_buff *skb) if ((x && x->props.mode == XFRM_MODE_TUNNEL) && ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || dst_allfrag(skb_dst(skb)))) { - return ip6_fragment(skb, xfrm6_output_finish); + return ip6_fragment(skb, x->outer_mode->afinfo->output_finish); } - return xfrm6_output_finish(skb); + return x->outer_mode->afinfo->output_finish(skb); } int xfrm6_output(struct sk_buff *skb) diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c index afe941e9415..248f0b2a7ee 100644 --- a/net/ipv6/xfrm6_state.c +++ b/net/ipv6/xfrm6_state.c @@ -178,6 +178,7 @@ static struct xfrm_state_afinfo xfrm6_state_afinfo = { .tmpl_sort = __xfrm6_tmpl_sort, .state_sort = __xfrm6_state_sort, .output = xfrm6_output, + .output_finish = xfrm6_output_finish, .extract_input = xfrm6_extract_input, .extract_output = xfrm6_extract_output, .transport_finish = xfrm6_transport_finish, diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index ce4596ed126..bd1224fd216 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -237,6 +237,10 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx) &local->dynamic_ps_disable_work); } + /* Don't restart the timer if we're not disassociated */ + if (!ifmgd->associated) + return TX_CONTINUE; + mod_timer(&local->dynamic_ps_timer, jiffies + msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c index 2dc6de13ac1..059af3120be 100644 --- a/net/netfilter/ipvs/ip_vs_app.c +++ b/net/netfilter/ipvs/ip_vs_app.c @@ -572,11 +572,11 @@ static const struct file_operations ip_vs_app_fops = { .open = ip_vs_app_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release, + .release = seq_release_net, }; #endif -static int __net_init __ip_vs_app_init(struct net *net) +int __net_init __ip_vs_app_init(struct net *net) { struct netns_ipvs *ipvs = net_ipvs(net); @@ -585,26 +585,17 @@ static int __net_init __ip_vs_app_init(struct net *net) return 0; } -static void __net_exit __ip_vs_app_cleanup(struct net *net) +void __net_exit __ip_vs_app_cleanup(struct net *net) { proc_net_remove(net, "ip_vs_app"); } -static struct pernet_operations ip_vs_app_ops = { - .init = __ip_vs_app_init, - .exit = __ip_vs_app_cleanup, -}; - int __init ip_vs_app_init(void) { - int rv; - - rv = register_pernet_subsys(&ip_vs_app_ops); - return rv; + return 0; } void ip_vs_app_cleanup(void) { - unregister_pernet_subsys(&ip_vs_app_ops); } diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index c97bd45975b..bf28ac2fc99 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c @@ -1046,7 +1046,7 @@ static const struct file_operations ip_vs_conn_fops = { .open = ip_vs_conn_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release, + .release = seq_release_net, }; static const char *ip_vs_origin_name(unsigned flags) @@ -1114,7 +1114,7 @@ static const struct file_operations ip_vs_conn_sync_fops = { .open = ip_vs_conn_sync_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release, + .release = seq_release_net, }; #endif @@ -1258,22 +1258,17 @@ int __net_init __ip_vs_conn_init(struct net *net) return 0; } -static void __net_exit __ip_vs_conn_cleanup(struct net *net) +void __net_exit __ip_vs_conn_cleanup(struct net *net) { /* flush all the connection entries first */ ip_vs_conn_flush(net); proc_net_remove(net, "ip_vs_conn"); proc_net_remove(net, 
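
The xfrm4/xfrm6 changes above export each family's output_finish routine and make the output paths call it through x->outer_mode->afinfo->output_finish instead of a hard-coded static function, so an inter-family tunnel finishes the packet with the outer family's code. A rough stand-alone sketch of that dispatch; every structure here is a simplified stand-in, not the kernel definition:

#include <stdio.h>

struct sk_buff;                       /* opaque in this sketch */

struct xfrm_state_afinfo {
	int (*output_finish)(struct sk_buff *skb);
};

struct xfrm_mode {
	struct xfrm_state_afinfo *afinfo;
};

struct xfrm_state {
	struct xfrm_mode *outer_mode;
};

static int fake_ip4_finish(struct sk_buff *skb) { (void)skb; puts("xfrm4_output_finish"); return 0; }
static int fake_ip6_finish(struct sk_buff *skb) { (void)skb; puts("xfrm6_output_finish"); return 0; }

static struct xfrm_state_afinfo af4 = { .output_finish = fake_ip4_finish };
static struct xfrm_state_afinfo af6 = { .output_finish = fake_ip6_finish };
static struct xfrm_mode m4 = { .afinfo = &af4 };
static struct xfrm_mode m6 = { .afinfo = &af6 };

int main(void)
{
	/* an IPv6-over-IPv4 tunnel state: the outer mode belongs to the IPv4 side,
	 * so the post-routing hook must finish with the IPv4 routine even though
	 * the packet entered through the IPv6 xfrm output path */
	struct xfrm_state interfamily = { .outer_mode = &m4 };
	struct xfrm_state plain_v6    = { .outer_mode = &m6 };

	interfamily.outer_mode->afinfo->output_finish(NULL);
	plain_v6.outer_mode->afinfo->output_finish(NULL);
	return 0;
}
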
"ip_vs_conn_sync"); } -static struct pernet_operations ipvs_conn_ops = { - .init = __ip_vs_conn_init, - .exit = __ip_vs_conn_cleanup, -}; int __init ip_vs_conn_init(void) { int idx; - int retc; /* Compute size and mask */ ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits; @@ -1309,17 +1304,14 @@ int __init ip_vs_conn_init(void) rwlock_init(&__ip_vs_conntbl_lock_array[idx].l); } - retc = register_pernet_subsys(&ipvs_conn_ops); - /* calculate the random value for connection hash */ get_random_bytes(&ip_vs_conn_rnd, sizeof(ip_vs_conn_rnd)); - return retc; + return 0; } void ip_vs_conn_cleanup(void) { - unregister_pernet_subsys(&ipvs_conn_ops); /* Release the empty cache */ kmem_cache_destroy(ip_vs_conn_cachep); vfree(ip_vs_conn_tab); diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 07accf6b240..a74dae6c5db 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -1113,6 +1113,9 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af) return NF_ACCEPT; net = skb_net(skb); + if (!net_ipvs(net)->enable) + return NF_ACCEPT; + ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) { @@ -1343,6 +1346,7 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum) return NF_ACCEPT; /* The packet looks wrong, ignore */ net = skb_net(skb); + pd = ip_vs_proto_data_get(net, cih->protocol); if (!pd) return NF_ACCEPT; @@ -1529,6 +1533,11 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) IP_VS_DBG_ADDR(af, &iph.daddr), hooknum); return NF_ACCEPT; } + /* ipvs enabled in this netns ? */ + net = skb_net(skb); + if (!net_ipvs(net)->enable) + return NF_ACCEPT; + ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); /* Bad... Do not break raw sockets */ @@ -1562,7 +1571,6 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); } - net = skb_net(skb); /* Protocol supported? */ pd = ip_vs_proto_data_get(net, iph.protocol); if (unlikely(!pd)) @@ -1588,7 +1596,6 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) } IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet"); - net = skb_net(skb); ipvs = net_ipvs(net); /* Check the server status */ if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) { @@ -1743,10 +1750,16 @@ ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff *skb, int (*okfn)(struct sk_buff *)) { int r; + struct net *net; if (ip_hdr(skb)->protocol != IPPROTO_ICMP) return NF_ACCEPT; + /* ipvs enabled in this netns ? */ + net = skb_net(skb); + if (!net_ipvs(net)->enable) + return NF_ACCEPT; + return ip_vs_in_icmp(skb, &r, hooknum); } @@ -1757,10 +1770,16 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb, int (*okfn)(struct sk_buff *)) { int r; + struct net *net; if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6) return NF_ACCEPT; + /* ipvs enabled in this netns ? 
*/ + net = skb_net(skb); + if (!net_ipvs(net)->enable) + return NF_ACCEPT; + return ip_vs_in_icmp_v6(skb, &r, hooknum); } #endif @@ -1884,19 +1903,70 @@ static int __net_init __ip_vs_init(struct net *net) pr_err("%s(): no memory.\n", __func__); return -ENOMEM; } + /* Hold the beast until a service is registerd */ + ipvs->enable = 0; ipvs->net = net; /* Counters used for creating unique names */ ipvs->gen = atomic_read(&ipvs_netns_cnt); atomic_inc(&ipvs_netns_cnt); net->ipvs = ipvs; + + if (__ip_vs_estimator_init(net) < 0) + goto estimator_fail; + + if (__ip_vs_control_init(net) < 0) + goto control_fail; + + if (__ip_vs_protocol_init(net) < 0) + goto protocol_fail; + + if (__ip_vs_app_init(net) < 0) + goto app_fail; + + if (__ip_vs_conn_init(net) < 0) + goto conn_fail; + + if (__ip_vs_sync_init(net) < 0) + goto sync_fail; + printk(KERN_INFO "IPVS: Creating netns size=%zu id=%d\n", sizeof(struct netns_ipvs), ipvs->gen); return 0; +/* + * Error handling + */ + +sync_fail: + __ip_vs_conn_cleanup(net); +conn_fail: + __ip_vs_app_cleanup(net); +app_fail: + __ip_vs_protocol_cleanup(net); +protocol_fail: + __ip_vs_control_cleanup(net); +control_fail: + __ip_vs_estimator_cleanup(net); +estimator_fail: + return -ENOMEM; } static void __net_exit __ip_vs_cleanup(struct net *net) { - IP_VS_DBG(10, "ipvs netns %d released\n", net_ipvs(net)->gen); + __ip_vs_service_cleanup(net); /* ip_vs_flush() with locks */ + __ip_vs_conn_cleanup(net); + __ip_vs_app_cleanup(net); + __ip_vs_protocol_cleanup(net); + __ip_vs_control_cleanup(net); + __ip_vs_estimator_cleanup(net); + IP_VS_DBG(2, "ipvs netns %d released\n", net_ipvs(net)->gen); +} + +static void __net_exit __ip_vs_dev_cleanup(struct net *net) +{ + EnterFunction(2); + net_ipvs(net)->enable = 0; /* Disable packet reception */ + __ip_vs_sync_cleanup(net); + LeaveFunction(2); } static struct pernet_operations ipvs_core_ops = { @@ -1906,6 +1976,10 @@ static struct pernet_operations ipvs_core_ops = { .size = sizeof(struct netns_ipvs), }; +static struct pernet_operations ipvs_core_dev_ops = { + .exit = __ip_vs_dev_cleanup, +}; + /* * Initialize IP Virtual Server */ @@ -1913,10 +1987,6 @@ static int __init ip_vs_init(void) { int ret; - ret = register_pernet_subsys(&ipvs_core_ops); /* Alloc ip_vs struct */ - if (ret < 0) - return ret; - ip_vs_estimator_init(); ret = ip_vs_control_init(); if (ret < 0) { @@ -1944,15 +2014,28 @@ static int __init ip_vs_init(void) goto cleanup_conn; } + ret = register_pernet_subsys(&ipvs_core_ops); /* Alloc ip_vs struct */ + if (ret < 0) + goto cleanup_sync; + + ret = register_pernet_device(&ipvs_core_dev_ops); + if (ret < 0) + goto cleanup_sub; + ret = nf_register_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); if (ret < 0) { pr_err("can't register hooks.\n"); - goto cleanup_sync; + goto cleanup_dev; } pr_info("ipvs loaded.\n"); + return ret; +cleanup_dev: + unregister_pernet_device(&ipvs_core_dev_ops); +cleanup_sub: + unregister_pernet_subsys(&ipvs_core_ops); cleanup_sync: ip_vs_sync_cleanup(); cleanup_conn: @@ -1964,20 +2047,20 @@ cleanup_sync: ip_vs_control_cleanup(); cleanup_estimator: ip_vs_estimator_cleanup(); - unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */ return ret; } static void __exit ip_vs_cleanup(void) { nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); + unregister_pernet_device(&ipvs_core_dev_ops); + unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */ ip_vs_sync_cleanup(); ip_vs_conn_cleanup(); ip_vs_app_cleanup(); ip_vs_protocol_cleanup(); ip_vs_control_cleanup(); 
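
__ip_vs_init() above now brings the per-netns sub-modules up in a fixed order and unwinds them in reverse with chained goto labels when one step fails. A self-contained sketch of the same pattern; the init/cleanup functions are placeholders:

#include <stdio.h>

static int init_estimator(void) { puts("estimator up"); return 0; }
static int init_control(void)   { puts("control up");   return 0; }
static int init_protocol(void)  { puts("protocol up");  return 0; }
static int init_app(void)       { puts("app up");       return 0; }
static int init_conn(void)      { puts("conn up");      return 0; }
static int init_sync(void)      { puts("sync up");      return -1; } /* pretend this step fails */

static void cleanup_conn(void)      { puts("conn down"); }
static void cleanup_app(void)       { puts("app down"); }
static void cleanup_protocol(void)  { puts("protocol down"); }
static void cleanup_control(void)   { puts("control down"); }
static void cleanup_estimator(void) { puts("estimator down"); }

static int netns_init_sketch(void)
{
	if (init_estimator() < 0)
		goto estimator_fail;
	if (init_control() < 0)
		goto control_fail;
	if (init_protocol() < 0)
		goto protocol_fail;
	if (init_app() < 0)
		goto app_fail;
	if (init_conn() < 0)
		goto conn_fail;
	if (init_sync() < 0)
		goto sync_fail;
	return 0;

	/* unwind in reverse order, skipping the step that failed */
sync_fail:
	cleanup_conn();
conn_fail:
	cleanup_app();
app_fail:
	cleanup_protocol();
protocol_fail:
	cleanup_control();
control_fail:
	cleanup_estimator();
estimator_fail:
	return -1;
}

int main(void)
{
	return netns_init_sketch() ? 1 : 0;
}
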
ip_vs_estimator_cleanup(); - unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */ pr_info("ipvs unloaded.\n"); } diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index ae47090bf45..37890f228b1 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -69,6 +69,11 @@ int ip_vs_get_debug_level(void) } #endif + +/* Protos */ +static void __ip_vs_del_service(struct ip_vs_service *svc); + + #ifdef CONFIG_IP_VS_IPV6 /* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */ static int __ip_vs_addr_is_local_v6(struct net *net, @@ -1214,6 +1219,8 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u, write_unlock_bh(&__ip_vs_svc_lock); *svc_p = svc; + /* Now there is a service - full throttle */ + ipvs->enable = 1; return 0; @@ -1472,6 +1479,84 @@ static int ip_vs_flush(struct net *net) return 0; } +/* + * Delete service by {netns} in the service table. + * Called by __ip_vs_cleanup() + */ +void __ip_vs_service_cleanup(struct net *net) +{ + EnterFunction(2); + /* Check for "full" addressed entries */ + mutex_lock(&__ip_vs_mutex); + ip_vs_flush(net); + mutex_unlock(&__ip_vs_mutex); + LeaveFunction(2); +} +/* + * Release dst hold by dst_cache + */ +static inline void +__ip_vs_dev_reset(struct ip_vs_dest *dest, struct net_device *dev) +{ + spin_lock_bh(&dest->dst_lock); + if (dest->dst_cache && dest->dst_cache->dev == dev) { + IP_VS_DBG_BUF(3, "Reset dev:%s dest %s:%u ,dest->refcnt=%d\n", + dev->name, + IP_VS_DBG_ADDR(dest->af, &dest->addr), + ntohs(dest->port), + atomic_read(&dest->refcnt)); + ip_vs_dst_reset(dest); + } + spin_unlock_bh(&dest->dst_lock); + +} +/* + * Netdev event receiver + * Currently only NETDEV_UNREGISTER is handled, i.e. if we hold a reference to + * a device that is "unregister" it must be released. 
+ */ +static int ip_vs_dst_event(struct notifier_block *this, unsigned long event, + void *ptr) +{ + struct net_device *dev = ptr; + struct net *net = dev_net(dev); + struct ip_vs_service *svc; + struct ip_vs_dest *dest; + unsigned int idx; + + if (event != NETDEV_UNREGISTER) + return NOTIFY_DONE; + IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name); + EnterFunction(2); + mutex_lock(&__ip_vs_mutex); + for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { + list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) { + if (net_eq(svc->net, net)) { + list_for_each_entry(dest, &svc->destinations, + n_list) { + __ip_vs_dev_reset(dest, dev); + } + } + } + + list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) { + if (net_eq(svc->net, net)) { + list_for_each_entry(dest, &svc->destinations, + n_list) { + __ip_vs_dev_reset(dest, dev); + } + } + + } + } + + list_for_each_entry(dest, &net_ipvs(net)->dest_trash, n_list) { + __ip_vs_dev_reset(dest, dev); + } + mutex_unlock(&__ip_vs_mutex); + LeaveFunction(2); + return NOTIFY_DONE; +} /* * Zero counters in a service or all services @@ -1981,7 +2066,7 @@ static const struct file_operations ip_vs_info_fops = { .open = ip_vs_info_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release_private, + .release = seq_release_net, }; #endif @@ -2024,7 +2109,7 @@ static const struct file_operations ip_vs_stats_fops = { .open = ip_vs_stats_seq_open, .read = seq_read, .llseek = seq_lseek, - .release = single_release, + .release = single_release_net, }; static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v) @@ -2093,7 +2178,7 @@ static const struct file_operations ip_vs_stats_percpu_fops = { .open = ip_vs_stats_percpu_seq_open, .read = seq_read, .llseek = seq_lseek, - .release = single_release, + .release = single_release_net, }; #endif @@ -3588,6 +3673,10 @@ void __net_init __ip_vs_control_cleanup_sysctl(struct net *net) { } #endif +static struct notifier_block ip_vs_dst_notifier = { + .notifier_call = ip_vs_dst_event, +}; + int __net_init __ip_vs_control_init(struct net *net) { int idx; @@ -3626,7 +3715,7 @@ err: return -ENOMEM; } -static void __net_exit __ip_vs_control_cleanup(struct net *net) +void __net_exit __ip_vs_control_cleanup(struct net *net) { struct netns_ipvs *ipvs = net_ipvs(net); @@ -3639,11 +3728,6 @@ static void __net_exit __ip_vs_control_cleanup(struct net *net) free_percpu(ipvs->tot_stats.cpustats); } -static struct pernet_operations ipvs_control_ops = { - .init = __ip_vs_control_init, - .exit = __ip_vs_control_cleanup, -}; - int __init ip_vs_control_init(void) { int idx; @@ -3657,33 +3741,32 @@ int __init ip_vs_control_init(void) INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]); } - ret = register_pernet_subsys(&ipvs_control_ops); - if (ret) { - pr_err("cannot register namespace.\n"); - goto err; - } - smp_wmb(); /* Do we really need it now ? 
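
ip_vs_dst_event() above registers for netdevice notifications and, on NETDEV_UNREGISTER, walks the destinations of the affected netns to drop cached routes that still point at the vanishing device. A simplified userspace sketch of that callback shape; the notifier plumbing and structures are stand-ins, and the real code also walks the fwmark table and the dest trash list under __ip_vs_mutex:

#include <stdio.h>

#define NETDEV_UNREGISTER 1   /* stand-in for the real event code */
#define NOTIFY_DONE       0

struct net_device { char name[16]; };

struct dest {
	struct net_device *cached_dev;   /* stand-in for dest->dst_cache->dev */
};

/* stand-in for __ip_vs_dev_reset(): drop the cached route only if it
 * points at the device that is going away */
static void dev_reset(struct dest *dest, struct net_device *dev)
{
	if (dest->cached_dev == dev) {
		printf("reset cached route via %s\n", dev->name);
		dest->cached_dev = NULL;
	}
}

static int dst_event_sketch(unsigned long event, struct net_device *dev,
			    struct dest *dests, int ndests)
{
	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;          /* ignore every other event, as ip_vs_dst_event() does */
	for (int i = 0; i < ndests; i++)
		dev_reset(&dests[i], dev);
	return NOTIFY_DONE;
}

int main(void)
{
	struct net_device eth0 = { "eth0" }, eth1 = { "eth1" };
	struct dest dests[2] = { { &eth0 }, { &eth1 } };

	dst_event_sketch(NETDEV_UNREGISTER, &eth0, dests, 2);   /* only the eth0 entry is reset */
	return 0;
}
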
*/ ret = nf_register_sockopt(&ip_vs_sockopts); if (ret) { pr_err("cannot register sockopt.\n"); - goto err_net; + goto err_sock; } ret = ip_vs_genl_register(); if (ret) { pr_err("cannot register Generic Netlink interface.\n"); - nf_unregister_sockopt(&ip_vs_sockopts); - goto err_net; + goto err_genl; } + ret = register_netdevice_notifier(&ip_vs_dst_notifier); + if (ret < 0) + goto err_notf; + LeaveFunction(2); return 0; -err_net: - unregister_pernet_subsys(&ipvs_control_ops); -err: +err_notf: + ip_vs_genl_unregister(); +err_genl: + nf_unregister_sockopt(&ip_vs_sockopts); +err_sock: return ret; } @@ -3691,7 +3774,6 @@ err: void ip_vs_control_cleanup(void) { EnterFunction(2); - unregister_pernet_subsys(&ipvs_control_ops); ip_vs_genl_unregister(); nf_unregister_sockopt(&ip_vs_sockopts); LeaveFunction(2); diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c index 8c8766ca56a..508cce98777 100644 --- a/net/netfilter/ipvs/ip_vs_est.c +++ b/net/netfilter/ipvs/ip_vs_est.c @@ -192,7 +192,7 @@ void ip_vs_read_estimator(struct ip_vs_stats_user *dst, dst->outbps = (e->outbps + 0xF) >> 5; } -static int __net_init __ip_vs_estimator_init(struct net *net) +int __net_init __ip_vs_estimator_init(struct net *net) { struct netns_ipvs *ipvs = net_ipvs(net); @@ -203,24 +203,16 @@ static int __net_init __ip_vs_estimator_init(struct net *net) return 0; } -static void __net_exit __ip_vs_estimator_exit(struct net *net) +void __net_exit __ip_vs_estimator_cleanup(struct net *net) { del_timer_sync(&net_ipvs(net)->est_timer); } -static struct pernet_operations ip_vs_app_ops = { - .init = __ip_vs_estimator_init, - .exit = __ip_vs_estimator_exit, -}; int __init ip_vs_estimator_init(void) { - int rv; - - rv = register_pernet_subsys(&ip_vs_app_ops); - return rv; + return 0; } void ip_vs_estimator_cleanup(void) { - unregister_pernet_subsys(&ip_vs_app_ops); } diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c index 17484a4416e..eb86028536f 100644 --- a/net/netfilter/ipvs/ip_vs_proto.c +++ b/net/netfilter/ipvs/ip_vs_proto.c @@ -316,7 +316,7 @@ ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp, /* * per network name-space init */ -static int __net_init __ip_vs_protocol_init(struct net *net) +int __net_init __ip_vs_protocol_init(struct net *net) { #ifdef CONFIG_IP_VS_PROTO_TCP register_ip_vs_proto_netns(net, &ip_vs_protocol_tcp); @@ -336,7 +336,7 @@ static int __net_init __ip_vs_protocol_init(struct net *net) return 0; } -static void __net_exit __ip_vs_protocol_cleanup(struct net *net) +void __net_exit __ip_vs_protocol_cleanup(struct net *net) { struct netns_ipvs *ipvs = net_ipvs(net); struct ip_vs_proto_data *pd; @@ -349,11 +349,6 @@ static void __net_exit __ip_vs_protocol_cleanup(struct net *net) } } -static struct pernet_operations ipvs_proto_ops = { - .init = __ip_vs_protocol_init, - .exit = __ip_vs_protocol_cleanup, -}; - int __init ip_vs_protocol_init(void) { char protocols[64]; @@ -382,7 +377,6 @@ int __init ip_vs_protocol_init(void) REGISTER_PROTOCOL(&ip_vs_protocol_esp); #endif pr_info("Registered protocols (%s)\n", &protocols[2]); - return register_pernet_subsys(&ipvs_proto_ops); return 0; } @@ -393,7 +387,6 @@ void ip_vs_protocol_cleanup(void) struct ip_vs_protocol *pp; int i; - unregister_pernet_subsys(&ipvs_proto_ops); /* unregister all the ipvs protocols */ for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) { while ((pp = ip_vs_proto_table[i]) != NULL) diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index 3e7961e85e9..e292e5bddc7 
100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -1303,13 +1303,18 @@ static struct socket *make_send_sock(struct net *net) struct socket *sock; int result; - /* First create a socket */ - result = __sock_create(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock, 1); + /* First create a socket move it to right name space later */ + result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock); if (result < 0) { pr_err("Error during creation of socket; terminating\n"); return ERR_PTR(result); } - + /* + * Kernel sockets that are a part of a namespace, should not + * hold a reference to a namespace in order to allow to stop it. + * After sk_change_net should be released using sk_release_kernel. + */ + sk_change_net(sock->sk, net); result = set_mcast_if(sock->sk, ipvs->master_mcast_ifn); if (result < 0) { pr_err("Error setting outbound mcast interface\n"); @@ -1334,8 +1339,8 @@ static struct socket *make_send_sock(struct net *net) return sock; - error: - sock_release(sock); +error: + sk_release_kernel(sock->sk); return ERR_PTR(result); } @@ -1350,12 +1355,17 @@ static struct socket *make_receive_sock(struct net *net) int result; /* First create a socket */ - result = __sock_create(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock, 1); + result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock); if (result < 0) { pr_err("Error during creation of socket; terminating\n"); return ERR_PTR(result); } - + /* + * Kernel sockets that are a part of a namespace, should not + * hold a reference to a namespace in order to allow to stop it. + * After sk_change_net should be released using sk_release_kernel. + */ + sk_change_net(sock->sk, net); /* it is equivalent to the REUSEADDR option in user-space */ sock->sk->sk_reuse = 1; @@ -1377,8 +1387,8 @@ static struct socket *make_receive_sock(struct net *net) return sock; - error: - sock_release(sock); +error: + sk_release_kernel(sock->sk); return ERR_PTR(result); } @@ -1473,7 +1483,7 @@ static int sync_thread_master(void *data) ip_vs_sync_buff_release(sb); /* release the sending multicast socket */ - sock_release(tinfo->sock); + sk_release_kernel(tinfo->sock->sk); kfree(tinfo); return 0; @@ -1513,7 +1523,7 @@ static int sync_thread_backup(void *data) } /* release the sending multicast socket */ - sock_release(tinfo->sock); + sk_release_kernel(tinfo->sock->sk); kfree(tinfo->buf); kfree(tinfo); @@ -1601,7 +1611,7 @@ outtinfo: outbuf: kfree(buf); outsocket: - sock_release(sock); + sk_release_kernel(sock->sk); out: return result; } @@ -1610,6 +1620,7 @@ out: int stop_sync_thread(struct net *net, int state) { struct netns_ipvs *ipvs = net_ipvs(net); + int retc = -EINVAL; IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current)); @@ -1629,7 +1640,7 @@ int stop_sync_thread(struct net *net, int state) spin_lock_bh(&ipvs->sync_lock); ipvs->sync_state &= ~IP_VS_STATE_MASTER; spin_unlock_bh(&ipvs->sync_lock); - kthread_stop(ipvs->master_thread); + retc = kthread_stop(ipvs->master_thread); ipvs->master_thread = NULL; } else if (state == IP_VS_STATE_BACKUP) { if (!ipvs->backup_thread) @@ -1639,22 +1650,20 @@ int stop_sync_thread(struct net *net, int state) task_pid_nr(ipvs->backup_thread)); ipvs->sync_state &= ~IP_VS_STATE_BACKUP; - kthread_stop(ipvs->backup_thread); + retc = kthread_stop(ipvs->backup_thread); ipvs->backup_thread = NULL; - } else { - return -EINVAL; } /* decrease the module use count */ ip_vs_use_count_dec(); - return 0; + return retc; } /* * Initialize data struct for each netns */ -static int __net_init 
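
stop_sync_thread() above now propagates kthread_stop()'s return value, and the per-netns sync cleanup later in the patch treats -ESRCH as the harmless "that daemon was never started" case while still reporting real failures. A small sketch of that error handling; stop_master/stop_backup are placeholders for the two stop calls:

#include <stdio.h>
#include <errno.h>

/* very rough stand-ins: 0 = stopped ok, -ESRCH = no such daemon was running */
static int stop_master(int running) { return running ? 0 : -ESRCH; }
static int stop_backup(int running) { return running ? 0 : -ESRCH; }

static void sync_cleanup_sketch(int master_running, int backup_running)
{
	int retc;

	retc = stop_master(master_running);
	if (retc && retc != -ESRCH)          /* "not running" is fine at netns teardown */
		fprintf(stderr, "Failed to stop Master Daemon\n");

	retc = stop_backup(backup_running);
	if (retc && retc != -ESRCH)
		fprintf(stderr, "Failed to stop Backup Daemon\n");

	puts("sync cleanup finished");
}

int main(void)
{
	sync_cleanup_sketch(1, 0);   /* master was running, backup was not: no error printed */
	return 0;
}
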
__ip_vs_sync_init(struct net *net) +int __net_init __ip_vs_sync_init(struct net *net) { struct netns_ipvs *ipvs = net_ipvs(net); @@ -1668,24 +1677,24 @@ static int __net_init __ip_vs_sync_init(struct net *net) return 0; } -static void __ip_vs_sync_cleanup(struct net *net) +void __ip_vs_sync_cleanup(struct net *net) { - stop_sync_thread(net, IP_VS_STATE_MASTER); - stop_sync_thread(net, IP_VS_STATE_BACKUP); -} + int retc; -static struct pernet_operations ipvs_sync_ops = { - .init = __ip_vs_sync_init, - .exit = __ip_vs_sync_cleanup, -}; + retc = stop_sync_thread(net, IP_VS_STATE_MASTER); + if (retc && retc != -ESRCH) + pr_err("Failed to stop Master Daemon\n"); + retc = stop_sync_thread(net, IP_VS_STATE_BACKUP); + if (retc && retc != -ESRCH) + pr_err("Failed to stop Backup Daemon\n"); +} int __init ip_vs_sync_init(void) { - return register_pernet_subsys(&ipvs_sync_ops); + return 0; } void ip_vs_sync_cleanup(void) { - unregister_pernet_subsys(&ipvs_sync_ops); } diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 30bf8a167fc..482e90c6185 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -1334,6 +1334,7 @@ ctnetlink_create_conntrack(struct net *net, u16 zone, struct nf_conn *ct; int err = -EINVAL; struct nf_conntrack_helper *helper; + struct nf_conn_tstamp *tstamp; ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC); if (IS_ERR(ct)) @@ -1451,6 +1452,9 @@ ctnetlink_create_conntrack(struct net *net, u16 zone, __set_bit(IPS_EXPECTED_BIT, &ct->status); ct->master = master_ct; } + tstamp = nf_conn_tstamp_find(ct); + if (tstamp) + tstamp->start = ktime_to_ns(ktime_get_real()); add_timer(&ct->timeout); nf_conntrack_hash_insert(ct); diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index a9adf4c6b29..8a025a585d2 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -455,6 +455,7 @@ void xt_compat_flush_offsets(u_int8_t af) vfree(xt[af].compat_tab); xt[af].compat_tab = NULL; xt[af].number = 0; + xt[af].cur = 0; } } EXPORT_SYMBOL_GPL(xt_compat_flush_offsets); @@ -473,8 +474,7 @@ int xt_compat_calc_jump(u_int8_t af, unsigned int offset) else return mid ? tmp[mid - 1].delta : 0; } - WARN_ON_ONCE(1); - return 0; + return left ? 
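
The nf_conntrack_netlink hunk above fills the timestamp extension's start field when a conntrack entry is created over netlink, mirroring what the packet path records with ktime_to_ns(ktime_get_real()); without it such entries report a start time of zero. A userspace approximation of that nanosecond timestamp:

#include <stdio.h>
#include <stdint.h>
#include <time.h>

/* stand-in for the conntrack timestamp extension */
struct conn_tstamp { uint64_t start; };

int main(void)
{
	struct timespec ts;
	struct conn_tstamp tstamp = { 0 };

	/* userspace analogue of tstamp->start = ktime_to_ns(ktime_get_real()) */
	clock_gettime(CLOCK_REALTIME, &ts);
	tstamp.start = (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;

	printf("start = %llu ns since the epoch\n", (unsigned long long)tstamp.start);
	return 0;
}
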
tmp[left - 1].delta : 0; } EXPORT_SYMBOL_GPL(xt_compat_calc_jump); diff --git a/net/netfilter/xt_DSCP.c b/net/netfilter/xt_DSCP.c index 0a229191e55..ae8271652ef 100644 --- a/net/netfilter/xt_DSCP.c +++ b/net/netfilter/xt_DSCP.c @@ -99,7 +99,7 @@ tos_tg6(struct sk_buff *skb, const struct xt_action_param *par) u_int8_t orig, nv; orig = ipv6_get_dsfield(iph); - nv = (orig & info->tos_mask) ^ info->tos_value; + nv = (orig & ~info->tos_mask) ^ info->tos_value; if (orig != nv) { if (!skb_make_writable(skb, sizeof(struct iphdr))) diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c index 481a86fdc40..61805d7b38a 100644 --- a/net/netfilter/xt_conntrack.c +++ b/net/netfilter/xt_conntrack.c @@ -272,11 +272,6 @@ static int conntrack_mt_check(const struct xt_mtchk_param *par) { int ret; - if (strcmp(par->table, "raw") == 0) { - pr_info("state is undetermined at the time of raw table\n"); - return -EINVAL; - } - ret = nf_ct_l3proto_try_module_get(par->family); if (ret < 0) pr_info("cannot load conntrack support for proto=%u\n", diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 15792d8b627..b4d745ea8ee 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1406,6 +1406,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, struct net *net = xp_net(policy); unsigned long now = jiffies; struct net_device *dev; + struct xfrm_mode *inner_mode; struct dst_entry *dst_prev = NULL; struct dst_entry *dst0 = NULL; int i = 0; @@ -1436,6 +1437,17 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, goto put_states; } + if (xfrm[i]->sel.family == AF_UNSPEC) { + inner_mode = xfrm_ip2inner_mode(xfrm[i], + xfrm_af2proto(family)); + if (!inner_mode) { + err = -EAFNOSUPPORT; + dst_release(dst); + goto put_states; + } + } else + inner_mode = xfrm[i]->inner_mode; + if (!dst_prev) dst0 = dst1; else { @@ -1464,7 +1476,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, dst1->lastuse = now; dst1->input = dst_discard; - dst1->output = xfrm[i]->outer_mode->afinfo->output; + dst1->output = inner_mode->afinfo->output; dst1->next = dst_prev; dst_prev = dst1; diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c index e8a781422fe..47f1b8638df 100644 --- a/net/xfrm/xfrm_replay.c +++ b/net/xfrm/xfrm_replay.c @@ -535,6 +535,9 @@ int xfrm_init_replay(struct xfrm_state *x) replay_esn->bmp_len * sizeof(__u32) * 8) return -EINVAL; + if ((x->props.flags & XFRM_STATE_ESN) && replay_esn->replay_window == 0) + return -EINVAL; + if ((x->props.flags & XFRM_STATE_ESN) && x->replay_esn) x->repl = &xfrm_replay_esn; else diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c index e6e7ce0d3d5..7102457661d 100644 --- a/security/selinux/ss/policydb.c +++ b/security/selinux/ss/policydb.c @@ -1819,8 +1819,6 @@ static int filename_trans_read(struct policydb *p, void *fp) goto out; nel = le32_to_cpu(buf[0]); - printk(KERN_ERR "%s: nel=%d\n", __func__, nel); - last = p->filename_trans; while (last && last->next) last = last->next; @@ -1857,8 +1855,6 @@ static int filename_trans_read(struct policydb *p, void *fp) goto out; name[len] = 0; - printk(KERN_ERR "%s: ft=%p ft->name=%p ft->name=%s\n", __func__, ft, ft->name, ft->name); - rc = next_entry(buf, fp, sizeof(u32) * 4); if (rc) goto out; diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c index 2727befd158..b04d28039c1 100644 --- a/sound/soc/codecs/ssm2602.c +++ b/sound/soc/codecs/ssm2602.c @@ -139,7 +139,7 @@ SOC_DOUBLE_R("Capture Volume", 
SSM2602_LINVOL, SSM2602_RINVOL, 0, 31, 0), SOC_DOUBLE_R("Capture Switch", SSM2602_LINVOL, SSM2602_RINVOL, 7, 1, 1), SOC_SINGLE("Mic Boost (+20dB)", SSM2602_APANA, 0, 1, 0), -SOC_SINGLE("Mic Boost2 (+20dB)", SSM2602_APANA, 7, 1, 0), +SOC_SINGLE("Mic Boost2 (+20dB)", SSM2602_APANA, 8, 1, 0), SOC_SINGLE("Mic Switch", SSM2602_APANA, 1, 1, 1), SOC_SINGLE("Sidetone Playback Volume", SSM2602_APANA, 6, 3, 1), @@ -602,7 +602,7 @@ static struct snd_soc_codec_driver soc_codec_dev_ssm2602 = { .read = ssm2602_read_reg_cache, .write = ssm2602_write, .set_bias_level = ssm2602_set_bias_level, - .reg_cache_size = sizeof(ssm2602_reg), + .reg_cache_size = ARRAY_SIZE(ssm2602_reg), .reg_word_size = sizeof(u16), .reg_cache_default = ssm2602_reg, }; @@ -614,7 +614,7 @@ static struct snd_soc_codec_driver soc_codec_dev_ssm2602 = { * low = 0x1a * high = 0x1b */ -static int ssm2602_i2c_probe(struct i2c_client *i2c, +static int __devinit ssm2602_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct ssm2602_priv *ssm2602; @@ -635,7 +635,7 @@ static int ssm2602_i2c_probe(struct i2c_client *i2c, return ret; } -static int ssm2602_i2c_remove(struct i2c_client *client) +static int __devexit ssm2602_i2c_remove(struct i2c_client *client) { snd_soc_unregister_codec(&client->dev); kfree(i2c_get_clientdata(client)); @@ -655,7 +655,7 @@ static struct i2c_driver ssm2602_i2c_driver = { .owner = THIS_MODULE, }, .probe = ssm2602_i2c_probe, - .remove = ssm2602_i2c_remove, + .remove = __devexit_p(ssm2602_i2c_remove), .id_table = ssm2602_i2c_id, }; #endif diff --git a/sound/soc/codecs/uda134x.c b/sound/soc/codecs/uda134x.c index 48ffd406a71..a7b8f301bad 100644 --- a/sound/soc/codecs/uda134x.c +++ b/sound/soc/codecs/uda134x.c @@ -601,9 +601,7 @@ static struct snd_soc_codec_driver soc_codec_dev_uda134x = { .reg_cache_step = 1, .read = uda134x_read_reg_cache, .write = uda134x_write, -#ifdef POWER_OFF_ON_STANDBY .set_bias_level = uda134x_set_bias_level, -#endif }; static int __devinit uda134x_codec_probe(struct platform_device *pdev) diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c index f52b623bb69..824d1c8c8a3 100644 --- a/sound/soc/codecs/wm8903.c +++ b/sound/soc/codecs/wm8903.c @@ -692,7 +692,7 @@ SOC_ENUM("DRC Smoothing Threshold", drc_smoothing), SOC_SINGLE_TLV("DRC Startup Volume", WM8903_DRC_0, 6, 18, 0, drc_tlv_startup), SOC_DOUBLE_R_TLV("Digital Capture Volume", WM8903_ADC_DIGITAL_VOLUME_LEFT, - WM8903_ADC_DIGITAL_VOLUME_RIGHT, 1, 96, 0, digital_tlv), + WM8903_ADC_DIGITAL_VOLUME_RIGHT, 1, 120, 0, digital_tlv), SOC_ENUM("ADC Companding Mode", adc_companding), SOC_SINGLE("ADC Companding Switch", WM8903_AUDIO_INTERFACE_0, 3, 1, 0), diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c index a5af834c8ef..4ddc6d3b667 100644 --- a/sound/soc/davinci/davinci-mcasp.c +++ b/sound/soc/davinci/davinci-mcasp.c @@ -434,17 +434,21 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai, mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE); mcasp_set_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE); - mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG, (0x7 << 26)); + mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG, + ACLKX | AHCLKX | AFSX); break; case SND_SOC_DAIFMT_CBM_CFS: /* codec is clock master and frame slave */ - mcasp_set_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXE); + mcasp_clr_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXE); mcasp_set_bits(base + DAVINCI_MCASP_TXFMCTL_REG, AFSXE); - mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE); + 
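
The ssm2602 change above uses ARRAY_SIZE() for reg_cache_size: the cache is an array of u16 register defaults, and reg_cache_size counts registers, so sizeof() reported bytes and overstated the count by a factor of two. A short illustration; the values below are arbitrary, not the real defaults:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const unsigned short reg_cache_example[] = {
	0x0097, 0x0097, 0x0079, 0x0079, 0x000a, 0x0008, 0x009f, 0x000a,
};

int main(void)
{
	printf("sizeof     = %zu (bytes)\n", sizeof(reg_cache_example));         /* 16 */
	printf("ARRAY_SIZE = %zu (registers)\n", ARRAY_SIZE(reg_cache_example)); /* 8 */
	return 0;
}
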
mcasp_clr_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE); mcasp_set_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE); - mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG, (0x2d << 26)); + mcasp_clr_bits(base + DAVINCI_MCASP_PDIR_REG, + ACLKX | ACLKR); + mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG, + AFSX | AFSR); break; case SND_SOC_DAIFMT_CBM_CFM: /* codec is clock and frame master */ @@ -454,7 +458,8 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai, mcasp_clr_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE); mcasp_clr_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE); - mcasp_clr_bits(base + DAVINCI_MCASP_PDIR_REG, (0x3f << 26)); + mcasp_clr_bits(base + DAVINCI_MCASP_PDIR_REG, + ACLKX | AHCLKX | AFSX | ACLKR | AHCLKR | AFSR); break; default: @@ -644,7 +649,7 @@ static void davinci_hw_param(struct davinci_audio_dev *dev, int stream) mcasp_set_reg(dev->base + DAVINCI_MCASP_TXTDM_REG, mask); mcasp_set_bits(dev->base + DAVINCI_MCASP_TXFMT_REG, TXORD); - if ((dev->tdm_slots >= 2) || (dev->tdm_slots <= 32)) + if ((dev->tdm_slots >= 2) && (dev->tdm_slots <= 32)) mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMCTL_REG, FSXMOD(dev->tdm_slots), FSXMOD(0x1FF)); else @@ -660,7 +665,7 @@ static void davinci_hw_param(struct davinci_audio_dev *dev, int stream) AHCLKRE); mcasp_set_reg(dev->base + DAVINCI_MCASP_RXTDM_REG, mask); - if ((dev->tdm_slots >= 2) || (dev->tdm_slots <= 32)) + if ((dev->tdm_slots >= 2) && (dev->tdm_slots <= 32)) mcasp_mod_bits(dev->base + DAVINCI_MCASP_RXFMCTL_REG, FSRMOD(dev->tdm_slots), FSRMOD(0x1FF)); else diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c index 419bf4f5534..cd22a54b2f1 100644 --- a/sound/soc/jz4740/jz4740-i2s.c +++ b/sound/soc/jz4740/jz4740-i2s.c @@ -133,7 +133,7 @@ static void jz4740_i2s_shutdown(struct snd_pcm_substream *substream, struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); uint32_t conf; - if (!dai->active) + if (dai->active) return; conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF); diff --git a/sound/soc/mid-x86/sst_platform.c b/sound/soc/mid-x86/sst_platform.c index d567c322a2f..6b1f9d3bf34 100644 --- a/sound/soc/mid-x86/sst_platform.c +++ b/sound/soc/mid-x86/sst_platform.c @@ -376,6 +376,11 @@ static int sst_platform_pcm_hw_params(struct snd_pcm_substream *substream, return 0; } +static int sst_platform_pcm_hw_free(struct snd_pcm_substream *substream) +{ + return snd_pcm_lib_free_pages(substream); +} + static struct snd_pcm_ops sst_platform_ops = { .open = sst_platform_open, .close = sst_platform_close, @@ -384,6 +389,7 @@ static struct snd_pcm_ops sst_platform_ops = { .trigger = sst_platform_pcm_trigger, .pointer = sst_platform_pcm_pointer, .hw_params = sst_platform_pcm_hw_params, + .hw_free = sst_platform_pcm_hw_free, }; static void sst_pcm_free(struct snd_pcm *pcm) diff --git a/sound/soc/samsung/goni_wm8994.c b/sound/soc/samsung/goni_wm8994.c index f6b3a3ce591..0e80daee8b6 100644 --- a/sound/soc/samsung/goni_wm8994.c +++ b/sound/soc/samsung/goni_wm8994.c @@ -236,18 +236,18 @@ static struct snd_soc_dai_link goni_dai[] = { .name = "WM8994", .stream_name = "WM8994 HiFi", .cpu_dai_name = "samsung-i2s.0", - .codec_dai_name = "wm8994-hifi", + .codec_dai_name = "wm8994-aif1", .platform_name = "samsung-audio", - .codec_name = "wm8994-codec.0-0x1a", + .codec_name = "wm8994-codec.0-001a", .init = goni_wm8994_init, .ops = &goni_hifi_ops, }, { .name = "WM8994 Voice", .stream_name = "Voice", .cpu_dai_name = "goni-voice-dai", - .codec_dai_name = "wm8994-voice", + .codec_dai_name = "wm8994-aif2", .platform_name = 
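
The davinci-mcasp hunks above also fix a classic range-check slip: "(dev->tdm_slots >= 2) || (dev->tdm_slots <= 32)" is true for every value, so the fallback branch could never run; with && only values in 2..32 are accepted. A two-line demonstration:

#include <stdio.h>
#include <stdbool.h>

static bool slots_valid_old(int n) { return n >= 2 || n <= 32; } /* always true */
static bool slots_valid_new(int n) { return n >= 2 && n <= 32; }

int main(void)
{
	int probe[] = { 0, 1, 2, 32, 33, 100 };
	for (unsigned int i = 0; i < sizeof(probe) / sizeof(probe[0]); i++)
		printf("%3d: old=%d new=%d\n", probe[i],
		       slots_valid_old(probe[i]), slots_valid_new(probe[i]));
	return 0;
}
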
"samsung-audio", - .codec_name = "wm8994-codec.0-0x1a", + .codec_name = "wm8994-codec.0-001a", .ops = &goni_voice_ops, }, }; diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index d8562ce4de7..dd55d106946 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -3291,6 +3291,8 @@ int snd_soc_register_card(struct snd_soc_card *card) if (!card->name || !card->dev) return -EINVAL; + dev_set_drvdata(card->dev, card); + snd_soc_initialize_card_lists(card); soc_init_card_debugfs(card); diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 207dee5c5b1..0c542563ea6 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -35,15 +35,21 @@ ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \ -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \ -e s/sh[234].*/sh/ ) +CC = $(CROSS_COMPILE)gcc +AR = $(CROSS_COMPILE)ar + # Additional ARCH settings for x86 ifeq ($(ARCH),i386) ARCH := x86 endif ifeq ($(ARCH),x86_64) - RAW_ARCH := x86_64 - ARCH := x86 - ARCH_CFLAGS := -DARCH_X86_64 - ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S + ARCH := x86 + IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -xc - | tail -n 1) + ifeq (${IS_X86_64}, 1) + RAW_ARCH := x86_64 + ARCH_CFLAGS := -DARCH_X86_64 + ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S + endif endif # @@ -119,8 +125,6 @@ lib = lib export prefix bindir sharedir sysconfdir -CC = $(CROSS_COMPILE)gcc -AR = $(CROSS_COMPILE)ar RM = rm -f MKDIR = mkdir FIND = find diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 416538248a4..0974f957b8f 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -427,7 +427,7 @@ static void mmap_read_all(void) { int i; - for (i = 0; i < evsel_list->cpus->nr; i++) { + for (i = 0; i < evsel_list->nr_mmaps; i++) { if (evsel_list->mmap[i].base) mmap_read(&evsel_list->mmap[i]); } diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c index 11e3c845836..2f9a337b182 100644 --- a/tools/perf/builtin-test.c +++ b/tools/perf/builtin-test.c @@ -549,7 +549,7 @@ static int test__basic_mmap(void) ++foo; } - while ((event = perf_evlist__read_on_cpu(evlist, 0)) != NULL) { + while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) { struct perf_sample sample; if (event->header.type != PERF_RECORD_SAMPLE) { diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 7e3d6e310bf..ebfc7cf5f63 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -801,12 +801,12 @@ static void perf_event__process_sample(const union perf_event *event, } } -static void perf_session__mmap_read_cpu(struct perf_session *self, int cpu) +static void perf_session__mmap_read_idx(struct perf_session *self, int idx) { struct perf_sample sample; union perf_event *event; - while ((event = perf_evlist__read_on_cpu(top.evlist, cpu)) != NULL) { + while ((event = perf_evlist__mmap_read(top.evlist, idx)) != NULL) { perf_session__parse_sample(self, event, &sample); if (event->header.type == PERF_RECORD_SAMPLE) @@ -820,8 +820,8 @@ static void perf_session__mmap_read(struct perf_session *self) { int i; - for (i = 0; i < top.evlist->cpus->nr; i++) - perf_session__mmap_read_cpu(self, i); + for (i = 0; i < top.evlist->nr_mmaps; i++) + perf_session__mmap_read_idx(self, i); } static void start_counters(struct perf_evlist *evlist) diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index 45da8d186b4..23eb22b05d2 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -166,11 +166,11 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist 
*evlist, u64 id) return NULL; } -union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *evlist, int cpu) +union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx) { /* XXX Move this to perf.c, making it generally available */ unsigned int page_size = sysconf(_SC_PAGE_SIZE); - struct perf_mmap *md = &evlist->mmap[cpu]; + struct perf_mmap *md = &evlist->mmap[idx]; unsigned int head = perf_mmap__read_head(md); unsigned int old = md->prev; unsigned char *data = md->base + page_size; @@ -235,31 +235,37 @@ union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *evlist, int cpu) void perf_evlist__munmap(struct perf_evlist *evlist) { - int cpu; + int i; - for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { - if (evlist->mmap[cpu].base != NULL) { - munmap(evlist->mmap[cpu].base, evlist->mmap_len); - evlist->mmap[cpu].base = NULL; + for (i = 0; i < evlist->nr_mmaps; i++) { + if (evlist->mmap[i].base != NULL) { + munmap(evlist->mmap[i].base, evlist->mmap_len); + evlist->mmap[i].base = NULL; } } + + free(evlist->mmap); + evlist->mmap = NULL; } int perf_evlist__alloc_mmap(struct perf_evlist *evlist) { - evlist->mmap = zalloc(evlist->cpus->nr * sizeof(struct perf_mmap)); + evlist->nr_mmaps = evlist->cpus->nr; + if (evlist->cpus->map[0] == -1) + evlist->nr_mmaps = evlist->threads->nr; + evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap)); return evlist->mmap != NULL ? 0 : -ENOMEM; } static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *evsel, - int cpu, int prot, int mask, int fd) + int idx, int prot, int mask, int fd) { - evlist->mmap[cpu].prev = 0; - evlist->mmap[cpu].mask = mask; - evlist->mmap[cpu].base = mmap(NULL, evlist->mmap_len, prot, + evlist->mmap[idx].prev = 0; + evlist->mmap[idx].mask = mask; + evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot, MAP_SHARED, fd, 0); - if (evlist->mmap[cpu].base == MAP_FAILED) { - if (evlist->cpus->map[cpu] == -1 && evsel->attr.inherit) + if (evlist->mmap[idx].base == MAP_FAILED) { + if (evlist->cpus->map[idx] == -1 && evsel->attr.inherit) ui__warning("Inherit is not allowed on per-task " "events using mmap.\n"); return -1; @@ -269,6 +275,86 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *ev return 0; } +static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask) +{ + struct perf_evsel *evsel; + int cpu, thread; + + for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { + int output = -1; + + for (thread = 0; thread < evlist->threads->nr; thread++) { + list_for_each_entry(evsel, &evlist->entries, node) { + int fd = FD(evsel, cpu, thread); + + if (output == -1) { + output = fd; + if (__perf_evlist__mmap(evlist, evsel, cpu, + prot, mask, output) < 0) + goto out_unmap; + } else { + if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0) + goto out_unmap; + } + + if ((evsel->attr.read_format & PERF_FORMAT_ID) && + perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0) + goto out_unmap; + } + } + } + + return 0; + +out_unmap: + for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { + if (evlist->mmap[cpu].base != NULL) { + munmap(evlist->mmap[cpu].base, evlist->mmap_len); + evlist->mmap[cpu].base = NULL; + } + } + return -1; +} + +static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask) +{ + struct perf_evsel *evsel; + int thread; + + for (thread = 0; thread < evlist->threads->nr; thread++) { + int output = -1; + + list_for_each_entry(evsel, &evlist->entries, node) { + int fd = FD(evsel, 0, thread); + + if 
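
perf_evlist__mmap_per_cpu() and the per-thread variant above mmap only one ring buffer per CPU (or per thread) and route every other event's samples into it with PERF_EVENT_IOC_SET_OUTPUT. A minimal stand-alone example of that redirection for two per-thread software events; Linux only, and the event type, period, and buffer size are arbitrary choices made for the sketch:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <linux/perf_event.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid, int cpu,
			   int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_IP;

	/* two per-thread events for the current task, any CPU */
	int fd_a = perf_event_open(&attr, 0, -1, -1, 0);
	int fd_b = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd_a < 0 || fd_b < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* one ring buffer: 1 metadata page + 8 data pages, mapped on fd_a only */
	size_t len = (size_t)(1 + 8) * sysconf(_SC_PAGE_SIZE);
	void *ring = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd_a, 0);
	if (ring == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* redirect fd_b's samples into fd_a's buffer instead of mmapping it too */
	if (ioctl(fd_b, PERF_EVENT_IOC_SET_OUTPUT, fd_a) != 0) {
		perror("PERF_EVENT_IOC_SET_OUTPUT");
		return 1;
	}

	printf("both events now share the ring buffer at %p\n", ring);
	return 0;
}
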
(output == -1) { + output = fd; + if (__perf_evlist__mmap(evlist, evsel, thread, + prot, mask, output) < 0) + goto out_unmap; + } else { + if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0) + goto out_unmap; + } + + if ((evsel->attr.read_format & PERF_FORMAT_ID) && + perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0) + goto out_unmap; + } + } + + return 0; + +out_unmap: + for (thread = 0; thread < evlist->threads->nr; thread++) { + if (evlist->mmap[thread].base != NULL) { + munmap(evlist->mmap[thread].base, evlist->mmap_len); + evlist->mmap[thread].base = NULL; + } + } + return -1; +} + /** perf_evlist__mmap - Create per cpu maps to receive events * * @evlist - list of events @@ -287,11 +373,11 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *ev int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite) { unsigned int page_size = sysconf(_SC_PAGE_SIZE); - int mask = pages * page_size - 1, cpu; - struct perf_evsel *first_evsel, *evsel; + int mask = pages * page_size - 1; + struct perf_evsel *evsel; const struct cpu_map *cpus = evlist->cpus; const struct thread_map *threads = evlist->threads; - int thread, prot = PROT_READ | (overwrite ? 0 : PROT_WRITE); + int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE); if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0) return -ENOMEM; @@ -301,43 +387,18 @@ int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite) evlist->overwrite = overwrite; evlist->mmap_len = (pages + 1) * page_size; - first_evsel = list_entry(evlist->entries.next, struct perf_evsel, node); list_for_each_entry(evsel, &evlist->entries, node) { if ((evsel->attr.read_format & PERF_FORMAT_ID) && evsel->sample_id == NULL && perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0) return -ENOMEM; - - for (cpu = 0; cpu < cpus->nr; cpu++) { - for (thread = 0; thread < threads->nr; thread++) { - int fd = FD(evsel, cpu, thread); - - if (evsel->idx || thread) { - if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, - FD(first_evsel, cpu, 0)) != 0) - goto out_unmap; - } else if (__perf_evlist__mmap(evlist, evsel, cpu, - prot, mask, fd) < 0) - goto out_unmap; - - if ((evsel->attr.read_format & PERF_FORMAT_ID) && - perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0) - goto out_unmap; - } - } } - return 0; + if (evlist->cpus->map[0] == -1) + return perf_evlist__mmap_per_thread(evlist, prot, mask); -out_unmap: - for (cpu = 0; cpu < cpus->nr; cpu++) { - if (evlist->mmap[cpu].base != NULL) { - munmap(evlist->mmap[cpu].base, evlist->mmap_len); - evlist->mmap[cpu].base = NULL; - } - } - return -1; + return perf_evlist__mmap_per_cpu(evlist, prot, mask); } int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid, @@ -348,7 +409,7 @@ int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid, if (evlist->threads == NULL) return -1; - if (target_tid != -1) + if (cpu_list == NULL && target_tid != -1) evlist->cpus = cpu_map__dummy_new(); else evlist->cpus = cpu_map__new(cpu_list); diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index 8b1cb7a4c5f..7109d7add14 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h @@ -17,6 +17,7 @@ struct perf_evlist { struct hlist_head heads[PERF_EVLIST__HLIST_SIZE]; int nr_entries; int nr_fds; + int nr_mmaps; int mmap_len; bool overwrite; union perf_event event_copy; @@ -46,7 +47,7 @@ void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd); struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id); 
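
perf_evlist__mmap() above keeps the buffer geometry unchanged: "pages" data pages (a power of two) plus one metadata page, with mask = pages * page_size - 1 used by the readers to wrap the running head counter into the data area. A small worked example of that arithmetic:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	int pages = 128;                                    /* must be a power of two */
	unsigned int mask = pages * page_size - 1;          /* wraps offsets into the data area */
	size_t mmap_len = (size_t)(pages + 1) * page_size;  /* plus 1 metadata page at the front */

	unsigned int head = 3 * pages * page_size + 4096;   /* hypothetical running byte counter */
	printf("mmap_len=%zu bytes, head %u maps to offset %u\n",
	       mmap_len, head, head & mask);
	return 0;
}
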
-union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *self, int cpu); +union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx); int perf_evlist__alloc_mmap(struct perf_evlist *evlist); int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite); diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c index f5e38451fdc..99c722672f8 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c @@ -680,7 +680,7 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist, &cpu, &sample_id_all)) return NULL; - event = perf_evlist__read_on_cpu(evlist, cpu); + event = perf_evlist__mmap_read(evlist, cpu); if (event != NULL) { struct perf_evsel *first; PyObject *pyevent = pyrf_event__new(event); |